catch up with trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/positions@1149576 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index e998676..6060445 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -489,6 +489,15 @@
   MultiTermQuery now stores TermState per leaf reader during rewrite to re-
   seek the term dictionary in TermQuery / TermWeight.
   (Simon Willnauer, Mike McCandless, Robert Muir)
+
+* LUCENE-3292: IndexWriter no longer shares the same SegmentReader
+  instance for merging and NRT readers, which enables directory impls
+  to separately tune IO flags for each.  (Varun Thacker, Simon
+  Willnauer, Mike McCandless)
+
+* LUCENE-3328: BooleanQuery now uses a specialized ConjunctionScorer if all
+  boolean clauses are required and are instances of TermQuery.
+  (Simon Willnauer, Robert Muir)
   
 Bug fixes
 
@@ -554,6 +563,11 @@
   greatly reduce RAM required during building, and CPU consumed, at
   the cost of a somewhat larger FST.  (Mike McCandless)
 
+Test Cases
+
+* LUCENE-3327: Fix AIOOBE when TestFSTs is run with
+  -Dtests.verbose=true (James Dyer via Mike McCandless)
+
 ======================= Lucene 3.3.0 =======================
 
 Changes in backwards compatibility policy
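
The LUCENE-3292 entry above is the motivation for the ReaderPool changes later in this patch: because merge readers and NRT readers are now opened separately, a directory implementation can tune its IO behavior per IOContext.Context. Below is a minimal, self-contained sketch of that idea (illustration only, not part of the patch; the class, enum, and buffer sizes are hypothetical and do not reflect the actual org.apache.lucene.store API):

    import java.util.EnumMap;
    import java.util.Map;

    // Hypothetical sketch: pick different IO settings depending on whether a
    // segment is being opened for searching (READ) or for merging (MERGE).
    public class ContextTunedIOSketch {

      // Mirrors the two contexts the ReaderPool now keys its readers on.
      enum Context { READ, MERGE }

      private final Map<Context, Integer> bufferSizes =
          new EnumMap<Context, Integer>(Context.class);

      public ContextTunedIOSketch() {
        bufferSizes.put(Context.READ, 1024);              // NRT/search: favor latency
        bufferSizes.put(Context.MERGE, 4 * 1024 * 1024);  // merge: favor throughput
      }

      public int bufferSizeFor(Context context) {
        return bufferSizes.get(context);
      }

      public static void main(String[] args) {
        ContextTunedIOSketch io = new ContextTunedIOSketch();
        System.out.println("READ  buffer: " + io.bufferSizeFor(Context.READ));
        System.out.println("MERGE buffer: " + io.bufferSizeFor(Context.MERGE));
      }
    }
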
diff --git a/lucene/build.xml b/lucene/build.xml
index 8916e78..ddd1265 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -75,15 +75,15 @@
 	
   <path id="backwards.junit.classpath">
     <path refid="junit-path"/>
+    <path refid="classpath"/>
     <pathelement location="${build.dir.backwards}/classes/test"/>
-    <pathelement location="${build.dir}/${final.name}.jar"/>
     <pathelement path="${java.class.path}"/>
   </path>
 
   <!-- remove this -->
   <target name="test-tag" depends="test-backwards" description="deprecated"/>
   
-  <target name="compile-backwards" depends="compile-core, jar-core"
+  <target name="compile-backwards" depends="compile-core"
   	description="Runs tests of a previous Lucene version.">
 	<sequential>
       <mkdir dir="${build.dir.backwards}"/>	  
@@ -96,22 +96,42 @@
   	</sequential>
   </target>	
 
-  <target name="test-backwards" /><!--add here after 4.0: depends="compile-backwards, junit-backwards-mkdir, junit-backwards-sequential, junit-backwards-parallel"-->
+  <target name="test-backwards" /><!--add here after 4.0: depends="compile-backwards, backwards-test-warning, junit-backwards-mkdir, junit-backwards-sequential, junit-backwards-parallel"-->
 
   <target name="junit-backwards-mkdir">
     <mkdir dir="${build.dir.backwards}/test"/>
   </target>
 
+  <target name="check-backwards-params">
+    <condition property="backwards.ignoring.params">
+      <or>
+        <istrue value="${tests.nightly}"/>
+        <not><equals arg1="${tests.multiplier}" arg2="1"/></not>
+      </or>
+    </condition>
+  </target>
+
+  <target name="backwards-test-warning" depends="check-backwards-params" if="backwards.ignoring.params">
+    <echo>
+       Warning: Ignoring your multiplier and nightly settings for backwards tests.
+       These tests are for API compatibility only!
+    </echo>
+  </target>
+
   <macrodef name="backwards-test-macro">
   	<attribute name="threadNum" default="1"/>
   	<attribute name="threadTotal" default="1"/>
   	<sequential>
-  	  <!-- run branch tests against trunk jar -->
+  	  <!-- run branch tests against trunk jar:
+          Note: we disable multiplier/nightly because the purpose is to find API breaks
+          -->
       <test-macro 
     	dataDir="${backwards.dir}/src/test" 
     	tempDir="${build.dir.backwards}/test" 
     	junit.classpath="backwards.junit.classpath" 
     	junit.output.dir="${junit.output.dir.backwards}" 
+        tests.nightly="false"
+        tests.multiplier="1"
         threadNum="@{threadNum}" 
         threadTotal="@{threadTotal}"/>
   	</sequential>
@@ -390,7 +410,8 @@
                   classifier="javadoc"/>
         </artifact-attachments>
       </m2-deploy>
-      <m2-deploy pom.xml="src/test-framework/pom.xml">
+      <m2-deploy pom.xml="src/test-framework/pom.xml"
+                 jar.file="${build.dir}/lucene-test-framework-${version}.jar">
         <artifact-attachments>
           <attach file="${build.dir}/lucene-test-framework-${version}-src.jar"
                   classifier="sources"/>
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index ea91fb0..e2104a5 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -490,6 +490,8 @@
   	<attribute name="tempDir" default="${build.dir}/test"/>
   	<attribute name="threadNum" default="1"/>
   	<attribute name="threadTotal" default="1"/>
+        <attribute name="tests.nightly" default="${tests.nightly}"/>
+        <attribute name="tests.multiplier" default="${tests.multiplier}"/>
 
     <sequential>
 	    <condition property="runall">
@@ -540,10 +542,10 @@
               <!-- logging config file -->
               <sysproperty key="java.util.logging.config.file" value="${tests.loggingfile}"/>
           <!-- set whether or not nightly tests should run -->
-          <sysproperty key="tests.nightly" value="${tests.nightly}"/>
+          <sysproperty key="tests.nightly" value="@{tests.nightly}"/>
 
 	      <!-- TODO: create propertyset for test properties, so each project can have its own set -->
-              <sysproperty key="tests.multiplier" value="${tests.multiplier}"/>
+              <sysproperty key="tests.multiplier" value="@{tests.multiplier}"/>
 	      <sysproperty key="tempDir" file="@{tempDir}/@{threadNum}"/>
 
 	      <sysproperty key="lucene.version" value="${dev.version}"/>
diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt
index b26d575..313ea27 100644
--- a/lucene/contrib/CHANGES.txt
+++ b/lucene/contrib/CHANGES.txt
@@ -83,6 +83,7 @@
    Removed contrib/wordnet.  (Robert Muir, Mike McCandless)
 
 API Changes
+
  * LUCENE-3296: PKIndexSplitter & MultiPassIndexSplitter now have version
    constructors. PKIndexSplitter accepts a IndexWriterConfig for each of 
    the target indexes. (Simon Willnauer, Jason Rutherglen)
@@ -95,6 +96,12 @@
       
 Bug Fixes
 
+ * LUCENE-3326: Fixed a bug where MoreLikeThis.like(Reader) would try to
+   re-analyze the same Reader multiple times, passing different field names
+   to the analyzer. Additionally, MoreLikeThisQuery would encode/decode your
+   string using the default charset; it now uses a StringReader. Finally,
+   MoreLikeThis's methods that take a File, URL, or InputStream are
+   deprecated; please create the Reader yourself. (Trejkaz, Robert Muir)
 
 ======================= Lucene 3.3.0 =======================
 
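
The LUCENE-3326 entry above notes that MoreLikeThisQuery used to round-trip the query string through the platform default charset before handing it to the analyzer. The standalone snippet below (illustration only, not Lucene code) shows why wrapping the string in a StringReader is the safer choice: there is no encode/decode step at all.

    import java.io.ByteArrayInputStream;
    import java.io.InputStreamReader;
    import java.io.Reader;
    import java.io.StringReader;

    public class CharsetRoundTripSketch {

      public static void main(String[] args) throws Exception {
        String text = "r\u00e9sum\u00e9";  // non-ASCII content

        // Old pattern: encode with the default charset, decode again later.
        // Whether the text survives depends entirely on the platform default.
        byte[] bytes = text.getBytes();  // default charset
        Reader roundTripped = new InputStreamReader(new ByteArrayInputStream(bytes));

        // New pattern: hand the characters straight to a Reader.
        Reader direct = new StringReader(text);

        System.out.println("round-tripped: " + readAll(roundTripped));
        System.out.println("direct:        " + readAll(direct));
      }

      private static String readAll(Reader r) throws Exception {
        StringBuilder sb = new StringBuilder();
        int c;
        while ((c = r.read()) != -1) {
          sb.append((char) c);
        }
        return sb.toString();
      }
    }
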
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java
index 1e9d20b..9fb8d0e 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/NRTManager.java
@@ -233,7 +233,7 @@
    **/
   public synchronized IndexSearcher get(long targetGen, boolean requireDeletes) {
 
-    assert noDeletesSearchingGen.get() >= searchingGen.get();
+    assert noDeletesSearchingGen.get() >= searchingGen.get(): "noDeletesSearchingGen=" + noDeletesSearchingGen.get() + " searchingGen=" + searchingGen.get();
 
     if (targetGen > getCurrentSearchingGen(requireDeletes)) {
       // Must wait
diff --git a/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java b/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java
index 4fac9b0..179051e 100644
--- a/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java
+++ b/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java
@@ -96,7 +96,7 @@
 		}
 		
 		
-		MoreLikeThisQuery mlt=new MoreLikeThisQuery(DOMUtils.getText(e),fields,analyzer);
+		MoreLikeThisQuery mlt=new MoreLikeThisQuery(DOMUtils.getText(e),fields,analyzer, fields[0]);
 		mlt.setMaxQueryTerms(DOMUtils.getAttribute(e,"maxQueryTerms",defaultMaxQueryTerms));
 		mlt.setMinTermFrequency(DOMUtils.getAttribute(e,"minTermFrequency",defaultMinTermFrequency));
 		mlt.setPercentTermsToMatch(DOMUtils.getAttribute(e,"percentTermsToMatch",defaultPercentTermsToMatch)/100);
diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
index a4e7f5c..772f0ca 100644
--- a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
+++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
@@ -235,7 +235,7 @@
           delCount += applyQueryDeletes(packet.queriesIterable(), reader);
           segAllDeletes = reader.numDocs() == 0;
         } finally {
-          readerPool.release(reader);
+          readerPool.release(reader, IOContext.Context.READ);
         }
         anyNewDeletes |= delCount > 0;
 
@@ -277,7 +277,7 @@
             delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
             segAllDeletes = reader.numDocs() == 0;
           } finally {
-            readerPool.release(reader);
+            readerPool.release(reader, IOContext.Context.READ);
           }
           anyNewDeletes |= delCount > 0;
 
diff --git a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
index bc29b35..f708a72 100644
--- a/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
+++ b/lucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
@@ -466,6 +466,8 @@
 
         // Ignore the exception if it was due to abort:
         if (!(exc instanceof MergePolicy.MergeAbortedException)) {
+          //System.out.println(Thread.currentThread().getName() + ": CMS: exc");
+          //exc.printStackTrace(System.out);
           if (!suppressExceptions) {
             // suppressExceptions is normally only set during
             // testing.
diff --git a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
index 59e39bc..094eeb3 100644
--- a/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
+++ b/lucene/src/java/org/apache/lucene/index/DirectoryReader.java
@@ -143,12 +143,12 @@
   }
 
   // Used by near real-time search
-  DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor, CodecProvider codecs, boolean applyAllDeletes) throws IOException {
+  DirectoryReader(IndexWriter writer, SegmentInfos infos, CodecProvider codecs, boolean applyAllDeletes) throws IOException {
     this.directory = writer.getDirectory();
     this.readOnly = true;
     this.applyAllDeletes = applyAllDeletes;       // saved for reopen
 
-    this.termInfosIndexDivisor = termInfosIndexDivisor;
+    this.termInfosIndexDivisor = writer.getConfig().getReaderTermsIndexDivisor();
     if (codecs == null) {
       this.codecs = CodecProvider.getDefault();
     } else {
@@ -171,8 +171,7 @@
       try {
         final SegmentInfo info = infos.info(i);
         assert info.dir == dir;
-        final SegmentReader reader = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor,
-                                                                        IOContext.READ);
+        final SegmentReader reader = writer.readerPool.getReadOnlyClone(info, IOContext.READ);
         if (reader.numDocs() > 0 || writer.getKeepFullyDeletedSegments()) {
           reader.readerFinishedListeners = readerFinishedListeners;
           readers.add(reader);
diff --git a/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java b/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
index c33d803..87a656a 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
@@ -114,12 +114,12 @@
   }
   
   public void upgrade() throws IOException {
-    if (!IndexReader.indexExists(dir)) {
+    if (!IndexReader.indexExists(dir, iwc.getCodecProvider())) {
       throw new IndexNotFoundException(dir.toString());
     }
   
     if (!deletePriorCommits) {
-      final Collection<IndexCommit> commits = IndexReader.listCommits(dir);
+      final Collection<IndexCommit> commits = DirectoryReader.listCommits(dir, iwc.getCodecProvider());
       if (commits.size() > 1) {
         throw new IllegalArgumentException("This tool was invoked to not delete prior commit points, but the following commits were found: " + commits);
       }
diff --git a/lucene/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
index 32c8c97..41b7f17 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java
@@ -53,7 +53,6 @@
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.MergeInfo;
 import org.apache.lucene.util.BitVector;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.StringHelper;
@@ -378,7 +377,7 @@
         // just like we do when loading segments_N
         synchronized(this) {
           maybeApplyDeletes(applyAllDeletes);
-          r = new DirectoryReader(this, segmentInfos, config.getReaderTermsIndexDivisor(), codecs, applyAllDeletes);
+          r = new DirectoryReader(this, segmentInfos, codecs, applyAllDeletes);
           if (infoStream != null) {
             message("return reader version=" + r.getVersion() + " reader=" + r);
           }
@@ -416,18 +415,48 @@
    *  has been called on this instance). */
 
   class ReaderPool {
+    
+    final class SegmentCacheKey {
+      public final SegmentInfo si;
+      public final IOContext.Context context;
+      
+      public SegmentCacheKey(SegmentInfo segInfo, IOContext.Context context) {
+        assert context == IOContext.Context.MERGE || context == IOContext.Context.READ;
+        this.si = segInfo;
+        this.context = context;
+      }
+      
+      @Override
+      public int hashCode() {
+        return si.hashCode() + context.hashCode();
+      }
 
-    private final Map<SegmentInfo,SegmentReader> readerMap = new HashMap<SegmentInfo,SegmentReader>();
+      @Override
+      public String toString() {
+        return "SegmentCacheKey(" + si + "," + context + ")";
+      }
+
+      @Override
+      public boolean equals(Object _other) {
+        if (!(_other instanceof SegmentCacheKey)) {
+          return false;
+        }
+        final SegmentCacheKey other = (SegmentCacheKey) _other;
+        return si.equals(other.si) && context == other.context;
+      }
+    }
+
+    private final Map<SegmentCacheKey,SegmentReader> readerMap = new HashMap<SegmentCacheKey,SegmentReader>();
 
     /** Forcefully clear changes for the specified segments.  This is called on successful merge. */
     synchronized void clear(List<SegmentInfo> infos) throws IOException {
       if (infos == null) {
-        for (Map.Entry<SegmentInfo,SegmentReader> ent: readerMap.entrySet()) {
+        for (Map.Entry<SegmentCacheKey,SegmentReader> ent: readerMap.entrySet()) {
           ent.getValue().hasChanges = false;
         }
       } else {
         for (final SegmentInfo info: infos) {
-          final SegmentReader r = readerMap.get(info);
+          final SegmentReader r = readerMap.get(new SegmentCacheKey(info, IOContext.Context.MERGE));
           if (r != null) {
             r.hasChanges = false;
           }
@@ -437,9 +466,13 @@
 
     // used only by asserts
     public synchronized boolean infoIsLive(SegmentInfo info) {
+      return infoIsLive(info, "");
+    }
+
+    public synchronized boolean infoIsLive(SegmentInfo info, String message) {
       int idx = segmentInfos.indexOf(info);
-      assert idx != -1: "info=" + info + " isn't in pool";
-      assert segmentInfos.info(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
+      assert idx != -1: "info=" + info + " isn't live: " + message;
+      assert segmentInfos.info(idx) == info: "info=" + info + " doesn't match live info in segmentInfos: " + message;
       return true;
     }
 
@@ -460,8 +493,8 @@
      * @param sr
      * @throws IOException
      */
-    public synchronized boolean release(SegmentReader sr) throws IOException {
-      return release(sr, false);
+    public synchronized boolean release(SegmentReader sr, IOContext.Context context) throws IOException {
+      return release(sr, false, context);
     }
 
     /**
@@ -474,10 +507,32 @@
      * @throws IOException
      */
     public synchronized boolean release(SegmentReader sr, boolean drop) throws IOException {
+      final SegmentCacheKey cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), IOContext.Context.READ);
+      final SegmentReader other = readerMap.get(cacheKey);
+      if (sr == other) {
+        return release(sr, drop, IOContext.Context.READ);
+      } else {
+        assert sr == readerMap.get(new SegmentCacheKey(sr.getSegmentInfo(), IOContext.Context.MERGE));
+        return release(sr, drop, IOContext.Context.MERGE);
+      }
+    }
 
-      final boolean pooled = readerMap.containsKey(sr.getSegmentInfo());
+    /**
+     * Release the segment reader (i.e. decRef it and close if there
+     * are no more references).
+     * @return true if this release altered the index (eg
+     * the SegmentReader had pending changes to del docs and
+     * was closed).  Caller must call checkpoint() if so.
+     * @param sr
+     * @throws IOException
+     */
+    public synchronized boolean release(SegmentReader sr, boolean drop, IOContext.Context context) throws IOException {
 
-      assert !pooled || readerMap.get(sr.getSegmentInfo()) == sr;
+      SegmentCacheKey cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), context);
+      
+      final boolean pooled = readerMap.containsKey(cacheKey);
+
+      assert !pooled || readerMap.get(cacheKey) == sr;
 
       // Drop caller's ref; for an external reader (not
       // pooled), this decRef will close it
@@ -492,9 +547,12 @@
         // Discard (don't save) changes when we are dropping
         // the reader; this is used only on the sub-readers
         // after a successful merge.
-        sr.hasChanges &= !drop;
-
-        final boolean hasChanges = sr.hasChanges;
+        final boolean hasChanges;
+        if (drop) {
+          hasChanges = sr.hasChanges = false;
+        } else {
+          hasChanges = sr.hasChanges;
+        }
 
         // Drop our ref -- this will commit any pending
         // changes to the dir
@@ -502,7 +560,20 @@
 
         // We are the last ref to this reader; since we're
         // not pooling readers, we release it:
-        readerMap.remove(sr.getSegmentInfo());
+        readerMap.remove(cacheKey);
+
+        if (drop && context == IOContext.Context.MERGE) {
+          // Also drop the READ reader if present: we don't
+          // need its deletes since they've been carried
+          // over to the merged segment
+          cacheKey = new SegmentCacheKey(sr.getSegmentInfo(), IOContext.Context.READ);
+          SegmentReader sr2 = readerMap.get(cacheKey);
+          if (sr2 != null) {
+            readerMap.remove(cacheKey);
+            sr2.hasChanges = false;
+            sr2.close();
+          }
+        }
 
         return hasChanges;
       }
@@ -511,16 +582,26 @@
     }
 
     public synchronized void drop(List<SegmentInfo> infos) throws IOException {
+      drop(infos, IOContext.Context.READ);
+      drop(infos, IOContext.Context.MERGE);
+    }
+
+    public synchronized void drop(List<SegmentInfo> infos, IOContext.Context context) throws IOException {
       for(SegmentInfo info : infos) {
-        drop(info);
+        drop(info, context);
       }
     }
 
     public synchronized void drop(SegmentInfo info) throws IOException {
-      final SegmentReader sr = readerMap.get(info);
-      if (sr != null) {
+      drop(info, IOContext.Context.READ);
+      drop(info, IOContext.Context.MERGE);
+    }
+
+    public synchronized void drop(SegmentInfo info, IOContext.Context context) throws IOException {
+      final SegmentReader sr;
+      if ((sr = readerMap.remove(new SegmentCacheKey(info, context))) != null) {
         sr.hasChanges = false;
-        readerMap.remove(info);
+        readerMap.remove(new SegmentCacheKey(info, context));
         sr.close();
       }
     }
@@ -532,14 +613,14 @@
       // sync'd on IW:
       assert Thread.holdsLock(IndexWriter.this);
 
-      Iterator<Map.Entry<SegmentInfo,SegmentReader>> iter = readerMap.entrySet().iterator();
+      Iterator<Map.Entry<SegmentCacheKey,SegmentReader>> iter = readerMap.entrySet().iterator();
       while (iter.hasNext()) {
 
-        Map.Entry<SegmentInfo,SegmentReader> ent = iter.next();
+        Map.Entry<SegmentCacheKey,SegmentReader> ent = iter.next();
 
         SegmentReader sr = ent.getValue();
         if (sr.hasChanges) {
-          assert infoIsLive(sr.getSegmentInfo());
+          assert infoIsLive(sr.getSegmentInfo(), "key=" + ent.getKey());
           sr.doCommit(null);
 
           // Must checkpoint w/ deleter, because this
@@ -567,10 +648,9 @@
       // We invoke deleter.checkpoint below, so we must be
       // sync'd on IW:
       assert Thread.holdsLock(IndexWriter.this);
-
+      
       for (SegmentInfo info : infos) {
-
-        final SegmentReader sr = readerMap.get(info);
+        final SegmentReader sr = readerMap.get(new SegmentCacheKey(info, IOContext.Context.READ));
         if (sr != null && sr.hasChanges) {
           assert infoIsLive(info);
           sr.doCommit(null);
@@ -582,13 +662,17 @@
       }
     }
 
+    public synchronized SegmentReader getReadOnlyClone(SegmentInfo info, IOContext context) throws IOException {
+      return getReadOnlyClone(info, true, context);
+    }
+
     /**
      * Returns a ref to a clone.  NOTE: this clone is not
      * enrolled in the pool, so you should simply close()
      * it when you're done (ie, do not call release()).
      */
-    public synchronized SegmentReader getReadOnlyClone(SegmentInfo info, boolean doOpenStores, int termInfosIndexDivisor, IOContext context) throws IOException {
-      SegmentReader sr = get(info, doOpenStores, context, termInfosIndexDivisor);
+    public synchronized SegmentReader getReadOnlyClone(SegmentInfo info, boolean doOpenStores, IOContext context) throws IOException {
+      SegmentReader sr = get(info, doOpenStores, context);
       try {
         return (SegmentReader) sr.clone(true);
       } finally {
@@ -596,6 +680,10 @@
       }
     }
 
+    public synchronized SegmentReader get(SegmentInfo info, IOContext context) throws IOException {
+      return get(info, true, context);
+    }
+
     /**
      * Obtain a SegmentReader from the readerPool.  The reader
      * must be returned by calling {@link #release(SegmentReader)}
@@ -605,53 +693,24 @@
      * @throws IOException
      */
     public synchronized SegmentReader get(SegmentInfo info, boolean doOpenStores, IOContext context) throws IOException {
-      return get(info, doOpenStores, context, config.getReaderTermsIndexDivisor());
-    }
 
-    /**
-     * Obtain a SegmentReader from the readerPool.  The reader
-     * must be returned by calling {@link #release(SegmentReader)}
-     *
-     * @see #release(SegmentReader)
-     * @param info
-     * @param doOpenStores
-     * @param readBufferSize
-     * @param termsIndexDivisor
-     * @throws IOException
-     */
-    public synchronized SegmentReader get(SegmentInfo info, boolean doOpenStores, IOContext context, int termsIndexDivisor) throws IOException {
-
-      //      if (poolReaders) {
-      //        readBufferSize = BufferedIndexInput.BUFFER_SIZE;
-      //      }
-      
-      // TODO: context should be part of the key used to cache that reader in the pool.
-
-      SegmentReader sr = readerMap.get(info);
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context.context);
+      SegmentReader sr = readerMap.get(cacheKey);
       if (sr == null) {
         // TODO: we may want to avoid doing this while
         // synchronized
         // Returns a ref, which we xfer to readerMap:
-        sr = SegmentReader.get(false, info.dir, info, doOpenStores, termsIndexDivisor, context);
+        sr = SegmentReader.get(false, info.dir, info, doOpenStores, context.context == IOContext.Context.MERGE ? -1 : config.getReaderTermsIndexDivisor(), context);
         sr.readerFinishedListeners = readerFinishedListeners;
 
         if (info.dir == directory) {
           // Only pool if reader is not external
-          readerMap.put(info, sr);
+          readerMap.put(cacheKey, sr);
         }
       } else {
         if (doOpenStores) {
           sr.openDocStores();
         }
-        if (termsIndexDivisor != -1) {
-          // If this reader was originally opened because we
-          // needed to merge it, we didn't load the terms
-          // index.  But now, if the caller wants the terms
-          // index (eg because it's doing deletes, or an NRT
-          // reader is being opened) we ask the reader to
-          // load its terms index.
-          sr.loadTermsIndex(termsIndexDivisor);
-        }
       }
 
       // Return a ref to our caller
@@ -664,13 +723,23 @@
 
     // Returns a ref
     public synchronized SegmentReader getIfExists(SegmentInfo info) throws IOException {
-      SegmentReader sr = readerMap.get(info);
+      SegmentReader sr = getIfExists(info, IOContext.Context.READ);
+      if (sr == null) {
+        sr = getIfExists(info, IOContext.Context.MERGE);
+      }
+      return sr;
+    }
+    
+    // Returns a ref
+    public synchronized SegmentReader getIfExists(SegmentInfo info, IOContext.Context context) throws IOException {
+      SegmentCacheKey cacheKey = new SegmentCacheKey(info, context);
+      SegmentReader sr = readerMap.get(cacheKey);
       if (sr != null) {
         sr.incRef();
       }
       return sr;
     }
-  }
+  }  
 
   /**
    * Obtain the number of deleted docs for a pooled reader.
@@ -687,7 +756,7 @@
       }
     } finally {
       if (reader != null) {
-        readerPool.release(reader);
+        readerPool.release(reader, false);
       }
     }
   }
@@ -2853,7 +2922,7 @@
     }
     if (!keepFullyDeletedSegments && result.allDeleted != null) {
       if (infoStream != null) {
-        message("drop 100% deleted segments: " + result.allDeleted);
+        message("drop 100% deleted segments: " + segString(result.allDeleted));
       }
       for (SegmentInfo info : result.allDeleted) {
         // If a merge has already registered for this
@@ -2929,16 +2998,27 @@
     for(int i=0; i < sourceSegments.size(); i++) {
       SegmentInfo info = sourceSegments.get(i);
       minGen = Math.min(info.getBufferedDeletesGen(), minGen);
-      int docCount = info.docCount;
-      final SegmentReader previousReader = merge.readerClones.get(i);
-      if (previousReader == null) {
-        // Reader was skipped because it was 100% deletions
-        continue;
+      final int docCount = info.docCount;
+      final BitVector prevLiveDocs = merge.readerLiveDocs.get(i);
+      final BitVector currentLiveDocs;
+      {
+        final SegmentReader currentReader = readerPool.getIfExists(info, IOContext.Context.READ);
+        if (currentReader != null) {
+          currentLiveDocs = (BitVector) currentReader.getLiveDocs();
+          readerPool.release(currentReader, false, IOContext.Context.READ);
+        } else {
+          assert readerPool.infoIsLive(info);
+          if (info.hasDeletions()) {
+            currentLiveDocs = new BitVector(directory,
+                                            info.getDelFileName(),
+                                            new IOContext(IOContext.Context.READ));
+          } else {
+            currentLiveDocs = null;
+          }
+        }
       }
-      final Bits prevLiveDocs = previousReader.getLiveDocs();
-      final SegmentReader currentReader = merge.readers.get(i);
-      final Bits currentLiveDocs = currentReader.getLiveDocs();
-      if (previousReader.hasDeletions()) {
+
+      if (prevLiveDocs != null) {
 
         // There were deletes on this segment when the merge
         // started.  The merge has collapsed away those
@@ -2947,14 +3027,14 @@
         // newly flushed deletes but mapping them to the new
         // docIDs.
 
-        if (currentReader.numDeletedDocs() > previousReader.numDeletedDocs()) {
-          // This means this segment has had new deletes
-          // committed since we started the merge, so we
+        if (currentLiveDocs.count() < prevLiveDocs.count()) {
+          // This means this segment received new deletes
+          // since we started the merge, so we
           // must merge them:
           for(int j=0;j<docCount;j++) {
-            if (!prevLiveDocs.get(j))
+            if (!prevLiveDocs.get(j)) {
               assert !currentLiveDocs.get(j);
-            else {
+            } else {
               if (!currentLiveDocs.get(j)) {
                 mergedReader.doDelete(docUpto);
                 delCount++;
@@ -2963,9 +3043,10 @@
             }
           }
         } else {
-          docUpto += docCount - previousReader.numDeletedDocs();
+          assert currentLiveDocs.count() == prevLiveDocs.count(): "currentLiveDocs.count()==" + currentLiveDocs.count() + " vs prevLiveDocs.count()=" + prevLiveDocs.count() + " info=" + info;
+          docUpto += currentLiveDocs.count();
         }
-      } else if (currentReader.hasDeletions()) {
+      } else if (currentLiveDocs != null) {
         // This segment had no deletes before but now it
         // does:
         for(int j=0; j<docCount; j++) {
@@ -2975,9 +3056,10 @@
           }
           docUpto++;
         }
-      } else
+      } else {
         // No deletes before or after
         docUpto += info.docCount;
+      }
     }
 
     assert mergedReader.numDeletedDocs() == delCount;
@@ -3373,13 +3455,14 @@
   private final synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
     final int numSegments = merge.readers.size();
     Throwable th = null;
-    
+
     boolean anyChanges = false;
     boolean drop = !suppressExceptions;
+    
     for (int i = 0; i < numSegments; i++) {
       if (merge.readers.get(i) != null) {
         try {
-          anyChanges |= readerPool.release(merge.readers.get(i), drop);
+          anyChanges |= readerPool.release(merge.readers.get(i), drop, IOContext.Context.MERGE);
         } catch (Throwable t) {
           if (th == null) {
             th = t;
@@ -3387,20 +3470,6 @@
         }
         merge.readers.set(i, null);
       }
-      
-      if (i < merge.readerClones.size() && merge.readerClones.get(i) != null) {
-        try {
-          merge.readerClones.get(i).close();
-        } catch (Throwable t) {
-          if (th == null) {
-            th = t;
-          }
-        }
-        // This was a private clone and we had the
-        // only reference
-        assert merge.readerClones.get(i).getRefCount() == 0: "refCount should be 0 but is " + merge.readerClones.get(i).getRefCount();
-        merge.readerClones.set(i, null);
-      }
     }
     
     if (suppressExceptions && anyChanges) {
@@ -3416,6 +3485,27 @@
     }
   }
 
+  private synchronized BitVector getLiveDocsClone(SegmentInfo info, SegmentReader other) throws IOException {
+    final SegmentReader delReader = readerPool.getIfExists(info, IOContext.Context.READ);
+    BitVector liveDocs;
+    if (delReader != null) {
+      liveDocs = (BitVector) delReader.getLiveDocs();
+      readerPool.release(delReader, false, IOContext.Context.READ);
+      if (liveDocs != null) {
+        // We clone the del docs because other
+        // deletes may come in while we're merging.  We
+        // need frozen deletes while merging, and then
+        // we carry over any new deletions in
+        // commitMergedDeletes.
+        liveDocs = (BitVector) liveDocs.clone();
+      }
+    } else {
+      liveDocs = (BitVector) other.getLiveDocs();
+    }
+
+    return liveDocs;
+  }
+
   /** Does the actual (time-consuming) work of the merge,
    *  but without holding synchronized lock on IndexWriter
    *  instance */
@@ -3440,7 +3530,8 @@
     }
 
     merge.readers = new ArrayList<SegmentReader>();
-    merge.readerClones = new ArrayList<SegmentReader>();
+    merge.readerLiveDocs = new ArrayList<BitVector>();
+
     // This is try/finally to make sure merger's readers are
     // closed:
     boolean success = false;
@@ -3453,20 +3544,17 @@
 
         // Hold onto the "live" reader; we will use this to
         // commit merged deletes
-        final SegmentReader reader = readerPool.get(info, true,
-                                                    context,
-                                                    -config.getReaderTermsIndexDivisor());
+        final SegmentReader reader = readerPool.get(info, context);
+
+        // Carefully pull the most recent live docs:
+        final BitVector liveDocs = getLiveDocsClone(info, reader);
+
+        merge.readerLiveDocs.add(liveDocs);
         merge.readers.add(reader);
 
-        // We clone the segment readers because other
-        // deletes may come in while we're merging so we
-        // need readers that will not change
-        final SegmentReader clone = (SegmentReader) reader.clone(true);
-        merge.readerClones.add(clone);
-
-        if (clone.numDocs() > 0) {
-          merger.add(clone);
-          totDocCount += clone.numDocs();
+        if (liveDocs == null || liveDocs.count() > 0) {
+          merger.add(reader, liveDocs);
+          totDocCount += liveDocs == null ? reader.maxDoc() : liveDocs.count();
         }
         segUpto++;
       }
@@ -3562,25 +3650,24 @@
       }
 
       final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();
-      final int termsIndexDivisor;
-      final boolean loadDocStores;
-
-      if (mergedSegmentWarmer != null) {
-        // Load terms index & doc stores so the segment
-        // warmer can run searches, load documents/term
-        // vectors
-        termsIndexDivisor = config.getReaderTermsIndexDivisor();
-        loadDocStores = true;
-      } else {
-        termsIndexDivisor = -1;
-        loadDocStores = false;
-      }
 
       // TODO: in the non-realtime case, we may want to only
       // keep deletes (it's costly to open entire reader
       // when we just need deletes)
 
-      final SegmentReader mergedReader = readerPool.get(merge.info, loadDocStores, context, termsIndexDivisor);
+      final boolean loadDocStores;
+      if (mergedSegmentWarmer != null) {
+        // Load terms index & doc stores so the segment
+        // warmer can run searches, load documents/term
+        // vectors
+        loadDocStores = true;
+      } else {
+        loadDocStores = false;
+      }
+
+      // Force READ context because we merge deletes onto
+      // this reader:
+      final SegmentReader mergedReader = readerPool.get(merge.info, loadDocStores, new IOContext(IOContext.Context.READ));
       try {
         if (poolReaders && mergedSegmentWarmer != null) {
           mergedSegmentWarmer.warm(mergedReader);
@@ -3592,7 +3679,7 @@
         }
       } finally {
         synchronized(this) {
-          if (readerPool.release(mergedReader)) {
+          if (readerPool.release(mergedReader, IOContext.Context.READ)) {
             // Must checkpoint after releasing the
             // mergedReader since it may have written a new
             // deletes file:
@@ -3667,7 +3754,7 @@
       }
     } finally {
       if (reader != null) {
-        readerPool.release(reader);
+        readerPool.release(reader, false);
       }
     }
     return buffer.toString();
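
The reworked commitMergedDeletes above no longer compares a reader clone against the live reader; it compares the BitVector snapshot taken when the merge started (merge.readerLiveDocs) against the current live docs. Below is a simplified, self-contained model of that carry-over loop (illustration only, plain boolean arrays instead of BitVector):

    // Simplified model of the carry-over loop in commitMergedDeletes:
    // prevLive is the live-docs snapshot taken when the merge started (what
    // the merge actually copied), currentLive is the live docs now.  Any doc
    // that was live at merge start but has been deleted since must be deleted
    // again in the merged segment, at its remapped position.
    public class CarryOverDeletesSketch {

      static int carryOver(boolean[] prevLive, boolean[] currentLive, boolean[] mergedLive) {
        int docUpto = 0;   // next position in the merged segment
        int delCount = 0;
        for (int j = 0; j < prevLive.length; j++) {
          if (!prevLive[j]) {
            // Already deleted when the merge started: the merge skipped it,
            // so it has no slot in the merged segment.
            assert !currentLive[j];
          } else {
            if (!currentLive[j]) {
              // Deleted while the merge was running: delete the merged copy.
              mergedLive[docUpto] = false;
              delCount++;
            }
            docUpto++;
          }
        }
        return delCount;
      }

      public static void main(String[] args) {
        boolean[] prev    = { true, false, true, true };
        boolean[] current = { true, false, false, true };  // doc 2 deleted during the merge
        boolean[] merged  = { true, true, true };           // the 3 docs the merge copied
        int carried = carryOver(prev, current, merged);
        System.out.println("carried over " + carried + " delete(s)");  // 1
      }
    }
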
diff --git a/lucene/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/src/java/org/apache/lucene/index/MergePolicy.java
index 7298ecd..a5092f3 100644
--- a/lucene/src/java/org/apache/lucene/index/MergePolicy.java
+++ b/lucene/src/java/org/apache/lucene/index/MergePolicy.java
@@ -17,15 +17,16 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MergeInfo;
-import org.apache.lucene.util.SetOnce;
+import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.SetOnce.AlreadySetException;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
+import org.apache.lucene.util.SetOnce;
 
 /**
  * <p>Expert: a MergePolicy determines the sequence of
@@ -75,7 +76,7 @@
     int maxNumSegmentsOptimize;     // used by IndexWriter
     public long estimatedMergeBytes;       // used by IndexWriter
     List<SegmentReader> readers;        // used by IndexWriter
-    List<SegmentReader> readerClones;   // used by IndexWriter
+    List<BitVector> readerLiveDocs;   // used by IndexWriter
     public final List<SegmentInfo> segments;
     public final int totalDocCount;
     boolean aborted;
diff --git a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
index 8159230..076e213 100644
--- a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
+++ b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
@@ -174,14 +174,6 @@
     public void close() throws IOException {
       IOUtils.closeSafely(false, codecs.values());
     }
-
-    @Override
-    public void loadTermsIndex(int indexDivisor) throws IOException {
-      Iterator<FieldsProducer> it = codecs.values().iterator();
-      while (it.hasNext()) {
-        it.next().loadTermsIndex(indexDivisor);
-      }
-    }
   }
 
   @Override
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
index f937194..52159ab 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -54,7 +54,7 @@
   private String segment;
   private int termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL;
 
-  private List<IndexReader> readers = new ArrayList<IndexReader>();
+  private List<MergeState.IndexReaderAndLiveDocs> readers = new ArrayList<MergeState.IndexReaderAndLiveDocs>();
   private final FieldInfos fieldInfos;
 
   private int mergedDocs;
@@ -100,7 +100,21 @@
    * @param reader
    */
   final void add(IndexReader reader) {
-    ReaderUtil.gatherSubReaders(readers, reader);
+    try {
+      new ReaderUtil.Gather(reader) {
+        @Override
+        protected void add(int base, IndexReader r) {
+          readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs()));
+        }
+      }.run();
+    } catch (IOException ioe) {
+      // won't happen
+      throw new RuntimeException(ioe);
+    }
+  }
+
+  final void add(SegmentReader reader, Bits liveDocs) {
+    readers.add(new MergeState.IndexReaderAndLiveDocs(reader, liveDocs));
   }
 
   /**
@@ -122,8 +136,9 @@
     mergePerDoc();
     mergeNorms();
 
-    if (fieldInfos.hasVectors())
+    if (fieldInfos.hasVectors()) {
       mergeVectors();
+    }
     return mergedDocs;
   }
 
@@ -188,9 +203,9 @@
     // FieldInfos, then we can do a bulk copy of the
     // stored fields:
     for (int i = 0; i < numReaders; i++) {
-      IndexReader reader = readers.get(i);
-      if (reader instanceof SegmentReader) {
-        SegmentReader segmentReader = (SegmentReader) reader;
+      MergeState.IndexReaderAndLiveDocs reader = readers.get(i);
+      if (reader.reader instanceof SegmentReader) {
+        SegmentReader segmentReader = (SegmentReader) reader.reader;
         boolean same = true;
         FieldInfos segmentFieldInfos = segmentReader.fieldInfos();
         for (FieldInfo fi : segmentFieldInfos) {
@@ -215,7 +230,8 @@
    * @throws IOException if there is a low-level IO error
    */
   private int mergeFields() throws CorruptIndexException, IOException {
-    for (IndexReader reader : readers) {
+    for (MergeState.IndexReaderAndLiveDocs readerAndLiveDocs : readers) {
+      final IndexReader reader = readerAndLiveDocs.reader;
       if (reader instanceof SegmentReader) {
         SegmentReader segmentReader = (SegmentReader) reader;
         FieldInfos readerFieldInfos = segmentReader.fieldInfos();
@@ -244,7 +260,7 @@
     final FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, context);
     try {
       int idx = 0;
-      for (IndexReader reader : readers) {
+      for (MergeState.IndexReaderAndLiveDocs reader : readers) {
         final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
         FieldsReader matchingFieldsReader = null;
         if (matchingSegmentReader != null) {
@@ -253,7 +269,7 @@
             matchingFieldsReader = fieldsReader;
           }
         }
-        if (reader.hasDeletions()) {
+        if (reader.liveDocs != null) {
           docCount += copyFieldsWithDeletions(fieldsWriter,
                                               reader, matchingFieldsReader);
         } else {
@@ -280,12 +296,12 @@
     return docCount;
   }
 
-  private int copyFieldsWithDeletions(final FieldsWriter fieldsWriter, final IndexReader reader,
+  private int copyFieldsWithDeletions(final FieldsWriter fieldsWriter, final MergeState.IndexReaderAndLiveDocs reader,
                                       final FieldsReader matchingFieldsReader)
     throws IOException, MergeAbortedException, CorruptIndexException {
     int docCount = 0;
-    final int maxDoc = reader.maxDoc();
-    final Bits liveDocs = reader.getLiveDocs();
+    final int maxDoc = reader.reader.maxDoc();
+    final Bits liveDocs = reader.liveDocs;
     assert liveDocs != null;
     if (matchingFieldsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
@@ -321,7 +337,7 @@
         }
         // NOTE: it's very important to first assign to doc then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        Document doc = reader.document(j);
+        Document doc = reader.reader.document(j);
         fieldsWriter.addDocument(doc, fieldInfos);
         docCount++;
         checkAbort.work(300);
@@ -330,10 +346,10 @@
     return docCount;
   }
 
-  private int copyFieldsNoDeletions(final FieldsWriter fieldsWriter, final IndexReader reader,
+  private int copyFieldsNoDeletions(final FieldsWriter fieldsWriter, final MergeState.IndexReaderAndLiveDocs reader,
                                     final FieldsReader matchingFieldsReader)
     throws IOException, MergeAbortedException, CorruptIndexException {
-    final int maxDoc = reader.maxDoc();
+    final int maxDoc = reader.reader.maxDoc();
     int docCount = 0;
     if (matchingFieldsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
@@ -348,7 +364,7 @@
       for (; docCount < maxDoc; docCount++) {
         // NOTE: it's very important to first assign to doc then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        Document doc = reader.document(docCount);
+        Document doc = reader.reader.document(docCount);
         fieldsWriter.addDocument(doc, fieldInfos);
         checkAbort.work(300);
       }
@@ -361,12 +377,11 @@
    * @throws IOException
    */
   private final void mergeVectors() throws IOException {
-    TermVectorsWriter termVectorsWriter =
-      new TermVectorsWriter(directory, segment, fieldInfos, context);
+    TermVectorsWriter termVectorsWriter = new TermVectorsWriter(directory, segment, fieldInfos, context);
 
     try {
       int idx = 0;
-      for (final IndexReader reader : readers) {
+      for (final MergeState.IndexReaderAndLiveDocs reader : readers) {
         final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
         TermVectorsReader matchingVectorsReader = null;
         if (matchingSegmentReader != null) {
@@ -377,11 +392,10 @@
             matchingVectorsReader = vectorsReader;
           }
         }
-        if (reader.hasDeletions()) {
+        if (reader.liveDocs != null) {
           copyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
         } else {
           copyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader);
-
         }
       }
     } finally {
@@ -402,10 +416,10 @@
 
   private void copyVectorsWithDeletions(final TermVectorsWriter termVectorsWriter,
                                         final TermVectorsReader matchingVectorsReader,
-                                        final IndexReader reader)
+                                        final MergeState.IndexReaderAndLiveDocs reader)
     throws IOException, MergeAbortedException {
-    final int maxDoc = reader.maxDoc();
-    final Bits liveDocs = reader.getLiveDocs();
+    final int maxDoc = reader.reader.maxDoc();
+    final Bits liveDocs = reader.liveDocs;
     if (matchingVectorsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
       for (int docNum = 0; docNum < maxDoc;) {
@@ -440,7 +454,7 @@
 
         // NOTE: it's very important to first assign to vectors then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        TermFreqVector[] vectors = reader.getTermFreqVectors(docNum);
+        TermFreqVector[] vectors = reader.reader.getTermFreqVectors(docNum);
         termVectorsWriter.addAllDocVectors(vectors);
         checkAbort.work(300);
       }
@@ -449,9 +463,9 @@
 
   private void copyVectorsNoDeletions(final TermVectorsWriter termVectorsWriter,
                                       final TermVectorsReader matchingVectorsReader,
-                                      final IndexReader reader)
+                                      final MergeState.IndexReaderAndLiveDocs reader)
       throws IOException, MergeAbortedException {
-    final int maxDoc = reader.maxDoc();
+    final int maxDoc = reader.reader.maxDoc();
     if (matchingVectorsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
       int docCount = 0;
@@ -466,7 +480,7 @@
       for (int docNum = 0; docNum < maxDoc; docNum++) {
         // NOTE: it's very important to first assign to vectors then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        TermFreqVector[] vectors = reader.getTermFreqVectors(docNum);
+        TermFreqVector[] vectors = reader.reader.getTermFreqVectors(docNum);
         termVectorsWriter.addAllDocVectors(vectors);
         checkAbort.work(300);
       }
@@ -487,23 +501,17 @@
     
     final List<Fields> fields = new ArrayList<Fields>();
     final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
-    final List<Bits> bits = new ArrayList<Bits>();
-    final List<Integer> bitsStarts = new ArrayList<Integer>();
 
-    for(IndexReader r : readers) {
-      final Fields f = r.fields();
-      final int maxDoc = r.maxDoc();
+    for(MergeState.IndexReaderAndLiveDocs r : readers) {
+      final Fields f = r.reader.fields();
+      final int maxDoc = r.reader.maxDoc();
       if (f != null) {
         slices.add(new ReaderUtil.Slice(docBase, maxDoc, fields.size()));
         fields.add(f);
-        bits.add(r.getLiveDocs());
-        bitsStarts.add(docBase);
       }
       docBase += maxDoc;
     }
 
-    bitsStarts.add(docBase);
-
     // we may gather more readers than mergeState.readerCount
     mergeState = new MergeState();
     mergeState.readers = readers;
@@ -524,31 +532,32 @@
 
     for(int i=0;i<mergeState.readerCount;i++) {
 
-      final IndexReader reader = readers.get(i);
+      final MergeState.IndexReaderAndLiveDocs reader = readers.get(i);
 
       mergeState.docBase[i] = docBase;
-      docBase += reader.numDocs();
-      inputDocBase += reader.maxDoc();
-      if (reader.hasDeletions()) {
+      inputDocBase += reader.reader.maxDoc();
+      final int maxDoc = reader.reader.maxDoc();
+      if (reader.liveDocs != null) {
         int delCount = 0;
-        final Bits liveDocs = reader.getLiveDocs();
+        final Bits liveDocs = reader.liveDocs;
         assert liveDocs != null;
-        final int maxDoc = reader.maxDoc();
         final int[] docMap = mergeState.docMaps[i] = new int[maxDoc];
         int newDocID = 0;
         for(int j=0;j<maxDoc;j++) {
           if (!liveDocs.get(j)) {
             docMap[j] = -1;
-            delCount++;  // only for assert
+            delCount++;
           } else {
             docMap[j] = newDocID++;
           }
         }
-        assert delCount == reader.numDeletedDocs(): "reader delCount=" + reader.numDeletedDocs() + " vs recomputed delCount=" + delCount;
+        docBase += maxDoc - delCount;
+      } else {
+        docBase += maxDoc;
       }
 
       if (payloadProcessorProvider != null) {
-        mergeState.dirPayloadProcessor[i] = payloadProcessorProvider.getDirProcessor(reader.directory());
+        mergeState.dirPayloadProcessor[i] = payloadProcessorProvider.getDirProcessor(reader.reader.directory());
       }
     }
     codec = segmentWriteState.segmentCodecs.codec();
@@ -565,22 +574,17 @@
   private void mergePerDoc() throws IOException {
     final List<PerDocValues> perDocProducers = new ArrayList<PerDocValues>();    
     final List<ReaderUtil.Slice> perDocSlices = new ArrayList<ReaderUtil.Slice>();
-    final List<Bits> perDocBits = new ArrayList<Bits>();
-    final List<Integer> perDocBitsStarts = new ArrayList<Integer>();
     int docBase = 0;
-    for (IndexReader r : readers) {
-      final int maxDoc = r.maxDoc();
-      final PerDocValues producer = r.perDocValues();
+    for (MergeState.IndexReaderAndLiveDocs r : readers) {
+      final int maxDoc = r.reader.maxDoc();
+      final PerDocValues producer = r.reader.perDocValues();
       if (producer != null) {
         perDocSlices.add(new ReaderUtil.Slice(docBase, maxDoc, perDocProducers
             .size()));
         perDocProducers.add(producer);
-        perDocBits.add(r.getLiveDocs());
-        perDocBitsStarts.add(docBase);
       }
       docBase += maxDoc;
     }
-    perDocBitsStarts.add(docBase);
     if (!perDocSlices.isEmpty()) {
       final PerDocConsumer docsConsumer = codec
           .docsConsumer(new PerDocWriteState(segmentWriteState));
@@ -616,22 +620,22 @@
             output = directory.createOutput(IndexFileNames.segmentFileName(segment, "", IndexFileNames.NORMS_EXTENSION), context);
             output.writeBytes(SegmentNorms.NORMS_HEADER, SegmentNorms.NORMS_HEADER.length);
           }
-          for (IndexReader reader : readers) {
-            final int maxDoc = reader.maxDoc();
-            byte normBuffer[] = reader.norms(fi.name);
+          for (MergeState.IndexReaderAndLiveDocs reader : readers) {
+            final int maxDoc = reader.reader.maxDoc();
+            byte normBuffer[] = reader.reader.norms(fi.name);
             if (normBuffer == null) {
               // Can be null if this segment doesn't have
               // any docs with this field
               normBuffer = new byte[maxDoc];
               Arrays.fill(normBuffer, (byte)0);
             }
-            if (!reader.hasDeletions()) {
+            if (reader.liveDocs == null) {
               //optimized case for segments without deleted docs
               output.writeBytes(normBuffer, maxDoc);
             } else {
               // this segment has deleted docs, so we have to
               // check for every doc if it is deleted or not
-              final Bits liveDocs = reader.getLiveDocs();
+              final Bits liveDocs = reader.liveDocs;
               for (int k = 0; k < maxDoc; k++) {
                 if (liveDocs.get(k)) {
                   output.writeByte(normBuffer[k]);
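
The docMap construction above assigns each live doc a compacted new ID and maps deleted docs to -1, and docBase now advances by maxDoc - delCount rather than reader.numDocs(). A standalone sketch of that mapping (illustration only, a plain boolean array instead of Bits):

    // Sketch of the per-reader docMap built during merging: live docs keep
    // their order and receive consecutive new IDs, deleted docs map to -1.
    public class DocMapSketch {

      static int[] buildDocMap(boolean[] liveDocs) {
        int[] docMap = new int[liveDocs.length];
        int newDocID = 0;
        for (int j = 0; j < liveDocs.length; j++) {
          docMap[j] = liveDocs[j] ? newDocID++ : -1;
        }
        return docMap;
      }

      public static void main(String[] args) {
        boolean[] live = { true, false, true, true, false };
        int[] docMap = buildDocMap(live);
        StringBuilder sb = new StringBuilder();
        for (int m : docMap) {
          sb.append(m).append(' ');
        }
        System.out.println(sb.toString().trim());  // prints: 0 -1 1 2 -1
        // docBase for the next reader advances by the live count (3 here),
        // not by the reader's full maxDoc (5).
      }
    }
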
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentReader.java b/lucene/src/java/org/apache/lucene/index/SegmentReader.java
index e0af456..89a0ba6 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentReader.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentReader.java
@@ -31,7 +31,6 @@
 import org.apache.lucene.document.FieldSelector;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.codecs.PerDocValues;
-import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -161,9 +160,6 @@
     // NOTE: the bitvector is stored using the regular directory, not cfs
     if (hasDeletions(si)) {
       liveDocs = new BitVector(directory(), si.getDelFileName(), new IOContext(context, true));
-      if (liveDocs.getVersion() < BitVector.VERSION_DGAPS_CLEARED) {
-        liveDocs.invertAll();
-      }
       liveDocsRef = new AtomicInteger(1);
       assert checkLiveCounts();
       if (liveDocs.size() != si.docCount) {
@@ -637,15 +633,6 @@
     }
   }
 
-  // NOTE: only called from IndexWriter when a near
-  // real-time reader is opened, or applyDeletes is run,
-  // sharing a segment that's still being merged.  This
-  // method is not thread safe, and relies on the
-  // synchronization in IndexWriter
-  void loadTermsIndex(int indexDivisor) throws IOException {
-    core.fields.loadTermsIndex(indexDivisor);
-  }
-
   // for testing only
   boolean normsClosed() {
     if (singleNormStream != null) {
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java
index a01a8b6..c827926 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/BlockTermsReader.java
@@ -165,11 +165,6 @@
   }
   
   @Override
-  public void loadTermsIndex(int indexDivisor) throws IOException {
-    indexReader.loadTermsIndex(indexDivisor);
-  }
-
-  @Override
   public void close() throws IOException {
     try {
       try {
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/DocValuesConsumer.java b/lucene/src/java/org/apache/lucene/index/codecs/DocValuesConsumer.java
index 591399e..a07c42a 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/DocValuesConsumer.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/DocValuesConsumer.java
@@ -103,21 +103,20 @@
     // TODO we need some kind of compatibility notation for values such
     // that two slightly different segments can be merged eg. fixed vs.
     // variable byte len or float32 vs. float64
-    int docBase = 0;
     boolean merged = false;
     /*
      * We ignore the given DocValues here and merge from the subReaders directly
      * to support bulk copies on the DocValues Writer level. if this gets merged
      * with MultiDocValues the writer can not optimize for bulk-copyable data
      */
-    for (final IndexReader reader : mergeState.readers) {
-      final IndexDocValues r = reader.docValues(mergeState.fieldInfo.name);
+    for(int readerIDX=0;readerIDX<mergeState.readers.size();readerIDX++) {
+      final org.apache.lucene.index.codecs.MergeState.IndexReaderAndLiveDocs reader = mergeState.readers.get(readerIDX);
+      final IndexDocValues r = reader.reader.docValues(mergeState.fieldInfo.name);
       if (r != null) {
         merged = true;
-        merge(new Writer.MergeState(r, docBase, reader.maxDoc(),
-                                    reader.getLiveDocs()));
+        merge(new Writer.MergeState(r, mergeState.docBase[readerIDX], reader.reader.maxDoc(),
+                                    reader.liveDocs));
       }
-      docBase += reader.numDocs();
     }
     if (merged) {
       finish(mergeState.mergedDocCount);
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/FieldsProducer.java b/lucene/src/java/org/apache/lucene/index/codecs/FieldsProducer.java
index 8a8e3f5..acb08ca 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/FieldsProducer.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/FieldsProducer.java
@@ -34,7 +34,6 @@
 
 public abstract class FieldsProducer extends Fields implements Closeable {
   public abstract void close() throws IOException;
-  public abstract void loadTermsIndex(int indexDivisor) throws IOException;
 
   public static final FieldsProducer EMPTY = new FieldsProducer() {
     
@@ -47,12 +46,7 @@
     public FieldsEnum iterator() throws IOException {
       return FieldsEnum.EMPTY;
     }
-    
-    @Override
-    public void loadTermsIndex(int indexDivisor) throws IOException {
-      
-    }
-    
+
     @Override
     public void close() throws IOException {
       
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java b/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java
index fa2880c..36e82b3 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/FixedGapTermsIndexReader.java
@@ -20,7 +20,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.store.IOContext.Context;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.SegmentInfo;
@@ -31,7 +30,6 @@
 import org.apache.lucene.util.packed.PackedInts;
 
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Collection;
 import java.util.Comparator;
 import java.io.IOException;
@@ -75,6 +73,8 @@
 
     this.termComp = termComp;
 
+    assert indexDivisor == -1 || indexDivisor > 0;
+
     in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, FixedGapTermsIndexWriter.TERMS_INDEX_EXTENSION), context);
     
     boolean success = false;
@@ -251,7 +251,7 @@
       }
     }
 
-    public void loadTermsIndex() throws IOException {
+    private void loadTermsIndex() throws IOException {
       if (coreIndex == null) {
         coreIndex = new CoreFieldIndex(indexStart, termsStart, packedIndexStart, packedOffsetsStart, numIndexTerms);
       }
@@ -375,29 +375,6 @@
     }
   }
 
-  // Externally synced in IndexWriter
-  @Override
-  public void loadTermsIndex(int indexDivisor) throws IOException {
-    if (!indexLoaded) {
-
-      if (indexDivisor < 0) {
-        this.indexDivisor = -indexDivisor;
-      } else {
-        this.indexDivisor = indexDivisor;
-      }
-      this.totalIndexInterval = indexInterval * this.indexDivisor;
-
-      Iterator<FieldIndexData> it = fields.values().iterator();
-      while(it.hasNext()) {
-        it.next().loadTermsIndex();
-      }
-
-      indexLoaded = true;
-      in.close();
-      termBytesReader = termBytes.freeze(true);
-    }
-  }
-
   @Override
   public FieldIndexEnum getFieldEnum(FieldInfo fieldInfo) {
     final FieldIndexData fieldData = fields.get(fieldInfo);
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/MergeState.java b/lucene/src/java/org/apache/lucene/index/codecs/MergeState.java
index cfd9577..3f6ac20 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/MergeState.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/MergeState.java
@@ -26,13 +26,25 @@
 import org.apache.lucene.index.PayloadProcessorProvider.DirPayloadProcessor;
 import org.apache.lucene.index.PayloadProcessorProvider.PayloadProcessor;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
 
 /** Holds common state used during segment merging
  *
  * @lucene.experimental */
 public class MergeState {
+
+  public static class IndexReaderAndLiveDocs {
+    public final IndexReader reader;
+    public final Bits liveDocs;
+
+    public IndexReaderAndLiveDocs(IndexReader reader, Bits liveDocs) {
+      this.reader = reader;
+      this.liveDocs = liveDocs;
+    }
+  }
+
   public FieldInfos fieldInfos;
-  public List<IndexReader> readers;               // Readers being merged
+  public List<IndexReaderAndLiveDocs> readers;    // Readers & liveDocs being merged
   public int readerCount;                         // Number of readers being merged
   public int[][] docMaps;                         // Maps docIDs around deletions
   public int[] docBase;                           // New docID base per reader
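
The new holder lets the merger hand each reader over together with the liveDocs snapshot the merge should honor, which may differ from the reader's own (for example when deletes were applied after an NRT reader was opened). A hedged fragment showing how a merger-side caller could populate the list, using only the constructor declared above (segmentReaders is a hypothetical local):

    // Illustrative only: pair every reader with the liveDocs the merge should respect.
    static List<MergeState.IndexReaderAndLiveDocs> toMergeReaders(List<IndexReader> segmentReaders) {
      List<MergeState.IndexReaderAndLiveDocs> readers =
          new ArrayList<MergeState.IndexReaderAndLiveDocs>();
      for (IndexReader reader : segmentReaders) {
        readers.add(new MergeState.IndexReaderAndLiveDocs(reader, reader.getLiveDocs()));
      }
      return readers;
    }
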
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java b/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java
index 29dbf23..9123ff3 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/TermsIndexReaderBase.java
@@ -43,8 +43,6 @@
 
   public abstract FieldIndexEnum getFieldEnum(FieldInfo fieldInfo);
 
-  public abstract void loadTermsIndex(int indexDivisor) throws IOException;
-
   public abstract void close() throws IOException;
 
   public abstract void getExtensions(Collection<String> extensions);
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/VariableGapTermsIndexReader.java b/lucene/src/java/org/apache/lucene/index/codecs/VariableGapTermsIndexReader.java
index bbc064d..f9d1552 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/VariableGapTermsIndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/VariableGapTermsIndexReader.java
@@ -23,7 +23,6 @@
 import java.io.Writer;             // for toDot
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.Iterator;
 
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -63,6 +62,7 @@
     in = dir.openInput(IndexFileNames.segmentFileName(segment, codecId, VariableGapTermsIndexWriter.TERMS_INDEX_EXTENSION), new IOContext(context, true));
     this.segment = segment;
     boolean success = false;
+    assert indexDivisor == -1 || indexDivisor > 0;
 
     try {
       
@@ -170,7 +170,7 @@
       }
     }
 
-    public void loadTermsIndex() throws IOException {
+    private void loadTermsIndex() throws IOException {
       if (fst == null) {
         IndexInput clone = (IndexInput) in.clone();
         clone.seek(indexStart);
@@ -205,27 +205,6 @@
     }
   }
 
-  // Externally synced in IndexWriter
-  @Override
-  public void loadTermsIndex(int indexDivisor) throws IOException {
-    if (!indexLoaded) {
-
-      if (indexDivisor < 0) {
-        this.indexDivisor = -indexDivisor;
-      } else {
-        this.indexDivisor = indexDivisor;
-      }
-
-      Iterator<FieldIndexData> it = fields.values().iterator();
-      while(it.hasNext()) {
-        it.next().loadTermsIndex();
-      }
-
-      indexLoaded = true;
-      in.close();
-    }
-  }
-
   @Override
   public FieldIndexEnum getFieldEnum(FieldInfo fieldInfo) {
     final FieldIndexData fieldData = fields.get(fieldInfo);
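
With the public loadTermsIndex(int) entry point removed from both the fixed-gap and variable-gap readers, the terms index is set up when the reader is opened, and the only remaining knob is the indexDivisor passed at that point. The new asserts encode its contract: -1 means no terms index is loaded at all (typically for merge-only readers), and any positive value is a sampling factor. A tiny standalone check restating that contract (illustrative, not Lucene code):

    // Mirrors the new asserts: -1 disables the in-RAM terms index entirely,
    // any positive value keeps every N-th indexed term.
    static void checkIndexDivisor(int indexDivisor) {
      if (indexDivisor != -1 && indexDivisor <= 0) {
        throw new IllegalArgumentException(
            "indexDivisor must be -1 (don't load index) or > 0 (sampling factor), got " + indexDivisor);
      }
    }
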
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java
index 459229c..273258f 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java
@@ -767,11 +767,6 @@
       }
       
       @Override
-      public void loadTermsIndex(int indexDivisor) {
-        // no op
-      }
-
-      @Override
       public void close() {
         // Drop ref to FST:
         for(TermsReader termsReader : fields.values()) {
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
index e9fc45a..d9e5133 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
@@ -171,30 +171,6 @@
   }
 
   @Override
-  synchronized public void loadTermsIndex(int indexDivisor) throws IOException {
-    if (tis == null) {
-      Directory dir0;
-      if (si.getUseCompoundFile()) {
-        // In some cases, we were originally opened when CFS
-        // was not used, but then we are asked to open the
-        // terms reader with index, the segment has switched
-        // to CFS
-
-        if (!(dir instanceof CompoundFileDirectory)) {
-          dir0 = cfsReader = dir.openCompoundInput(IndexFileNames.segmentFileName(si.name, "", IndexFileNames.COMPOUND_FILE_EXTENSION), context);
-        } else {
-          dir0 = dir;
-        }
-        dir0 = cfsReader;
-      } else {
-        dir0 = dir;
-      }
-
-      tis = new TermInfosReader(dir0, si.name, fieldInfos, context, indexDivisor);
-    }
-  }
-
-  @Override
   public void close() throws IOException {
     if (tis != null) {
       tis.close();
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java
index b90a778..36edb49 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextFieldsReader.java
@@ -593,10 +593,6 @@
   }
 
   @Override
-  public void loadTermsIndex(int indexDivisor) {
-  }
-
-  @Override
   public void close() throws IOException {
     in.close();
   }
diff --git a/lucene/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/src/java/org/apache/lucene/search/BooleanQuery.java
index be670eb..ef18ff4 100644
--- a/lucene/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -18,10 +18,14 @@
  */
 
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.ConjunctionTermScorer.DocsAndFreqs;
+import org.apache.lucene.search.Similarity.ExactDocScorer;
+import org.apache.lucene.search.TermQuery.TermWeight;
 
 import java.io.IOException;
 import java.util.*;
@@ -166,17 +170,24 @@
     protected ArrayList<Weight> weights;
     protected int maxCoord;  // num optional + num required
     private final boolean disableCoord;
+    private final boolean termConjunction;
 
     public BooleanWeight(IndexSearcher searcher, boolean disableCoord)
       throws IOException {
       this.similarityProvider = searcher.getSimilarityProvider();
       this.disableCoord = disableCoord;
       weights = new ArrayList<Weight>(clauses.size());
+      boolean termConjunction = !clauses.isEmpty() && minNrShouldMatch == 0;
       for (int i = 0 ; i < clauses.size(); i++) {
         BooleanClause c = clauses.get(i);
-        weights.add(c.getQuery().createWeight(searcher));
+        Weight w = c.getQuery().createWeight(searcher);
+        if (!(c.isRequired() && (w instanceof TermWeight))) {
+          termConjunction = false;
+        }
+        weights.add(w);
         if (!c.isProhibited()) maxCoord++;
       }
+      this.termConjunction = termConjunction;
     }
 
     @Override
@@ -290,6 +301,10 @@
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext)
         throws IOException {
+      if (termConjunction) {
+        // specialized scorer for term conjunctions
+        return createConjunctionTermScorer(context, scorerContext);
+      }
       List<Scorer> required = new ArrayList<Scorer>();
       List<Scorer> prohibited = new ArrayList<Scorer>();
       List<Scorer> optional = new ArrayList<Scorer>();
@@ -328,6 +343,28 @@
       // Return a BooleanScorer2
       return new BooleanScorer2(this, disableCoord, minNrShouldMatch, required, prohibited, optional, maxCoord, scorerContext.needsPositions);
     }
+
+    private Scorer createConjunctionTermScorer(AtomicReaderContext context, ScorerContext scorerContext)
+        throws IOException {
+      final DocsAndFreqs[] docsAndFreqs = new DocsAndFreqs[weights.size()];
+      for (int i = 0; i < docsAndFreqs.length; i++) {
+        final TermWeight weight = (TermWeight) weights.get(i);
+        final TermsEnum termsEnum = weight.getTermsEnum(context);
+        if (termsEnum == null) {
+          return null;
+        }
+        final ExactDocScorer docScorer = weight.createDocScorer(context);
+        if (scorerContext.needsPositions) {
+          docsAndFreqs[i] = new DocsAndFreqs(termsEnum.docs(
+              context.reader.getLiveDocs(), null), termsEnum.docFreq(), docScorer, weight.createDocsAndPosEnumFactory(context, scorerContext));
+        } else {
+          docsAndFreqs[i] = new DocsAndFreqs(termsEnum.docs(
+            context.reader.getLiveDocs(), null), termsEnum.docFreq(), docScorer, null);
+        }
+      }
+      return new ConjunctionTermScorer(this, disableCoord ? 1.0f : coord(
+          docsAndFreqs.length, docsAndFreqs.length), docsAndFreqs, scorerContext.needsPositions);
+    }
     
     @Override
     public boolean scoresDocsOutOfOrder() {
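
The specialized path only fires when every clause is required, every clause's Weight is a TermWeight, and minNrShouldMatch is 0. A hedged sketch of a query shape that takes it (field and terms are made up; the classes used are the public Lucene ones referenced in the hunk):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;

    // Illustrative only: a conjunction of required TermQuerys, the shape the new path targets.
    public class PureTermConjunction {
      public static BooleanQuery build() {
        BooleanQuery bq = new BooleanQuery();
        bq.add(new TermQuery(new Term("body", "apache")), Occur.MUST);
        bq.add(new TermQuery(new Term("body", "lucene")), Occur.MUST);
        // No SHOULD/MUST_NOT clauses and minNrShouldMatch left at 0, so BooleanWeight
        // sets termConjunction and scorer() returns a ConjunctionTermScorer.
        return bq;
      }
    }
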
diff --git a/lucene/src/java/org/apache/lucene/search/ConjunctionTermScorer.java b/lucene/src/java/org/apache/lucene/search/ConjunctionTermScorer.java
new file mode 100644
index 0000000..828b73e
--- /dev/null
+++ b/lucene/src/java/org/apache/lucene/search/ConjunctionTermScorer.java
@@ -0,0 +1,136 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.search.Similarity.ExactDocScorer;
+import org.apache.lucene.search.TermQuery.DocsAndPositionsEnumFactory;
+import org.apache.lucene.search.positions.ConjunctionPositionIterator;
+import org.apache.lucene.search.positions.PositionIntervalIterator;
+import org.apache.lucene.util.ArrayUtil;
+import java.io.IOException;
+import java.util.Comparator;
+
+/** Scorer for conjunctions, sets of terms, all of which are required. */
+final class ConjunctionTermScorer extends Scorer {
+  private final float coord;
+  private int lastDoc = -1;
+  private final DocsAndFreqs[] docsAndFreqs;
+  private final DocsAndFreqs lead;
+  private final DocsAndFreqs[] orderedDocsAndFreqs;
+
+  ConjunctionTermScorer(Weight weight, float coord,
+      DocsAndFreqs[] docsAndFreqs, boolean needsPositions) throws IOException {
+    super(weight);
+    this.coord = coord;
+    this.docsAndFreqs = docsAndFreqs;
+    if (needsPositions) {
+      orderedDocsAndFreqs = new DocsAndFreqs[docsAndFreqs.length];
+      System.arraycopy(docsAndFreqs, 0, orderedDocsAndFreqs, 0, orderedDocsAndFreqs.length);
+    } else {
+      orderedDocsAndFreqs = null;
+    }
+    // Sort the array the first time to allow the least frequent DocsEnum to
+    // lead the matching.
+    ArrayUtil.mergeSort(docsAndFreqs, new Comparator<DocsAndFreqs>() {
+      public int compare(DocsAndFreqs o1, DocsAndFreqs o2) {
+        return o1.freq - o2.freq;
+      }
+    });
+
+    lead = docsAndFreqs[0]; // least frequent DocsEnum leads the intersection
+  }
+
+  private int doNext(int doc) throws IOException {
+    do {
+      if (lead.doc == DocsEnum.NO_MORE_DOCS) {
+        return NO_MORE_DOCS;
+      }
+      advanceHead: do {
+        for (int i = 1; i < docsAndFreqs.length; i++) {
+          if (docsAndFreqs[i].doc < doc) {
+            docsAndFreqs[i].doc = docsAndFreqs[i].docs.advance(doc);
+          }
+          if (docsAndFreqs[i].doc > doc) {
+            // DocsEnum beyond the current doc - break and advance lead
+            break advanceHead;
+          }
+        }
+        // success - all DocsEnums are on the same doc
+        return doc;
+      } while (true);
+      // advance head for next iteration
+      doc = lead.doc = lead.docs.nextDoc();  
+    } while (true);
+  }
+
+  @Override
+  public int advance(int target) throws IOException {
+    lead.doc = lead.docs.advance(target);
+    return lastDoc = doNext(lead.doc);
+  }
+
+  @Override
+  public int docID() {
+    return lastDoc;
+  }
+
+  @Override
+  public int nextDoc() throws IOException {
+    lead.doc = lead.docs.nextDoc();
+    return lastDoc = doNext(lead.doc);
+  }
+
+  @Override
+  public float score() throws IOException {
+    float sum = 0.0f;
+    for (DocsAndFreqs docs : docsAndFreqs) {
+      sum += docs.docScorer.score(lastDoc, docs.docs.freq());
+    }
+    return sum * coord;
+  }
+
+  @Override
+  public PositionIntervalIterator positions() throws IOException {
+    if (orderedDocsAndFreqs == null) {
+      throw new IllegalStateException("no positions requested for this scorer");
+    }
+    PositionIntervalIterator[] iterators = new PositionIntervalIterator[orderedDocsAndFreqs.length];
+    for (int i = 0; i < iterators.length; i++) {
+      DocsAndPositionsEnumFactory factory = orderedDocsAndFreqs[i].factory;
+      iterators[i] = new TermScorer.TermPositions(this, factory.create(), factory.doPayloads);
+    }
+    // only created if needed for this scorer - no penalty for non-positional queries
+    return new ConjunctionPositionIterator(this, iterators);
+  }
+
+  static final class DocsAndFreqs {
+    final DocsEnum docs;
+    final int freq;
+    final ExactDocScorer docScorer;
+    int doc = -1;
+    final DocsAndPositionsEnumFactory factory;
+
+    DocsAndFreqs(DocsEnum docs, int freq, ExactDocScorer docScorer, TermQuery.DocsAndPositionsEnumFactory factory) {
+      this.docs = docs;
+      this.freq = freq;
+      this.docScorer = docScorer;
+      this.factory = factory;
+    }
+  }
+}
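
doNext() above is a leapfrog intersection: the least frequent term leads and the other enums only advance() to catch up, so the cost is bounded by the rarest postings list; orderedDocsAndFreqs keeps the original clause order around solely for positions(). A standalone sketch of the same intersection idea over plain sorted int arrays (illustrative only, not the Lucene code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class LeapfrogIntersection {

      /** Returns the ids present in every ascending, duplicate-free postings array. */
      static List<Integer> intersect(int[][] postings) {
        // Shortest (rarest) list first, mirroring the freq-based mergeSort in the scorer.
        Arrays.sort(postings, new Comparator<int[]>() {
          public int compare(int[] a, int[] b) { return a.length - b.length; }
        });
        List<Integer> hits = new ArrayList<Integer>();
        nextLeadDoc:
        for (int doc : postings[0]) {                        // the rarest list leads
          for (int i = 1; i < postings.length; i++) {
            int pos = Arrays.binarySearch(postings[i], doc); // "advance" to the first id >= doc
            if (pos < 0) {
              if (-pos - 1 == postings[i].length) {
                return hits;                                 // list exhausted: no further matches
              }
              continue nextLeadDoc;                          // doc missing here, advance the lead
            }
          }
          hits.add(doc);                                     // every list contains doc
        }
        return hits;
      }

      public static void main(String[] args) {
        int[][] postings = { {1, 3, 5, 7, 9}, {3, 4, 5, 9}, {0, 3, 9} };
        System.out.println(intersect(postings));             // prints [3, 9]
      }
    }
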
diff --git a/lucene/src/java/org/apache/lucene/search/TermQuery.java b/lucene/src/java/org/apache/lucene/search/TermQuery.java
index 6c990b0..755448e 100644
--- a/lucene/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/TermQuery.java
@@ -25,10 +25,12 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Similarity.ExactDocScorer;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ReaderUtil;
@@ -42,7 +44,7 @@
   private int docFreq;
   private transient TermContext perReaderTermState;
 
-  private class TermWeight extends Weight {
+  final class TermWeight extends Weight {
     private final Similarity similarity;
     private final Similarity.Stats stats;
     private transient TermContext termStates;
@@ -73,30 +75,48 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-      final String field = term.field();
-      final IndexReader reader = context.reader;
       assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
-      final TermState state = termStates.get(context.ord);
-      if (state == null) { // term is not present in that reader
-        assert termNotInReader(reader, field, term.bytes()) : "no termstate found but term exists in reader";
+      final TermsEnum termsEnum = getTermsEnum(context);
+      if (termsEnum == null) {
         return null;
       }
-      final DocsEnum docs = reader.termDocsEnum(reader.getLiveDocs(), field,
-          term.bytes(), state);
+      // TODO should we reuse the DocsEnum here? 
+      final DocsEnum docs = termsEnum.docs(context.reader.getLiveDocs(), null);
+      assert docs != null;
       if (scorerContext.needsPositions) {
-        final DocsAndPositionsEnum docsAndPos = reader.termPositionsEnum(
-            reader.getLiveDocs(), field, term.bytes(), state);
         assert docs != null;
-        if (docsAndPos == null) {
-          throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run Query (term=" + term.text() + ")");
-        }
-        return new TermScorer(this, docs, new DocsAndPositionsEnumFactory(reader, term, state, scorerContext.needsPayloads), similarity.exactDocScorer(
-            stats, field, context));
+        return new TermScorer(this, docs, createDocsAndPosEnumFactory(context, scorerContext),createDocScorer(context));
       } else {
         assert docs != null;
-        return new TermScorer(this, docs, similarity.exactDocScorer(stats,
-            field, context));
+        return new TermScorer(this, docs, createDocScorer(context));
       }
+      
+    }
+    
+    /**
+     * Creates an {@link ExactDocScorer} for this {@link TermWeight}*/
+    ExactDocScorer createDocScorer(AtomicReaderContext context)
+        throws IOException {
+      return similarity.exactDocScorer(stats, term.field(), context);
+    }
+    
+    DocsAndPositionsEnumFactory createDocsAndPosEnumFactory(AtomicReaderContext context, ScorerContext scorerContext) {
+      return new DocsAndPositionsEnumFactory(context.reader, term, termStates.get(context.ord), scorerContext.needsPayloads);
+    }
+    /**
+     * Returns a {@link TermsEnum} positioned at this weight's Term or null if
+     * the term does not exist in the given context
+     */
+    TermsEnum getTermsEnum(AtomicReaderContext context) throws IOException {
+      final TermState state = termStates.get(context.ord);
+      if (state == null) { // term is not present in that reader
+        assert termNotInReader(context.reader, term.field(), term.bytes()) : "no termstate found but term exists in reader";
+        return null;
+      }
+      final TermsEnum termsEnum = context.reader.terms(term.field())
+          .getThreadTermsEnum();
+      termsEnum.seekExact(term.bytes(), state);
+      return termsEnum;
     }
     
     private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
@@ -223,11 +243,8 @@
     }
 
     DocsAndPositionsEnum create() throws IOException {
-      DocsAndPositionsEnum termPositionsEnum = reader.termPositionsEnum(reader.getLiveDocs(), term.field(),
+      return reader.termPositionsEnum(reader.getLiveDocs(), term.field(),
           term.bytes(), state);
-      return termPositionsEnum;
-//      return reader.termPositionsEnum(reader.getLiveDocs(), term.field(),
-//          term.bytes(), state);
     }
   }
 }
diff --git a/lucene/src/java/org/apache/lucene/search/TermScorer.java b/lucene/src/java/org/apache/lucene/search/TermScorer.java
index 981e1b4..bf55e11 100644
--- a/lucene/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/src/java/org/apache/lucene/search/TermScorer.java
@@ -174,17 +174,17 @@
   @Override
   public PositionIntervalIterator positions() throws IOException {
     assert docsAndPosFactory != null;
-    return new TermPositions(docsAndPosFactory.create(), docsAndPosFactory.doPayloads);
+    return new TermPositions(this, docsAndPosFactory.create(), docsAndPosFactory.doPayloads);
   }
 
-  private final class TermPositions extends PositionIntervalIterator {
+ static final class TermPositions extends PositionIntervalIterator {
     private final PositionInterval interval;
     int positionsPending;
     private final DocsAndPositionsEnum docsAndPos;
     private int docID = -1;
 
-    public TermPositions(DocsAndPositionsEnum docsAndPos, boolean doPayloads) {
-      super(TermScorer.this);
+    public TermPositions(Scorer scorer, DocsAndPositionsEnum docsAndPos, boolean doPayloads) {
+      super(scorer);
       this.docsAndPos = docsAndPos;
       this.interval = doPayloads ? new PayloadPosInterval(docsAndPos, this)
           : new PositionInterval();
@@ -219,7 +219,7 @@
     public int advanceTo(int docId) throws IOException {
       int advance = docsAndPos.advance(docId);
       if (advance != NO_MORE_DOCS) {
-        positionsPending = freq = docsAndPos.freq();
+        positionsPending = docsAndPos.freq();
       }
       interval.reset();
       return docID = docsAndPos.docID();
diff --git a/lucene/src/java/org/apache/lucene/search/TopScoreDocCollector.java b/lucene/src/java/org/apache/lucene/search/TopScoreDocCollector.java
index d8f3175..c542e18 100644
--- a/lucene/src/java/org/apache/lucene/search/TopScoreDocCollector.java
+++ b/lucene/src/java/org/apache/lucene/search/TopScoreDocCollector.java
@@ -82,8 +82,13 @@
       assert !Float.isNaN(score);
 
       totalHits++;
+      if (score < pqTop.score) {
+        // Doesn't compete w/ bottom entry in queue
+        return;
+      }
       doc += docBase;
-      if (score < pqTop.score || (score == pqTop.score && doc > pqTop.doc)) {
+      if (score == pqTop.score && doc > pqTop.doc) {
+        // Break tie in score by doc ID:
         return;
       }
       pqTop.doc = doc;
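
The reordering above rejects non-competitive hits before paying for the docBase addition, and only exact score ties fall through to the doc-id comparison. A standalone restatement of the check (illustrative; pqTopScore/pqTopDoc stand in for the bottom entry of the priority queue):

    // Two-step competitiveness test after the reorder.
    static boolean isCompetitive(float score, int segmentDoc, int docBase,
                                 float pqTopScore, int pqTopDoc) {
      if (score < pqTopScore) {
        return false;                      // cheap reject: the global doc id is never computed
      }
      int globalDoc = segmentDoc + docBase;
      // equal scores tie-break by doc id: the earlier (smaller) id stays in the queue
      return !(score == pqTopScore && globalDoc > pqTopDoc);
    }
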
diff --git a/lucene/src/java/org/apache/lucene/search/positions/BooleanPositionIterator.java b/lucene/src/java/org/apache/lucene/search/positions/BooleanPositionIterator.java
index 0968cfc..d27f7e8 100644
--- a/lucene/src/java/org/apache/lucene/search/positions/BooleanPositionIterator.java
+++ b/lucene/src/java/org/apache/lucene/search/positions/BooleanPositionIterator.java
@@ -39,6 +39,13 @@
       iterators[i] = subScorers[i].positions();
     }
   }
+  
+  public BooleanPositionIterator(Scorer scorer, PositionIntervalIterator[] iterators,
+      IntervalQueue queue) throws IOException {
+    super(scorer);
+    this.queue = queue;
+    this.iterators = iterators;
+  }
 
   @Override
   public PositionIntervalIterator[] subs(boolean inOrder) {
diff --git a/lucene/src/java/org/apache/lucene/search/positions/ConjunctionPositionIterator.java b/lucene/src/java/org/apache/lucene/search/positions/ConjunctionPositionIterator.java
index 399225d..aad60b3 100644
--- a/lucene/src/java/org/apache/lucene/search/positions/ConjunctionPositionIterator.java
+++ b/lucene/src/java/org/apache/lucene/search/positions/ConjunctionPositionIterator.java
@@ -37,6 +37,13 @@
     this (scorer, subScorers, subScorers.length);
   }
   
+  public ConjunctionPositionIterator(Scorer scorer, PositionIntervalIterator... iterators) throws IOException {
+    super(scorer, iterators, new IntervalQueueAnd(iterators.length));
+    queue = (IntervalQueueAnd) super.queue; // avoid lots of casts?
+    this.nrMustMatch = iterators.length;
+
+  }
+  
   public ConjunctionPositionIterator(Scorer scorer, Scorer[] subScorers, int nrMustMatch) throws IOException {
     super(scorer, subScorers, new IntervalQueueAnd(subScorers.length));
     queue = (IntervalQueueAnd) super.queue; // avoid lots of casts?
diff --git a/lucene/src/java/org/apache/lucene/util/BitVector.java b/lucene/src/java/org/apache/lucene/util/BitVector.java
index ba82cbc..8ac0a1f 100644
--- a/lucene/src/java/org/apache/lucene/util/BitVector.java
+++ b/lucene/src/java/org/apache/lucene/util/BitVector.java
@@ -353,6 +353,11 @@
       } else {
         readBits(input);
       }
+
+      if (version < VERSION_DGAPS_CLEARED) {
+        invertAll();
+      }
+
       assert verifyCount();
     } finally {
       input.close();
diff --git a/lucene/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java b/lucene/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java
index 3a05b3f..4064b46 100644
--- a/lucene/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java
+++ b/lucene/src/test-framework/org/apache/lucene/store/MockDirectoryWrapper.java
@@ -272,14 +272,18 @@
   }
 
   void maybeThrowIOException() throws IOException {
+    maybeThrowIOException(null);
+  }
+
+  void maybeThrowIOException(String message) throws IOException {
     if (randomIOExceptionRate > 0.0) {
       int number = Math.abs(randomState.nextInt() % 1000);
       if (number < randomIOExceptionRate*1000) {
         if (LuceneTestCase.VERBOSE) {
-          System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception");
+          System.out.println(Thread.currentThread().getName() + ": MockDirectoryWrapper: now throw random exception" + (message == null ? "" : " (" + message + ")"));
           new Throwable().printStackTrace(System.out);
         }
-        throw new IOException("a random IOException");
+        throw new IOException("a random IOException" + (message == null ? "" : "(" + message + ")"));
       }
     }
   }
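
Threading the file name through maybeThrowIOException makes randomly injected failures report which file they hit. A hedged test fragment (assumes the existing setRandomIOExceptionRate setter; the rate is made up):

    MockDirectoryWrapper dir = newDirectory();   // LuceneTestCase factory
    dir.setRandomIOExceptionRate(0.1);           // roughly 10% of eligible writes now fail
    // Failures then read e.g. "a random IOException(<segment file name>)".
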
diff --git a/lucene/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java b/lucene/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java
index 66e972d..f04e4e3 100644
--- a/lucene/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java
+++ b/lucene/src/test-framework/org/apache/lucene/store/MockIndexInputWrapper.java
@@ -147,4 +147,10 @@
   public long readVLong() throws IOException {
     return delegate.readVLong();
   }
+
+  @Override
+  public String toString() {
+    return "MockIndexInputWrapper(" + delegate + ")";
+  }
 }
+
diff --git a/lucene/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java b/lucene/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java
index 0f93567..83f29b7 100644
--- a/lucene/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java
+++ b/lucene/src/test-framework/org/apache/lucene/store/MockIndexOutputWrapper.java
@@ -126,7 +126,7 @@
       // Maybe throw random exception; only do this on first
       // write to a new file:
       first = false;
-      dir.maybeThrowIOException();
+      dir.maybeThrowIOException(name);
     }
   }
 
@@ -156,4 +156,9 @@
     // TODO: we may need to check disk full here as well
     dir.maybeThrowDeterministicException();
   }
+
+  @Override
+  public String toString() {
+    return "MockIndexOutputWrapper(" + delegate + ")";
+  }
 }
diff --git a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
index f73e69e..59effca 100644
--- a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
@@ -754,7 +754,7 @@
    * is active and {@link #RANDOM_MULTIPLIER}, but also with some random fudge.
    */
   public static int atLeast(Random random, int i) {
-    int min = (TEST_NIGHTLY ? 5*i : i) * RANDOM_MULTIPLIER;
+    int min = (TEST_NIGHTLY ? 3*i : i) * RANDOM_MULTIPLIER;
     int max = min+(min/2);
     return _TestUtil.nextInt(random, min, max);
   }
@@ -770,9 +770,9 @@
    * is active and {@link #RANDOM_MULTIPLIER}.
    */
   public static boolean rarely(Random random) {
-    int p = TEST_NIGHTLY ? 25 : 5;
+    int p = TEST_NIGHTLY ? 10 : 5;
     p += (p * Math.log(RANDOM_MULTIPLIER));
-    int min = 100 - Math.min(p, 90); // never more than 90
+    int min = 100 - Math.min(p, 50); // never more than 50
     return random.nextInt(100) >= min;
   }
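
For reference, the new constants put rarely() at roughly 5% for a default run and 10% for nightly, and the tightened cap means it can never exceed 50%. A standalone recomputation of those odds, using the same arithmetic as above:

    // nightly, multiplier 1 -> 0.10; default run, multiplier 1 -> 0.05
    static double rarelyProbability(boolean nightly, int multiplier) {
      int p = nightly ? 10 : 5;
      p += (p * Math.log(multiplier));   // multiplier 1 leaves p unchanged (log(1) == 0)
      int min = 100 - Math.min(p, 50);   // the cap keeps rarely() at or below 50%
      return (100 - min) / 100.0;
    }
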
   
diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
index b3cebe0..de6b730 100644
--- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
+++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
@@ -92,10 +92,6 @@
       @Override
       public void close() {
       }
-
-      @Override
-      public void loadTermsIndex(int indexDivisor) {
-      }
     } 
 
     static class RAMField extends Terms {
diff --git a/lucene/src/test/org/apache/lucene/index/Test2BPostings.java b/lucene/src/test/org/apache/lucene/index/Test2BPostings.java
index 371060a..006cbb4 100644
--- a/lucene/src/test/org/apache/lucene/index/Test2BPostings.java
+++ b/lucene/src/test/org/apache/lucene/index/Test2BPostings.java
@@ -70,14 +70,16 @@
     final int numDocs = (Integer.MAX_VALUE / 26) + 1;
     for (int i = 0; i < numDocs; i++) {
       w.addDocument(doc);
-      if (i % 100000 == 0) {
+      if (VERBOSE && i % 100000 == 0) {
         System.out.println(i + " of " + numDocs + "...");
       }
     }
     w.optimize();
     w.close();
     CheckIndex ci = new CheckIndex(dir);
-    ci.setInfoStream(System.out);
+    if (VERBOSE) {
+      ci.setInfoStream(System.out);
+    }
     ci.checkIndex();
     dir.close();
   }
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
index 0579d6c..087abc6 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
@@ -551,7 +551,9 @@
         if (!success) {
           // Must force the close else the writer can have
           // open files which cause exc in MockRAMDir.close
-         
+          if (VERBOSE) {
+            System.out.println("TEST: now rollback");
+          }
           modifier.rollback();
         }
 
diff --git a/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java b/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
index 6a557f6..1b0386b 100644
--- a/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
+++ b/lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
@@ -90,6 +90,7 @@
     for (int r = 0; r < 3; r++) {
       final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+      w.setInfoStream(VERBOSE ? System.out : null);
       final int numUpdates = atLeast(20);
       int numThreads = _TestUtil.nextInt(random, 2, 6);
       IndexingThread[] threads = new IndexingThread[numThreads];
diff --git a/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java
index ceb6cee..33710bf 100644
--- a/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java
+++ b/lucene/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -40,7 +40,7 @@
       doc.add(f);
       final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
       doc.add(idField);
-      int num = atLeast(5000);
+      int num = atLeast(4097);
       for(int id=0;id<num;id++) {
         if (random.nextInt(4) == 3) {
           f.setValue("a");
diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
index 40e49fc..b1ab16d 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
@@ -56,7 +56,7 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, directory,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
-        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000))
         .setMergePolicy(newLogMergePolicy()));
     
     NumericField
@@ -337,7 +337,7 @@
   private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
     String field="field"+precisionStep;
     int termCountT=0,termCountC=0;
-    int num = atLeast(10);
+    int num = _TestUtil.nextInt(random, 10, 20);
     for (int i = 0; i < num; i++) {
       int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset;
       int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset;
@@ -415,7 +415,7 @@
   private void testRangeSplit(int precisionStep) throws Exception {
     String field="ascfield"+precisionStep;
     // 10 random tests
-    int  num = atLeast(10);
+    int num = _TestUtil.nextInt(random, 10, 20);
     for (int  i =0;  i< num; i++) {
       int lower=(int)(random.nextDouble()*noDocs - noDocs/2);
       int upper=(int)(random.nextDouble()*noDocs - noDocs/2);
@@ -491,7 +491,7 @@
     String field="field"+precisionStep;
     // 10 random tests, the index order is ascending,
     // so using a reverse sort field should return descending documents
-    int num = atLeast(10);
+    int num = _TestUtil.nextInt(random, 10, 20);
     for (int i = 0; i < num; i++) {
       int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset;
       int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset;
diff --git a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
index d655171..6171f2a 100644
--- a/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
+++ b/lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
@@ -53,7 +53,7 @@
     directory = newDirectory();
     RandomIndexWriter writer = new RandomIndexWriter(random, directory,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
-        .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))
+        .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000))
         .setMergePolicy(newLogMergePolicy()));
     
     NumericField
@@ -354,7 +354,7 @@
   private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception {
     String field="field"+precisionStep;
     int termCountT=0,termCountC=0;
-    int num = atLeast(10);
+    int num = _TestUtil.nextInt(random, 10, 20);
     for (int i = 0; i < num; i++) {
       long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset;
       long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset;
@@ -437,7 +437,7 @@
   private void testRangeSplit(int precisionStep) throws Exception {
     String field="ascfield"+precisionStep;
     // 10 random tests
-    int num = atLeast(10);
+    int num = _TestUtil.nextInt(random, 10, 20);
     for (int i = 0; i < num; i++) {
       long lower=(long)(random.nextDouble()*noDocs - noDocs/2);
       long upper=(long)(random.nextDouble()*noDocs - noDocs/2);
@@ -523,7 +523,7 @@
     String field="field"+precisionStep;
     // 10 random tests, the index order is ascending,
     // so using a reverse sort field should return descending documents
-    int num = atLeast(10);
+    int num = _TestUtil.nextInt(random, 10, 20);
     for (int i = 0; i < num; i++) {
       long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset;
       long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset;
diff --git a/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java
index a3eb731..7e7759e 100644
--- a/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java
+++ b/lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java
@@ -59,7 +59,7 @@
     // we generate awful prefixes: good for testing.
     // but for preflex codec, the test can be very slow, so use less iterations.
     final String codec = CodecProvider.getDefault().getFieldCodec("field");
-    int num = codec.equals("PreFlex") ? 200 * RANDOM_MULTIPLIER : atLeast(2000);
+    int num = codec.equals("PreFlex") ? 200 * RANDOM_MULTIPLIER : atLeast(1000);
     for (int i = 0; i < num; i++) {
       field.setValue(_TestUtil.randomUnicodeString(random, 10));
       writer.addDocument(doc);
@@ -114,7 +114,7 @@
   
   /** test a bunch of random prefixes */
   public void testPrefixes() throws Exception {
-      int num = atLeast(1000);
+      int num = atLeast(100);
       for (int i = 0; i < num; i++)
         assertSame(_TestUtil.randomUnicodeString(random, 5));
   }
diff --git a/lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java
index 3efb42b..c31d4dd 100644
--- a/lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -830,7 +830,7 @@
         final IntsRef prefix = ent.getKey();
         final CountMinOutput<T> cmo = ent.getValue();
         if (VERBOSE) {
-          System.out.println("  term=" + inputToString(inputMode, prefix) + " count=" + cmo.count + " isLeaf=" + cmo.isLeaf + " output=" + outputs.outputToString(cmo.output) + " isFinal=" + cmo.isFinal);
+          System.out.println("  term prefix=" + inputToString(inputMode, prefix, false) + " count=" + cmo.count + " isLeaf=" + cmo.isLeaf + " output=" + outputs.outputToString(cmo.output) + " isFinal=" + cmo.isFinal);
         }
         final boolean keep;
         if (prune1 > 0) {
@@ -897,7 +897,7 @@
       IntsRefFSTEnum.InputOutput<T> current;
       while((current = fstEnum.next()) != null) {
         if (VERBOSE) {
-          System.out.println("  fstEnum.next term=" + inputToString(inputMode, current.input) + " output=" + outputs.outputToString(current.output));
+          System.out.println("  fstEnum.next prefix=" + inputToString(inputMode, current.input, false) + " output=" + outputs.outputToString(current.output));
         }
         final CountMinOutput cmo = prefixes.get(current.input);
         assertNotNull(cmo);
@@ -920,7 +920,7 @@
           final CountMinOutput<T> cmo = ent.getValue();
           final T output = run(fst, ent.getKey(), stopNode);
           if (VERBOSE) {
-            System.out.println("TEST: verify term=" + inputToString(inputMode, ent.getKey()) + " output=" + outputs.outputToString(cmo.output));
+            System.out.println("TEST: verify prefix=" + inputToString(inputMode, ent.getKey(), false) + " output=" + outputs.outputToString(cmo.output));
           }
           // if (cmo.isFinal && !cmo.isLeaf) {
           if (cmo.isFinal) {
@@ -980,11 +980,17 @@
 
   @Nightly
   public void testBigSet() throws IOException {
-    testRandomWords(_TestUtil.nextInt(random, 50000, 60000), atLeast(1));
+    testRandomWords(_TestUtil.nextInt(random, 50000, 60000), 1);
+  }
+  
+  private static String inputToString(int inputMode, IntsRef term) {
+    return inputToString(inputMode, term, true);
   }
 
-  private static String inputToString(int inputMode, IntsRef term) {
-    if (inputMode == 0) {
+  private static String inputToString(int inputMode, IntsRef term, boolean isValidUnicode) {
+    if (!isValidUnicode) {
+      return term.toString();
+    } else if (inputMode == 0) {
       // utf8
       return toBytesRef(term).utf8ToString() + " " + term;
     } else {
@@ -1007,7 +1013,7 @@
     final int RUN_TIME_MSEC = atLeast(500);
     final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64);
     final File tempDir = _TestUtil.getTempDir("fstlines");
-    final MockDirectoryWrapper dir = new MockDirectoryWrapper(random, FSDirectory.open(tempDir));
+    final MockDirectoryWrapper dir = newFSDirectory(tempDir);
     final IndexWriter writer = new IndexWriter(dir, conf);
     writer.setInfoStream(VERBOSE ? System.out : null);
     final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
@@ -1057,7 +1063,7 @@
         }
         builder.add(term, outputs.get(output));
         ord++;
-        if (ord % 100000 == 0 && LuceneTestCase.TEST_NIGHTLY) {
+        if (VERBOSE && ord % 100000 == 0 && LuceneTestCase.TEST_NIGHTLY) {
           System.out.println(ord + " terms...");
         }
       }
diff --git a/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
index 3134826..c80d2d4 100644
--- a/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
@@ -70,8 +70,7 @@
  * <h3>Initial Usage</h3>
  * <p/>
  * This class has lots of options to try to make it efficient and flexible.
- * See the body of {@link #main main()} below in the source for real code, or
- * if you want pseudo code, the simplest possible usage is as follows. The bold
+ * The simplest possible usage is as follows. The bold
  * fragment is specific to this class.
  * <p/>
  * <pre class="prettyprint">
@@ -574,45 +573,12 @@
   }
 
   /**
-   * Return a query that will return docs like the passed file.
-   *
-   * @return a query that will return docs like the passed file.
-   */
-  public Query like(File f) throws IOException {
-    if (fieldNames == null) {
-      // gather list of valid fields from lucene
-      Collection<String> fields = ir.getFieldNames(IndexReader.FieldOption.INDEXED);
-      fieldNames = fields.toArray(new String[fields.size()]);
-    }
-
-    return like(new FileReader(f));
-  }
-
-  /**
-   * Return a query that will return docs like the passed URL.
-   *
-   * @return a query that will return docs like the passed URL.
-   */
-  public Query like(URL u) throws IOException {
-    return like(new InputStreamReader(u.openConnection().getInputStream()));
-  }
-
-  /**
-   * Return a query that will return docs like the passed stream.
-   *
-   * @return a query that will return docs like the passed stream.
-   */
-  public Query like(java.io.InputStream is) throws IOException {
-    return like(new InputStreamReader(is));
-  }
-
-  /**
    * Return a query that will return docs like the passed Reader.
    *
    * @return a query that will return docs like the passed Reader.
    */
-  public Query like(Reader r) throws IOException {
-    return createQuery(retrieveTerms(r));
+  public Query like(Reader r, String fieldName) throws IOException {
+    return createQuery(retrieveTerms(r, fieldName));
   }
 
   /**
@@ -727,65 +693,6 @@
   }
 
   /**
-   * Test driver.
-   * Pass in "-i INDEX" and then either "-fn FILE" or "-url URL".
-   */
-  public static void main(String[] a) throws Throwable {
-    String indexName = "localhost_index";
-    String fn = "c:/Program Files/Apache Group/Apache/htdocs/manual/vhosts/index.html.en";
-    URL url = null;
-    for (int i = 0; i < a.length; i++) {
-      if (a[i].equals("-i")) {
-        indexName = a[++i];
-      } else if (a[i].equals("-f")) {
-        fn = a[++i];
-      } else if (a[i].equals("-url")) {
-        url = new URL(a[++i]);
-      }
-    }
-
-    PrintStream o = System.out;
-    FSDirectory dir = FSDirectory.open(new File(indexName));
-    IndexReader r = IndexReader.open(dir, true);
-    o.println("Open index " + indexName + " which has " + r.numDocs() + " docs");
-
-    MoreLikeThis mlt = new MoreLikeThis(r);
-
-    o.println("Query generation parameters:");
-    o.println(mlt.describeParams());
-    o.println();
-
-    Query query = null;
-    if (url != null) {
-      o.println("Parsing URL: " + url);
-      query = mlt.like(url);
-    } else if (fn != null) {
-      o.println("Parsing file: " + fn);
-      query = mlt.like(new File(fn));
-    }
-
-    o.println("q: " + query);
-    o.println();
-    IndexSearcher searcher = new IndexSearcher(dir, true);
-
-    TopDocs hits = searcher.search(query, null, 25);
-    int len = hits.totalHits;
-    o.println("found: " + len + " documents matching");
-    o.println();
-    ScoreDoc[] scoreDocs = hits.scoreDocs;
-    for (int i = 0; i < Math.min(25, len); i++) {
-      Document d = searcher.doc(scoreDocs[i].doc);
-      String summary = d.get("summary");
-      o.println("score  : " + scoreDocs[i].score);
-      o.println("url    : " + d.get("url"));
-      o.println("\ttitle  : " + d.get("title"));
-      if (summary != null)
-        o.println("\tsummary: " + d.get("summary"));
-      o.println();
-    }
-  }
-
-  /**
    * Find words for a more-like-this query former.
    *
    * @param docNum the id of the lucene document from which to find terms
@@ -918,19 +825,18 @@
    * For an easier method to call see {@link #retrieveInterestingTerms retrieveInterestingTerms()}.
    *
    * @param r the reader that has the content of the document
+   * @param fieldName field passed to the analyzer to use when analyzing the content
    * @return the most interesting words in the document ordered by score, with the highest scoring, or best entry, first
    * @see #retrieveInterestingTerms
    */
-  public PriorityQueue<Object[]> retrieveTerms(Reader r) throws IOException {
+  public PriorityQueue<Object[]> retrieveTerms(Reader r, String fieldName) throws IOException {
     Map<String, Int> words = new HashMap<String, Int>();
-    for (String fieldName : fieldNames) {
-      addTermFrequencies(r, words, fieldName);
-    }
+    addTermFrequencies(r, words, fieldName);
     return createQueue(words);
   }
 
   /**
-   * @see #retrieveInterestingTerms(java.io.Reader)
+   * @see #retrieveInterestingTerms(java.io.Reader, String)
    */
   public String[] retrieveInterestingTerms(int docNum) throws IOException {
     ArrayList<Object> al = new ArrayList<Object>(maxQueryTerms);
@@ -948,16 +854,17 @@
 
   /**
    * Convenience routine to make it easy to return the most interesting words in a document.
-   * More advanced users will call {@link #retrieveTerms(java.io.Reader) retrieveTerms()} directly.
+   * More advanced users will call {@link #retrieveTerms(Reader, String) retrieveTerms()} directly.
    *
    * @param r the source document
+   * @param fieldName field passed to the analyzer to use when analyzing the content
    * @return the most interesting words in the document
-   * @see #retrieveTerms(java.io.Reader)
+   * @see #retrieveTerms(java.io.Reader, String)
    * @see #setMaxQueryTerms
    */
-  public String[] retrieveInterestingTerms(Reader r) throws IOException {
+  public String[] retrieveInterestingTerms(Reader r, String fieldName) throws IOException {
     ArrayList<Object> al = new ArrayList<Object>(maxQueryTerms);
-    PriorityQueue<Object[]> pq = retrieveTerms(r);
+    PriorityQueue<Object[]> pq = retrieveTerms(r, fieldName);
     Object cur;
     int lim = maxQueryTerms; // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
     // we just want to return the top words
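
With the File/URL/InputStream overloads and the main() driver removed, callers now hand MoreLikeThis a Reader plus the name of the field to analyze it under. A hedged migration sketch (the field name and thresholds are examples, mirroring the updated tests):

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.queries.mlt.MoreLikeThis;
    import org.apache.lucene.search.Query;

    // Illustrative caller updated to the new signature; "text" is only an example field name.
    public class MltMigration {
      static Query likeFreeText(IndexReader reader, Analyzer analyzer, String content) throws IOException {
        MoreLikeThis mlt = new MoreLikeThis(reader);
        mlt.setAnalyzer(analyzer);                  // required when analyzing free text
        mlt.setFieldNames(new String[] {"text"});   // fields mined for interesting terms
        mlt.setMinTermFreq(1);
        mlt.setMinDocFreq(1);
        // The field name now travels with the Reader so the analyzer knows how to tokenize it.
        return mlt.like(new StringReader(content), "text");
      }
    }
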
diff --git a/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThisQuery.java b/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThisQuery.java
index c2efef7..d26efb5 100644
--- a/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThisQuery.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThisQuery.java
@@ -28,6 +28,7 @@
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
+import java.io.StringReader;
 import java.util.Set;
 
 /**
@@ -40,6 +41,7 @@
   private String likeText;
   private String[] moreLikeFields;
   private Analyzer analyzer;
+  private String fieldName;
   private float percentTermsToMatch = 0.3f;
   private int minTermFrequency = 1;
   private int maxQueryTerms = 5;
@@ -49,10 +51,11 @@
   /**
    * @param moreLikeFields
    */
-  public MoreLikeThisQuery(String likeText, String[] moreLikeFields, Analyzer analyzer) {
+  public MoreLikeThisQuery(String likeText, String[] moreLikeFields, Analyzer analyzer, String fieldName) {
     this.likeText = likeText;
     this.moreLikeFields = moreLikeFields;
     this.analyzer = analyzer;
+    this.fieldName = fieldName;
   }
 
   @Override
@@ -67,7 +70,7 @@
     }
     mlt.setMaxQueryTerms(maxQueryTerms);
     mlt.setStopWords(stopWords);
-    BooleanQuery bq = (BooleanQuery) mlt.like(new ByteArrayInputStream(likeText.getBytes()));
+    BooleanQuery bq = (BooleanQuery) mlt.like(new StringReader(likeText), fieldName);
     BooleanClause[] clauses = bq.getClauses();
     //make at least half the terms match
     bq.setMinimumNumberShouldMatch((int) (clauses.length * percentTermsToMatch));
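
The same change ripples into MoreLikeThisQuery: the analysis field is named up front and the like text is wrapped in a StringReader instead of being round-tripped through default-charset bytes. A one-line construction sketch (analyzer is assumed to be in scope; field names are examples):

    // Illustrative construction with the added fieldName argument.
    MoreLikeThisQuery mltq =
        new MoreLikeThisQuery("lucene release", new String[] {"text"}, analyzer, "text");
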
diff --git a/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java b/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
index bb6fe6c..38d9eda 100644
--- a/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
+++ b/modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java
@@ -87,7 +87,7 @@
     mlt.setBoostFactor(boostFactor);
     
     BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
-        "lucene release"));
+        "lucene release"), "text");
     List<BooleanClause> clauses = query.clauses();
     
     assertEquals("Expected " + originalValues.size() + " clauses.",
@@ -115,7 +115,7 @@
     mlt.setFieldNames(new String[] {"text"});
     mlt.setBoost(true);
     BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
-        "lucene release"));
+        "lucene release"), "text");
     List<BooleanClause> clauses = query.clauses();
 
     for (BooleanClause clause : clauses) {
@@ -124,4 +124,15 @@
     }
     return originalValues;
   }
+  
+  // LUCENE-3326
+  public void testMultiFields() throws Exception {
+    MoreLikeThis mlt = new MoreLikeThis(reader);
+    mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false));
+    mlt.setMinDocFreq(1);
+    mlt.setMinTermFreq(1);
+    mlt.setMinWordLen(1);
+    mlt.setFieldNames(new String[] {"text", "foobar"});
+    mlt.like(new StringReader("this is a test"), "foobar");
+  }
 }
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 4a76c5d..5922eca 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -154,6 +154,10 @@
   for faster reopen times. There is also a new 'soft' autocommit tracker that can be
   configured. (Mark Miller, Robert Muir)
 
+* SOLR-2399: Updated Solr Admin interface.  New look and feel with per core administration
+  and many new options.  (Stefan Matheis via ryan)
+
+
 Optimizations
 ----------------------
 
@@ -264,6 +268,9 @@
 * SOLR-1825: SolrQuery.addFacetQuery now enables facets automatically, like
   addFacetField (Chris Male)
 
+* SOLR-2663: FieldTypePluginLoader has been refactored out of IndexSchema 
+  and made public. (hossman)
+
 Documentation
 ----------------------
 
@@ -310,6 +317,9 @@
 * LUCENE-2048: Added omitPositions to the schema, so you can omit position
   information while still indexing term frequencies.  (rmuir)
 
+* SOLR-2584: add UniqFieldsUpdateProcessor that removes duplicate values in the
+  specified fields. (Elmer Garduno, koji)
+
 Optimizations
 ----------------------
 
diff --git a/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java b/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java
index 3ca71a7..604ab8d 100644
--- a/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java
+++ b/solr/contrib/dataimporthandler-extras/src/test/org/apache/solr/handler/dataimport/TestMailEntityProcessor.java
@@ -188,7 +188,7 @@
     Boolean commitCalled;
 
     public SolrWriterImpl() {
-      super(null, ".", null);
+      super(null, null);
     }
 
     @Override
@@ -196,10 +196,6 @@
       return docs.add(doc);
     }
 
-    @Override
-    public void log(int event, String name, Object row) {
-      // Do nothing
-    }
 
     @Override
     public void doDeleteAll() {
diff --git a/solr/contrib/dataimporthandler/CHANGES.txt b/solr/contrib/dataimporthandler/CHANGES.txt
index 150a15c..9e75ae3 100644
--- a/solr/contrib/dataimporthandler/CHANGES.txt
+++ b/solr/contrib/dataimporthandler/CHANGES.txt
@@ -19,6 +19,7 @@
 * SOLR-2644: When using threads=2 the default logging is set too high (Bill Bell via shalin)
 * SOLR-2492: DIH does not commit if only deletes are processed (James Dyer via shalin)
 * SOLR-2186: DataImportHandler's multi-threaded option throws NPE (Lance Norskog, Frank Wesemann, shalin)
+* SOLR-2655: DIH multi threaded mode does not resolve attributes correctly (Frank Wesemann, shalin)
 
 ==================  3.3.0 ==================
 
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java
index 1b32005..899ced5 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java
@@ -100,7 +100,7 @@
     if (entity.dataSrc != null && docBuilder != null && docBuilder.verboseDebug &&
              Context.FULL_DUMP.equals(currentProcess())) {
       //debug is not yet implemented properly for deltas
-      entity.dataSrc = docBuilder.writer.getDebugLogger().wrapDs(entity.dataSrc);
+      entity.dataSrc = docBuilder.getDebugLogger().wrapDs(entity.dataSrc);
     }
     return entity.dataSrc;
   }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHLogLevels.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHLogLevels.java
new file mode 100644
index 0000000..0ce3f6d
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHLogLevels.java
@@ -0,0 +1,5 @@
+package org.apache.solr.handler.dataimport;
+
+public enum DIHLogLevels {
+	START_ENTITY, END_ENTITY, TRANSFORMED_ROW, ENTITY_META, PRE_TRANSFORMER_ROW, START_DOC, END_DOC, ENTITY_OUT, ROW_END, TRANSFORMER_EXCEPTION, ENTITY_EXCEPTION, DISABLE_LOGGING, ENABLE_LOGGING, NONE
+}
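The new DIHLogLevels enum takes over from the int event constants that SolrWriter used to define (those constants are removed from SolrWriter further down in this patch), so debug logging calls now pass the enum value directly. A representative call, as it appears in the DocBuilder changes below:

    getDebugLogger().log(DIHLogLevels.START_ENTITY, entity.name, null);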
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHPropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHPropertiesWriter.java
new file mode 100644
index 0000000..473ee7d
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHPropertiesWriter.java
@@ -0,0 +1,33 @@
+package org.apache.solr.handler.dataimport;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.Properties;
+
+/**
+ * @since solr 3.2
+ */
+public interface DIHPropertiesWriter {
+
+    public void init(DataImporter dataImporter);
+
+    public boolean isWritable();
+
+    public void persist(Properties props);
+
+    public Properties readIndexerProperties();
+
+}
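DIHPropertiesWriter pulls the last-index-time bookkeeping out of SolrWriter and behind an interface; SimplePropertiesWriter, added later in this patch, is the file-based implementation that DataImporter instantiates. A minimal alternative implementation only has to cover the four methods above. The sketch below is purely illustrative (the class name and the in-memory behaviour are hypothetical, e.g. for tests) and is not part of the patch:

    package org.apache.solr.handler.dataimport;

    import java.util.Properties;

    // Hypothetical sketch: keep the indexer properties in memory instead of on disk.
    public class InMemoryPropertiesWriter implements DIHPropertiesWriter {
      private final Properties props = new Properties();

      public void init(DataImporter dataImporter) {
        // nothing to set up; a file-based writer would read the handler name and config dir here
      }

      public boolean isWritable() {
        return true; // never touches the file system, so always writable
      }

      public void persist(Properties p) {
        props.putAll(p); // merge in the new last_index_time values
      }

      public Properties readIndexerProperties() {
        Properties copy = new Properties();
        copy.putAll(props);
        return copy;
      }
    }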
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriter.java
new file mode 100644
index 0000000..8027611
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHWriter.java
@@ -0,0 +1,93 @@
+package org.apache.solr.handler.dataimport;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import org.apache.solr.common.SolrInputDocument;
+
+/**
+ * @solr.experimental
+ *
+ */
+public interface DIHWriter {
+	
+	/**
+	 * <p>
+	 *  If this writer supports transactions or commit points, then commit any changes, 
+	 *  optionally optimizing the data for read/write performance
+	 * </p>
+	 * @param optimize if true, also optimize the underlying data for read/write performance
+	 */
+	public void commit(boolean optimize);
+	
+	/**
+	 * <p>
+	 *  Release resources used by this writer.  After calling close, reads and updates will throw exceptions.
+	 * </p>
+	 */
+	public void close();
+
+	/**
+	 * <p>
+	 *  If this writer supports transactions or commit points, then roll back any uncommitted changes.
+	 * </p>
+	 */
+	public void rollback();
+
+	/**
+	 * <p>
+	 *  Delete from the writer's underlying data store based on the passed-in writer-specific query. (Optional Operation)
+	 * </p>
+	 * @param q the writer-specific delete query
+	 */
+	public void deleteByQuery(String q);
+
+	/**
+	 * <p>
+	 *  Delete everything from the writer's underlying data store
+	 * </p>
+	 */
+	public void doDeleteAll();
+
+	/**
+	 * <p>
+	 *  Delete from the writer's underlying data store based on the passed-in Primary Key
+	 * </p>
+	 * @param key the primary key of the document to delete
+	 */
+	public void deleteDoc(Object key);
+	
+
+
+	/**
+	 * <p>
+	 *  Add a document to this writer's underlying data store.
+	 * </p>
+	 * @param doc the document to add
+	 * @return true if the document was accepted by the writer
+	 */
+	public boolean upload(SolrInputDocument doc);
+
+
+	
+	/**
+	 * <p>
+	 *  Provide context information for this writer.  init() should be called before using the writer.
+	 * </p>
+	 * @param context the context for this import
+	 */
+	public void init(Context context);
+
+}
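DIHWriter is the new extension point for the destination of an import: SolrWriter below now implements it, and the DocBuilder changes below let a request select a different implementation through the writerImpl parameter. A minimal custom writer, sketched here only for illustration (the class name and the buffer-in-memory behaviour are hypothetical), would look roughly like this:

    package org.apache.solr.handler.dataimport;

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.solr.common.SolrInputDocument;

    // Hypothetical sketch of a DIHWriter that simply buffers documents in memory.
    public class BufferingDIHWriter implements DIHWriter {
      private final List<SolrInputDocument> buffer = new ArrayList<SolrInputDocument>();

      public void init(Context context) {
        // a real writer would pull its configuration from the context here
      }

      public boolean upload(SolrInputDocument doc) {
        return buffer.add(doc);
      }

      public void deleteDoc(Object key) {
        // a real writer would remove the document matching this primary key
      }

      public void deleteByQuery(String q) {
        // writer-specific delete query; nothing meaningful to do for a plain buffer
      }

      public void doDeleteAll() {
        buffer.clear();
      }

      public void commit(boolean optimize) {
        // nothing to make durable in this sketch
      }

      public void rollback() {
        buffer.clear();
      }

      public void close() {
        buffer.clear();
      }
    }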
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java
index 5da8b13..bbf201b 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java
@@ -113,7 +113,7 @@
           final InputSource is = new InputSource(core.getResourceLoader().openConfig(configLoc));
           is.setSystemId(SystemIdResolver.createSystemIdFromResourceName(configLoc));
           importer = new DataImporter(is, core,
-                  dataSources, coreScopeSession);
+                  dataSources, coreScopeSession, myName);
         }
       }
     } catch (Throwable e) {
@@ -165,7 +165,7 @@
         try {
           processConfiguration((NamedList) initArgs.get("defaults"));
           importer = new DataImporter(new InputSource(new StringReader(requestParams.dataConfig)), req.getCore()
-                  , dataSources, coreScopeSession);
+                  , dataSources, coreScopeSession, myName);
         } catch (RuntimeException e) {
           rsp.add("exception", DebugLogger.getStacktraceString(e));
           importer = null;
@@ -280,7 +280,7 @@
   private SolrWriter getSolrWriter(final UpdateRequestProcessor processor,
                                    final SolrResourceLoader loader, final DataImporter.RequestParams requestParams, SolrQueryRequest req) {
 
-    return new SolrWriter(processor, loader.getConfigDir(), myName, req) {
+    return new SolrWriter(processor, req) {
 
       @Override
       public boolean upload(SolrInputDocument document) {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java
index 85ad093..56ae340 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java
@@ -39,7 +39,6 @@
 
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
-import java.io.File;
 import java.io.StringReader;
 import java.text.SimpleDateFormat;
 import java.util.*;
@@ -80,26 +79,35 @@
   public DocBuilder.Statistics cumulativeStatistics = new DocBuilder.Statistics();
 
   private SolrCore core;
+  
+  private DIHPropertiesWriter propWriter;
 
   private ReentrantLock importLock = new ReentrantLock();
 
   private final Map<String , Object> coreScopeSession;
 
   private boolean isDeltaImportSupported = false;
+  private final String handlerName;
 
   /**
    * Only for testing purposes
    */
   DataImporter() {
     coreScopeSession = new ConcurrentHashMap<String, Object>();
+    this.propWriter = new SimplePropertiesWriter();
+    propWriter.init(this);
+    this.handlerName = "dataimport";
   }
 
-  DataImporter(InputSource dataConfig, SolrCore core, Map<String, Properties> ds, Map<String, Object> session) {
+  DataImporter(InputSource dataConfig, SolrCore core, Map<String, Properties> ds, Map<String, Object> session, String handlerName) {
+    this.handlerName = handlerName;
     if (dataConfig == null)
       throw new DataImportHandlerException(SEVERE,
               "Configuration not found");
     this.core = core;
     this.schema = core.getSchema();
+    this.propWriter = new SimplePropertiesWriter();
+    propWriter.init(this);
     dataSourceProps = ds;
     if (session == null)
       session = new HashMap<String, Object>();
@@ -120,7 +128,11 @@
     }
   }
 
-  private void verifyWithSchema(Map<String, DataConfig.Field> fields) {
+  public String getHandlerName() {
+    return handlerName;
+  }
+
+  private void verifyWithSchema(Map<String, DataConfig.Field> fields) {
     Map<String, SchemaField> schemaFields = schema.getFields();
     for (Map.Entry<String, SchemaField> entry : schemaFields.entrySet()) {
       SchemaField sf = entry.getValue();
@@ -353,7 +365,7 @@
     setIndexStartTime(new Date());
 
     try {
-      docBuilder = new DocBuilder(this, writer, requestParams);
+      docBuilder = new DocBuilder(this, writer, propWriter, requestParams);
       checkWritablePersistFile(writer);
       docBuilder.execute();
       if (!requestParams.debug)
@@ -370,11 +382,11 @@
   }
 
   private void checkWritablePersistFile(SolrWriter writer) {
-    File persistFile = writer.getPersistFile();
-    boolean isWritable = persistFile.exists() ? persistFile.canWrite() : persistFile.getParentFile().canWrite();
-    if (isDeltaImportSupported && !isWritable) {
-      throw new DataImportHandlerException(SEVERE, persistFile.getAbsolutePath() +
-          " is not writable. Delta imports are supported by data config but will not work.");
+//  	File persistFile = propWriter.getPersistFile();
+//    boolean isWritable = persistFile.exists() ? persistFile.canWrite() : persistFile.getParentFile().canWrite();
+    if (isDeltaImportSupported && !propWriter.isWritable()) {
+      throw new DataImportHandlerException(SEVERE,
+          "Properties is not writable. Delta imports are supported by data config but will not work.");
     }
   }
 
@@ -384,7 +396,7 @@
 
     try {
       setIndexStartTime(new Date());
-      docBuilder = new DocBuilder(this, writer, requestParams);
+      docBuilder = new DocBuilder(this, writer, propWriter, requestParams);
       checkWritablePersistFile(writer);
       docBuilder.execute();
       if (!requestParams.debug)
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java
index 4b2ebaf..06ee91a 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java
@@ -45,7 +45,7 @@
   private Stack<DebugInfo> debugStack;
 
   NamedList output;
-  private final SolrWriter writer;
+//  private final SolrWriter writer1;
 
   private static final String LINE = "---------------------------------------------";
 
@@ -54,8 +54,8 @@
 
   boolean enabled = true;
 
-  public DebugLogger(SolrWriter solrWriter) {
-    writer = solrWriter;
+  public DebugLogger() {
+//    writer = solrWriter;
     output = new NamedList();
     debugStack = new Stack<DebugInfo>() {
 
@@ -67,7 +67,7 @@
         return super.pop();
       }
     };
-    debugStack.push(new DebugInfo(null, -1, null));
+    debugStack.push(new DebugInfo(null, DIHLogLevels.NONE, null));
     output = debugStack.peek().lst;
   }
 
@@ -75,47 +75,47 @@
     return debugStack.isEmpty() ? null : debugStack.peek();
   }
 
-  public void log(int event, String name, Object row) {
-    if (event == SolrWriter.DISABLE_LOGGING) {
+  public void log(DIHLogLevels event, String name, Object row) {
+    if (event == DIHLogLevels.DISABLE_LOGGING) {
       enabled = false;
       return;
-    } else if (event == SolrWriter.ENABLE_LOGGING) {
+    } else if (event == DIHLogLevels.ENABLE_LOGGING) {
       enabled = true;
       return;
     }
 
-    if (!enabled && event != SolrWriter.START_ENTITY
-            && event != SolrWriter.END_ENTITY) {
+    if (!enabled && event != DIHLogLevels.START_ENTITY
+            && event != DIHLogLevels.END_ENTITY) {
       return;
     }
 
-    if (event == SolrWriter.START_DOC) {
-      debugStack.push(new DebugInfo(null, SolrWriter.START_DOC, peekStack()));
-    } else if (SolrWriter.START_ENTITY == event) {
+    if (event == DIHLogLevels.START_DOC) {
+      debugStack.push(new DebugInfo(null, DIHLogLevels.START_DOC, peekStack()));
+    } else if (DIHLogLevels.START_ENTITY == event) {
       debugStack
-              .push(new DebugInfo(name, SolrWriter.START_ENTITY, peekStack()));
-    } else if (SolrWriter.ENTITY_OUT == event
-            || SolrWriter.PRE_TRANSFORMER_ROW == event) {
-      if (debugStack.peek().type == SolrWriter.START_ENTITY
-              || debugStack.peek().type == SolrWriter.START_DOC) {
+              .push(new DebugInfo(name, DIHLogLevels.START_ENTITY, peekStack()));
+    } else if (DIHLogLevels.ENTITY_OUT == event
+            || DIHLogLevels.PRE_TRANSFORMER_ROW == event) {
+      if (debugStack.peek().type == DIHLogLevels.START_ENTITY
+              || debugStack.peek().type == DIHLogLevels.START_DOC) {
         debugStack.peek().lst.add(null, fmt.format(new Object[]{++debugStack
                 .peek().rowCount}));
         addToNamedList(debugStack.peek().lst, row);
         debugStack.peek().lst.add(null, LINE);
       }
-    } else if (event == SolrWriter.ROW_END) {
+    } else if (event == DIHLogLevels.ROW_END) {
       popAllTransformers();
-    } else if (SolrWriter.END_ENTITY == event) {
-      while (debugStack.pop().type != SolrWriter.START_ENTITY)
+    } else if (DIHLogLevels.END_ENTITY == event) {
+      while (debugStack.pop().type != DIHLogLevels.START_ENTITY)
         ;
-    } else if (SolrWriter.END_DOC == event) {
-      while (debugStack.pop().type != SolrWriter.START_DOC)
+    } else if (DIHLogLevels.END_DOC == event) {
+      while (debugStack.pop().type != DIHLogLevels.START_DOC)
         ;
-    } else if (event == SolrWriter.TRANSFORMER_EXCEPTION) {
+    } else if (event == DIHLogLevels.TRANSFORMER_EXCEPTION) {
       debugStack.push(new DebugInfo(name, event, peekStack()));
       debugStack.peek().lst.add("EXCEPTION",
               getStacktraceString((Exception) row));
-    } else if (SolrWriter.TRANSFORMED_ROW == event) {
+    } else if (DIHLogLevels.TRANSFORMED_ROW == event) {
       debugStack.push(new DebugInfo(name, event, peekStack()));
       debugStack.peek().lst.add(null, LINE);
       addToNamedList(debugStack.peek().lst, row);
@@ -124,10 +124,10 @@
         DataImportHandlerException dataImportHandlerException = (DataImportHandlerException) row;
         dataImportHandlerException.debugged = true;
       }
-    } else if (SolrWriter.ENTITY_META == event) {
+    } else if (DIHLogLevels.ENTITY_META == event) {
       popAllTransformers();
       debugStack.peek().lst.add(name, row);
-    } else if (SolrWriter.ENTITY_EXCEPTION == event) {
+    } else if (DIHLogLevels.ENTITY_EXCEPTION == event) {
       if (row instanceof DataImportHandlerException) {
         DataImportHandlerException dihe = (DataImportHandlerException) row;
         if (dihe.debugged)
@@ -143,8 +143,8 @@
 
   private void popAllTransformers() {
     while (true) {
-      int type = debugStack.peek().type;
-      if (type == SolrWriter.START_DOC || type == SolrWriter.START_ENTITY)
+      DIHLogLevels type = debugStack.peek().type;
+      if (type == DIHLogLevels.START_DOC || type == DIHLogLevels.START_ENTITY)
         break;
       debugStack.pop();
     }
@@ -181,23 +181,23 @@
 
       @Override
       public Object getData(String query) {
-        writer.log(SolrWriter.ENTITY_META, "query", query);
+        log(DIHLogLevels.ENTITY_META, "query", query);
         long start = System.currentTimeMillis();
         try {
           return ds.getData(query);
         } catch (DataImportHandlerException de) {
-          writer.log(SolrWriter.ENTITY_EXCEPTION,
+          log(DIHLogLevels.ENTITY_EXCEPTION,
                   null, de);
           throw de;
         } catch (Exception e) {
-          writer.log(SolrWriter.ENTITY_EXCEPTION,
+          log(DIHLogLevels.ENTITY_EXCEPTION,
                   null, e);
           DataImportHandlerException de = new DataImportHandlerException(
                   DataImportHandlerException.SEVERE, "", e);
           de.debugged = true;
           throw de;
         } finally {
-          writer.log(SolrWriter.ENTITY_META, "time-taken", DocBuilder
+          log(DIHLogLevels.ENTITY_META, "time-taken", DocBuilder
                   .getTimeElapsedSince(start));
         }
       }
@@ -208,18 +208,18 @@
     return new Transformer() {
       @Override
       public Object transformRow(Map<String, Object> row, Context context) {
-        writer.log(SolrWriter.PRE_TRANSFORMER_ROW, null, row);
+        log(DIHLogLevels.PRE_TRANSFORMER_ROW, null, row);
         String tName = getTransformerName(t);
         Object result = null;
         try {
           result = t.transformRow(row, context);
-          writer.log(SolrWriter.TRANSFORMED_ROW, tName, result);
+          log(DIHLogLevels.TRANSFORMED_ROW, tName, result);
         } catch (DataImportHandlerException de) {
-          writer.log(SolrWriter.TRANSFORMER_EXCEPTION, tName, de);
+          log(DIHLogLevels.TRANSFORMER_EXCEPTION, tName, de);
           de.debugged = true;
           throw de;
         } catch (Exception e) {
-          writer.log(SolrWriter.TRANSFORMER_EXCEPTION, tName, e);
+          log(DIHLogLevels.TRANSFORMER_EXCEPTION, tName, e);
           DataImportHandlerException de = new DataImportHandlerException(DataImportHandlerException.SEVERE, "", e);
           de.debugged = true;
           throw de;
@@ -258,23 +258,23 @@
 
     NamedList lst;
 
-    int type;
+    DIHLogLevels type;
 
     DebugInfo parent;
 
-    public DebugInfo(String name, int type, DebugInfo parent) {
+    public DebugInfo(String name, DIHLogLevels type, DebugInfo parent) {
       this.name = name;
       this.type = type;
       this.parent = parent;
       lst = new NamedList();
       if (parent != null) {
         String displayName = null;
-        if (type == SolrWriter.START_ENTITY) {
+        if (type == DIHLogLevels.START_ENTITY) {
           displayName = "entity:" + name;
-        } else if (type == SolrWriter.TRANSFORMED_ROW
-                || type == SolrWriter.TRANSFORMER_EXCEPTION) {
+        } else if (type == DIHLogLevels.TRANSFORMED_ROW
+                || type == DIHLogLevels.TRANSFORMER_EXCEPTION) {
           displayName = "transformer:" + name;
-        } else if (type == SolrWriter.START_DOC) {
+        } else if (type == DIHLogLevels.START_DOC) {
           this.name = displayName = "document#" + SolrWriter.getDocCount();
         }
         parent.lst.add(displayName, lst);
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java
index fed2306..979de00 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java
@@ -56,33 +56,67 @@
 
   public Statistics importStatistics = new Statistics();
 
-  SolrWriter writer;
+  DIHWriter writer;
 
   DataImporter.RequestParams requestParameters;
 
   boolean verboseDebug = false;
 
-   Map<String, Object> session = new ConcurrentHashMap<String, Object>();
+  Map<String, Object> session = new ConcurrentHashMap<String, Object>();
 
   static final ThreadLocal<DocBuilder> INSTANCE = new ThreadLocal<DocBuilder>();
   Map<String, Object> functionsNamespace;
   private Properties persistedProperties;
+  
+  private DIHPropertiesWriter propWriter;
+  private static final String PARAM_WRITER_IMPL = "writerImpl";
+  private static final String DEFAULT_WRITER_NAME = "SolrWriter";
+  private DebugLogger debugLogger;
 
-  public DocBuilder(DataImporter dataImporter, SolrWriter writer, DataImporter.RequestParams reqParams) {
+  @SuppressWarnings("unchecked")
+  public DocBuilder(DataImporter dataImporter, SolrWriter solrWriter, DIHPropertiesWriter propWriter, DataImporter.RequestParams reqParams) {
     INSTANCE.set(this);
     this.dataImporter = dataImporter;
-    this.writer = writer;
+    this.propWriter = propWriter;
     DataImporter.QUERY_COUNT.set(importStatistics.queryCount);
     requestParameters = reqParams;
     verboseDebug = requestParameters.debug && requestParameters.verbose;
     functionsNamespace = EvaluatorBag.getFunctionsNamespace(this.dataImporter.getConfig().functions, this);
-    persistedProperties = writer.readIndexerProperties();
+    persistedProperties = propWriter.readIndexerProperties();
+
+    String writerClassStr = null;
+    if (reqParams != null && reqParams.requestParams != null) {
+      writerClassStr = (String) reqParams.requestParams.get(PARAM_WRITER_IMPL);
+    }
+    if (writerClassStr != null && !writerClassStr.equals(DEFAULT_WRITER_NAME) && !writerClassStr.equals(DocBuilder.class.getPackage().getName() + "." + DEFAULT_WRITER_NAME)) {
+      try {
+        Class<DIHWriter> writerClass = loadClass(writerClassStr, dataImporter.getCore());
+        this.writer = writerClass.newInstance();
+      } catch (Exception e) {
+        throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Unable to load Writer implementation:" + writerClassStr, e);
+      }
+    } else {
+      writer = solrWriter;
+    }
+    ContextImpl ctx = new ContextImpl(null, null, null, null, reqParams.requestParams, null, this);
+    writer.init(ctx);
+  }
+
+
+
+
+  DebugLogger getDebugLogger() {
+    if (debugLogger == null) {
+      debugLogger = new DebugLogger();
+    }
+    return debugLogger;
   }
 
   public VariableResolverImpl getVariableResolver() {
     try {
       VariableResolverImpl resolver = null;
-      if(dataImporter != null && dataImporter.getCore() != null){
+      if(dataImporter != null && dataImporter.getCore() != null
+          && dataImporter.getCore().getResourceLoader().getCoreProperties() != null){
         resolver =  new VariableResolverImpl(dataImporter.getCore().getResourceLoader().getCoreProperties());
       } else resolver = new VariableResolverImpl();
       Map<String, Object> indexerNamespace = new HashMap<String, Object>();
@@ -135,94 +169,100 @@
 
   @SuppressWarnings("unchecked")
   public void execute() {
-    dataImporter.store(DataImporter.STATUS_MSGS, statusMessages);
-    document = dataImporter.getConfig().document;
-    final AtomicLong startTime = new AtomicLong(System.currentTimeMillis());
-    statusMessages.put(TIME_ELAPSED, new Object() {
-      @Override
-      public String toString() {
-        return getTimeElapsedSince(startTime.get());
-      }
-    });
-
-    statusMessages.put(DataImporter.MSG.TOTAL_QUERIES_EXECUTED,
-            importStatistics.queryCount);
-    statusMessages.put(DataImporter.MSG.TOTAL_ROWS_EXECUTED,
-            importStatistics.rowsCount);
-    statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED,
-            importStatistics.docCount);
-    statusMessages.put(DataImporter.MSG.TOTAL_DOCS_SKIPPED,
-            importStatistics.skipDocCount);
-
-    List<String> entities = requestParameters.entities;
-
-    // Trigger onImportStart
-    if (document.onImportStart != null) {
-      invokeEventListener(document.onImportStart);
-    }
-    AtomicBoolean fullCleanDone = new AtomicBoolean(false);
-    //we must not do a delete of *:* multiple times if there are multiple root entities to be run
-    Properties lastIndexTimeProps = new Properties();
-    lastIndexTimeProps.setProperty(LAST_INDEX_KEY,
-            DataImporter.DATE_TIME_FORMAT.get().format(dataImporter.getIndexStartTime()));
-    for (DataConfig.Entity e : document.entities) {
-      if (entities != null && !entities.contains(e.name))
-        continue;
-      lastIndexTimeProps.setProperty(e.name + "." + LAST_INDEX_KEY,
-              DataImporter.DATE_TIME_FORMAT.get().format(new Date()));
-      root = e;
-      String delQuery = e.allAttributes.get("preImportDeleteQuery");
-      if (dataImporter.getStatus() == DataImporter.Status.RUNNING_DELTA_DUMP) {
-        cleanByQuery(delQuery, fullCleanDone);
-        doDelta();
-        delQuery = e.allAttributes.get("postImportDeleteQuery");
-        if (delQuery != null) {
-          fullCleanDone.set(false);
-          cleanByQuery(delQuery, fullCleanDone);
-        }
-      } else {
-        cleanByQuery(delQuery, fullCleanDone);
-        doFullDump();
-        delQuery = e.allAttributes.get("postImportDeleteQuery");
-        if (delQuery != null) {
-          fullCleanDone.set(false);
-          cleanByQuery(delQuery, fullCleanDone);
-        }
-      }
-      statusMessages.remove(DataImporter.MSG.TOTAL_DOC_PROCESSED);
-    }
-
-    if (stop.get()) {
-      // Dont commit if aborted using command=abort
-      statusMessages.put("Aborted", DataImporter.DATE_TIME_FORMAT.get().format(new Date()));
-      rollback();
-    } else {
-      // Do not commit unnecessarily if this is a delta-import and no documents were created or deleted
-      if (!requestParameters.clean) {
-        if (importStatistics.docCount.get() > 0 || importStatistics.deletedDocCount.get() > 0) {
-          finish(lastIndexTimeProps);
-        }
-      } else {
-        // Finished operation normally, commit now
-        finish(lastIndexTimeProps);
-      }
-      
-      if (writer != null) {
-        writer.finish();
-      }
-      
-      if (document.onImportEnd != null) {
-        invokeEventListener(document.onImportEnd);
-      }
-    }
-
-    statusMessages.remove(TIME_ELAPSED);
-    statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, ""+ importStatistics.docCount.get());
-    if(importStatistics.failedDocCount.get() > 0)
-      statusMessages.put(DataImporter.MSG.TOTAL_FAILED_DOCS, ""+ importStatistics.failedDocCount.get());
-
-    statusMessages.put("Time taken ", getTimeElapsedSince(startTime.get()));
-    LOG.info("Time taken = " + getTimeElapsedSince(startTime.get()));
+  	try {
+	    dataImporter.store(DataImporter.STATUS_MSGS, statusMessages);
+	    document = dataImporter.getConfig().document;
+	    final AtomicLong startTime = new AtomicLong(System.currentTimeMillis());
+	    statusMessages.put(TIME_ELAPSED, new Object() {
+	      @Override
+	      public String toString() {
+	        return getTimeElapsedSince(startTime.get());
+	      }
+	    });
+	
+	    statusMessages.put(DataImporter.MSG.TOTAL_QUERIES_EXECUTED,
+	            importStatistics.queryCount);
+	    statusMessages.put(DataImporter.MSG.TOTAL_ROWS_EXECUTED,
+	            importStatistics.rowsCount);
+	    statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED,
+	            importStatistics.docCount);
+	    statusMessages.put(DataImporter.MSG.TOTAL_DOCS_SKIPPED,
+	            importStatistics.skipDocCount);
+	
+	    List<String> entities = requestParameters.entities;
+	
+	    // Trigger onImportStart
+	    if (document.onImportStart != null) {
+	      invokeEventListener(document.onImportStart);
+	    }
+	    AtomicBoolean fullCleanDone = new AtomicBoolean(false);
+	    //we must not do a delete of *:* multiple times if there are multiple root entities to be run
+	    Properties lastIndexTimeProps = new Properties();
+	    lastIndexTimeProps.setProperty(LAST_INDEX_KEY,
+	            DataImporter.DATE_TIME_FORMAT.get().format(dataImporter.getIndexStartTime()));
+	    for (DataConfig.Entity e : document.entities) {
+	      if (entities != null && !entities.contains(e.name))
+	        continue;
+	      lastIndexTimeProps.setProperty(e.name + "." + LAST_INDEX_KEY,
+	              DataImporter.DATE_TIME_FORMAT.get().format(new Date()));
+	      root = e;
+	      String delQuery = e.allAttributes.get("preImportDeleteQuery");
+	      if (dataImporter.getStatus() == DataImporter.Status.RUNNING_DELTA_DUMP) {
+	        cleanByQuery(delQuery, fullCleanDone);
+	        doDelta();
+	        delQuery = e.allAttributes.get("postImportDeleteQuery");
+	        if (delQuery != null) {
+	          fullCleanDone.set(false);
+	          cleanByQuery(delQuery, fullCleanDone);
+	        }
+	      } else {
+	        cleanByQuery(delQuery, fullCleanDone);
+	        doFullDump();
+	        delQuery = e.allAttributes.get("postImportDeleteQuery");
+	        if (delQuery != null) {
+	          fullCleanDone.set(false);
+	          cleanByQuery(delQuery, fullCleanDone);
+	        }
+	      }
+	      statusMessages.remove(DataImporter.MSG.TOTAL_DOC_PROCESSED);
+	    }
+	
+	    if (stop.get()) {
+	      // Dont commit if aborted using command=abort
+	      statusMessages.put("Aborted", DataImporter.DATE_TIME_FORMAT.get().format(new Date()));
+	      rollback();
+	    } else {
+	      // Do not commit unnecessarily if this is a delta-import and no documents were created or deleted
+	      if (!requestParameters.clean) {
+	        if (importStatistics.docCount.get() > 0 || importStatistics.deletedDocCount.get() > 0) {
+	          finish(lastIndexTimeProps);
+	        }
+	      } else {
+	        // Finished operation normally, commit now
+	        finish(lastIndexTimeProps);
+	      } 
+	      
+	      if (document.onImportEnd != null) {
+	        invokeEventListener(document.onImportEnd);
+	      }
+	    }
+	
+	    statusMessages.remove(TIME_ELAPSED);
+	    statusMessages.put(DataImporter.MSG.TOTAL_DOC_PROCESSED, ""+ importStatistics.docCount.get());
+	    if(importStatistics.failedDocCount.get() > 0)
+	      statusMessages.put(DataImporter.MSG.TOTAL_FAILED_DOCS, ""+ importStatistics.failedDocCount.get());
+	
+	    statusMessages.put("Time taken ", getTimeElapsedSince(startTime.get()));
+	    LOG.info("Time taken = " + getTimeElapsedSince(startTime.get()));
+	  } catch (Exception e) {
+	    throw new RuntimeException(e);
+	  } finally {
+	    if (writer != null) {
+	      writer.close();
+	    }
+	  }
   }
 
   @SuppressWarnings("unchecked")
@@ -238,7 +278,7 @@
         addStatusMessage("Optimized");
     }
     try {
-      writer.persist(lastIndexTimeProps);
+      propWriter.persist(lastIndexTimeProps);
     } catch (Exception e) {
       LOG.error("Could not write property file", e);
       statusMessages.put("error", "Could not write property file. Delta imports will not work. " +
@@ -433,11 +473,11 @@
     private void runAThread(ThreadedEntityProcessorWrapper epw, EntityRow rows, String currProcess) throws Exception {
       currentEntityProcWrapper.set(epw);
       epw.threadedInit(context);
-      initEntity();
       try {
-        epw.init(rows);
-        DocWrapper docWrapper = this.docWrapper;
         Context.CURRENT_CONTEXT.set(context);
+        epw.init(rows);
+        initEntity();
+        DocWrapper docWrapper = this.docWrapper;
         for (; ;) {
           if(DocBuilder.this.stop.get()) break;
           try {
@@ -556,11 +596,11 @@
     Context.CURRENT_CONTEXT.set(ctx);
     
     if (requestParameters.start > 0) {
-      writer.log(SolrWriter.DISABLE_LOGGING, null, null);
+      getDebugLogger().log(DIHLogLevels.DISABLE_LOGGING, null, null);
     }
 
     if (verboseDebug) {
-      writer.log(SolrWriter.START_ENTITY, entity.name, null);
+      getDebugLogger().log(DIHLogLevels.START_ENTITY, entity.name, null);
     }
 
     int seenDocCount = 0;
@@ -574,11 +614,11 @@
           seenDocCount++;
 
           if (seenDocCount > requestParameters.start) {
-            writer.log(SolrWriter.ENABLE_LOGGING, null, null);
+            getDebugLogger().log(DIHLogLevels.ENABLE_LOGGING, null, null);
           }
 
           if (verboseDebug && entity.isDocRoot) {
-            writer.log(SolrWriter.START_DOC, entity.name, null);
+            getDebugLogger().log(DIHLogLevels.START_DOC, entity.name, null);
           }
           if (doc == null && entity.isDocRoot) {
             doc = new DocWrapper();
@@ -607,7 +647,7 @@
           }
 
           if (verboseDebug) {
-            writer.log(SolrWriter.ENTITY_OUT, entity.name, arow);
+            getDebugLogger().log(DIHLogLevels.ENTITY_OUT, entity.name, arow);
           }
           importStatistics.rowsCount.incrementAndGet();
           if (doc != null) {
@@ -643,7 +683,7 @@
 
         } catch (DataImportHandlerException e) {
           if (verboseDebug) {
-            writer.log(SolrWriter.ENTITY_EXCEPTION, entity.name, e);
+            getDebugLogger().log(DIHLogLevels.ENTITY_EXCEPTION, entity.name, e);
           }
           if(e.getErrCode() == DataImportHandlerException.SKIP_ROW){
             continue;
@@ -662,21 +702,21 @@
             throw e;
         } catch (Throwable t) {
           if (verboseDebug) {
-            writer.log(SolrWriter.ENTITY_EXCEPTION, entity.name, t);
+            getDebugLogger().log(DIHLogLevels.ENTITY_EXCEPTION, entity.name, t);
           }
           throw new DataImportHandlerException(DataImportHandlerException.SEVERE, t);
         } finally {
           if (verboseDebug) {
-            writer.log(SolrWriter.ROW_END, entity.name, null);
+            getDebugLogger().log(DIHLogLevels.ROW_END, entity.name, null);
             if (entity.isDocRoot)
-              writer.log(SolrWriter.END_DOC, null, null);
+              getDebugLogger().log(DIHLogLevels.END_DOC, null, null);
             Context.CURRENT_CONTEXT.remove();
           }
         }
       }
     } finally {
       if (verboseDebug) {
-        writer.log(SolrWriter.END_ENTITY, null, null);
+        getDebugLogger().log(DIHLogLevels.END_ENTITY, null, null);
       }
       entityProcessor.destroy();
     }
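Taken together, the DocBuilder changes above mean the writer is now chosen per request: unless the writerImpl parameter names another class, the SolrWriter handed in by DataImportHandler is used, and either way init(Context) is called on the writer before the import starts and close() in the finally block afterwards. A request selecting a custom writer would look roughly like this (host, port and class name are illustrative; only the writerImpl parameter and the conventional dataimport handler path come from this patch):

    http://localhost:8983/solr/dataimport?command=full-import&writerImpl=org.example.BufferingDIHWriter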
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
index c85dec1..db9d896 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
@@ -83,7 +83,7 @@
       @Override
       public boolean add(Transformer transformer) {
         if (docBuilder != null && docBuilder.verboseDebug) {
-          transformer = docBuilder.writer.getDebugLogger().wrapTransformer(transformer);
+          transformer = docBuilder.getDebugLogger().wrapTransformer(transformer);
         }
         return super.add(transformer);
       }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
index 076734e..8c49f25 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
@@ -217,7 +217,6 @@
         Evaluator evaluator = evaluators.get(fname);
         if (evaluator == null)
           return null;
-        VariableResolverImpl vri = VariableResolverImpl.CURRENT_VARIABLE_RESOLVER.get();
         return evaluator.evaluate(m.group(2), Context.CURRENT_CONTEXT.get());
       }
 
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
new file mode 100644
index 0000000..b9ab396
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SimplePropertiesWriter.java
@@ -0,0 +1,123 @@
+package org.apache.solr.handler.dataimport;
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Properties;
+
+import org.apache.solr.core.SolrCore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class SimplePropertiesWriter implements DIHPropertiesWriter {
+	private static final Logger log = LoggerFactory.getLogger(SimplePropertiesWriter.class);
+
+	static final String IMPORTER_PROPERTIES = "dataimport.properties";
+
+	static final String LAST_INDEX_KEY = "last_index_time";
+
+	private String persistFilename = IMPORTER_PROPERTIES;
+
+	private String configDir = null;
+
+
+
+    public void init(DataImporter dataImporter) {
+      SolrCore core = dataImporter.getCore();
+      String configDir = core == null ? "." : core.getResourceLoader().getConfigDir();
+      String persistFileName = dataImporter.getHandlerName();
+
+      this.configDir = configDir;
+      if (persistFileName != null) {
+        persistFilename = persistFileName + ".properties";
+      }
+    }
+
+
+
+	
+  private File getPersistFile() {
+    String filePath = configDir;
+    if (configDir != null && !configDir.endsWith(File.separator))
+      filePath += File.separator;
+    filePath += persistFilename;
+    return new File(filePath);
+  }
+
+    public boolean isWritable() {
+        File persistFile = getPersistFile();
+        return persistFile.exists() ? persistFile.canWrite() : persistFile.getParentFile().canWrite();
+
+    }
+
+    @Override
+	public void persist(Properties p) {
+		OutputStream propOutput = null;
+
+		Properties props = readIndexerProperties();
+
+		try {
+			props.putAll(p);
+			String filePath = configDir;
+			if (configDir != null && !configDir.endsWith(File.separator))
+				filePath += File.separator;
+			filePath += persistFilename;
+			propOutput = new FileOutputStream(filePath);
+			props.store(propOutput, null);
+			log.info("Wrote last indexed time to " + persistFilename);
+		} catch (Exception e) {
+			throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Unable to persist Index Start Time", e);
+		} finally {
+			try {
+				if (propOutput != null)
+					propOutput.close();
+			} catch (IOException e) {
+				propOutput = null;
+			}
+		}
+	}
+
+	@Override
+	public Properties readIndexerProperties() {
+		Properties props = new Properties();
+		InputStream propInput = null;
+
+		try {
+			propInput = new FileInputStream(configDir + persistFilename);
+			props.load(propInput);
+			log.info("Read " + persistFilename);
+		} catch (Exception e) {
+			log.warn("Unable to read: " + persistFilename);
+		} finally {
+			try {
+				if (propInput != null)
+					propInput.close();
+			} catch (IOException e) {
+				propInput = null;
+			}
+		}
+
+		return props;
+	}
+
+}
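SimplePropertiesWriter keeps the on-disk behaviour that previously lived in SolrWriter: the values are stored in <handler name>.properties (dataimport.properties by default) inside the core's config directory, with one global last_index_time entry plus one per root entity, written out by DocBuilder.finish(). The resulting file looks roughly like this (timestamps and the entity name are illustrative; java.util.Properties adds the comment line and escapes the colons):

    #Wed Jul 20 12:00:00 UTC 2011
    last_index_time=2011-07-20 12\:00\:00
    item.last_index_time=2011-07-20 12\:00\:00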
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
index e7bbb6c..14b3c48 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
@@ -27,46 +27,42 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.*;
-import java.util.Properties;
+import java.util.Map;
+import java.util.Set;
 
 /**
- * <p> Writes documents to SOLR as well as provides methods for loading and persisting last index time. </p>
+ * <p> Writes documents to SOLR. </p>
  * <p/>
  * <b>This API is experimental and may change in the future.</b>
  *
  * @since solr 1.3
  */
-public class SolrWriter {
+public class SolrWriter implements DIHWriter {
   private static final Logger log = LoggerFactory.getLogger(SolrWriter.class);
 
-  static final String IMPORTER_PROPERTIES = "dataimport.properties";
-
   static final String LAST_INDEX_KEY = "last_index_time";
 
   private final UpdateRequestProcessor processor;
 
-  private final String configDir;
-
-  private String persistFilename = IMPORTER_PROPERTIES;
-
   DebugLogger debugLogger;
 
   SolrQueryRequest req;
 
-  public SolrWriter(UpdateRequestProcessor processor, String confDir, SolrQueryRequest req) {
+  public SolrWriter(UpdateRequestProcessor processor, SolrQueryRequest req) {
     this.processor = processor;
-    configDir = confDir;
     this.req = req;
   }
-  public SolrWriter(UpdateRequestProcessor processor, String confDir, String filePrefix, SolrQueryRequest req) {
-    this.processor = processor;
-    configDir = confDir;
-    if(filePrefix != null){
-      persistFilename = filePrefix+".properties";
-    }
-    this.req = req;
+  
+  @Override
+  public void close() {
+    try {
+      processor.finish();
+    } catch (IOException e) {
+      throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
+          "Unable to call finish() on UpdateRequestProcessor", e);
+    }
   }
-
+  @Override
   public boolean upload(SolrInputDocument d) {
     try {
       AddUpdateCommand command = new AddUpdateCommand(req);
@@ -79,7 +75,8 @@
 
     return true;
   }
-
+  
+  @Override
   public void deleteDoc(Object id) {
     try {
       log.info("Deleting document: " + id);
@@ -90,75 +87,8 @@
       log.error("Exception while deleteing: " + id, e);
     }
   }
-
-
-  void persist(Properties p) {
-    OutputStream propOutput = null;
-
-    Properties props = readIndexerProperties();
-
-    try {
-      props.putAll(p);
-      File persistFile = getPersistFile();
-      propOutput = new FileOutputStream(persistFile);
-      props.store(propOutput, null);
-      log.info("Wrote last indexed time to " + persistFile.getAbsolutePath());
-    } catch (FileNotFoundException e) {
-      throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
-              "Unable to persist Index Start Time", e);
-    } catch (IOException e) {
-      throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
-              "Unable to persist Index Start Time", e);
-    } finally {
-      try {
-        if (propOutput != null)
-          propOutput.close();
-      } catch (IOException e) {
-        propOutput = null;
-      }
-    }
-  }
-
-  File getPersistFile() {
-    String filePath = configDir;
-    if (configDir != null && !configDir.endsWith(File.separator))
-      filePath += File.separator;
-    filePath += persistFilename;
-    return new File(filePath);
-  }
-
-  void finish() {
-    try {
-      processor.finish();
-    } catch (IOException e) {
-      throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
-          "Unable to call finish() on UpdateRequestProcessor", e);
-    }
-  }
-  
-  Properties readIndexerProperties() {
-    Properties props = new Properties();
-    InputStream propInput = null;
-
-    try {
-      propInput = new FileInputStream(configDir
-              + persistFilename);
-      props.load(propInput);
-      log.info("Read " + persistFilename);
-    } catch (Exception e) {
-      log.warn("Unable to read: " + persistFilename);
-    } finally {
-      try {
-        if (propInput != null)
-          propInput.close();
-      } catch (IOException e) {
-        propInput = null;
-      }
-    }
-
-    return props;
-  }
-
+
+  @Override
   public void deleteByQuery(String query) {
     try {
       log.info("Deleting documents from Solr with query: " + query);
@@ -170,6 +100,7 @@
     }
   }
 
+  @Override
   public void commit(boolean optimize) {
     try {
       CommitUpdateCommand commit = new CommitUpdateCommand(req,optimize);
@@ -179,6 +110,7 @@
     }
   }
 
+  @Override
   public void rollback() {
     try {
       RollbackUpdateCommand rollback = new RollbackUpdateCommand(req);
@@ -188,6 +120,7 @@
     }
   }
 
+  @Override
   public void doDeleteAll() {
     try {
       DeleteUpdateCommand deleteCommand = new DeleteUpdateCommand(req);
@@ -225,28 +158,8 @@
       return null;
     }
   }
-
-  public DebugLogger getDebugLogger() {
-    if (debugLogger == null) {
-      debugLogger = new DebugLogger(this);
-    }
-    return debugLogger;
-  }
-
-  /**
-   * This method is used for verbose debugging
-   *
-   * @param event The event name start.entity ,end.entity ,transformer.row
-   * @param name  Name of the entity/transformer
-   * @param row   The actual data . Can be a Map<String,object> or a List<Map<String,object>>
-   */
-  public void log(int event, String name, Object row) {
-    getDebugLogger().log(event, name, row);
-  }
-
-  public static final int START_ENTITY = 1, END_ENTITY = 2,
-          TRANSFORMED_ROW = 3, ENTITY_META = 4, PRE_TRANSFORMER_ROW = 5,
-          START_DOC = 6, END_DOC = 7, ENTITY_OUT = 8, ROW_END = 9,
-          TRANSFORMER_EXCEPTION = 10, ENTITY_EXCEPTION = 11, DISABLE_LOGGING = 12,
-          ENABLE_LOGGING = 13;
+  @Override
+  public void init(Context context) {
+    /* NO-OP */
+  }
 }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java
index 10ed0ac..c4dffa6 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/VariableResolverImpl.java
@@ -35,11 +35,6 @@
 public class VariableResolverImpl extends VariableResolver {
   private Map<String, Object> container = new HashMap<String, Object>();
 
-  /**
-   * Used for creating Evaluators
-   */
-  Context context;
-
   private final TemplateString templateString = new TemplateString();
 
   private final Map defaults ;
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java
index 27ba8b3..f8bfa21 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilder.java
@@ -198,7 +198,7 @@
     Boolean finishCalled = Boolean.FALSE;
 
     public SolrWriterImpl() {
-      super(null, ".",null);
+      super(null, null);
     }
 
     @Override
@@ -207,11 +207,6 @@
     }
 
     @Override
-    public void log(int event, String name, Object row) {
-      // Do nothing
-    }
-
-    @Override
     public void doDeleteAll() {
       deleteAllCalled = Boolean.TRUE;
     }
@@ -222,7 +217,7 @@
     }
     
     @Override
-    public void finish() {
+    public void close() {
       finishCalled = Boolean.TRUE;
     }
   }
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilderThreaded.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilderThreaded.java
new file mode 100644
index 0000000..0e340c1
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestDocBuilderThreaded.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.dataimport;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+
+/**
+ * Test DocBuilder with "threads"
+ */
+public class TestDocBuilderThreaded extends AbstractDataImportHandlerTestCase {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("dataimport-solrconfig.xml", "dataimport-schema.xml");
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    List<Map<String, Object>> docs = new ArrayList<Map<String, Object>>();
+    docs.add(createMap("id", "1", "worker", "one"));
+    docs.add(createMap("id", "2", "worker", "two"));
+    docs.add(createMap("id", "3", "worker", "three"));
+    docs.add(createMap("id", "4", "worker", "four"));
+    MockDataSource.setIterator("select * from y", docs.iterator());
+    for (Map<String, Object> aDoc : docs) {
+      String theWorker = (String) aDoc.get("worker");
+      final List<Map<String, Object>> details = getDetails4Worker(theWorker);
+      log.info("details: " + details);
+      MockDataSource.setIterator(theWorker, details.iterator());
+    }
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    DemoProcessor.entitiesInitialized = 0;
+    DemoEvaluator.evaluated = 0;
+    MockDataSource.clearCache();
+    super.tearDown();
+  }
+
+  @Test
+  public void testProcessorThreaded2Entities() throws Exception {
+    runFullImport(threaded2EntitiesWithProcessor);
+    assertEquals("EntityProcessor.init() for the child entity was called fewer times than the number of rows",
+        4, DemoProcessor.entitiesInitialized);
+  }
+
+  @Test
+  public void testProcessor2EntitiesNoThreads() throws Exception {
+    runFullImport(twoEntitiesWithProcessor);
+    assertEquals("EntityProcessor.init() for the child entity was called fewer times than the number of rows",
+        4, DemoProcessor.entitiesInitialized);
+  }
+
+  /*
+   * This test fails in the test environment but works in a live setup.
+   */
+  @Test
+  public void testEvaluator() throws Exception {
+    runFullImport(twoEntitiesWithEvaluatorProcessor);
+    assertEquals("Evaluator was invoked fewer times than the number of rows",
+        4, DemoEvaluator.evaluated);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<Map<String, Object>> getDetails4Worker(String aWorker) {
+    List<Map<String, Object>> details4Worker = new ArrayList<Map<String, Object>>();
+    details4Worker.add(createMap("author_s", "Author_" + aWorker, "title_s", "Title for " + aWorker, "text_s", " Text for " + aWorker));
+    return details4Worker;
+  }
+
+  private final String threaded2EntitiesWithProcessor =
+
+      "<dataConfig> <dataSource type=\"MockDataSource\"/>\n" +
+          "<document>" +
+          "<entity name=\"job\" query=\"select * from y\"" +
+          " pk=\"id\" \n" +
+          " threads='1'\n" +
+          ">" +
+          "<field column=\"id\" />\n" +
+          "<entity name=\"details\" processor=\"TestDocBuilderThreaded$DemoProcessor\" \n" +
+          "worker=\"${job.worker}\" \n" +
+          "query=\"${job.worker}\" \n" +
+          "transformer=\"TemplateTransformer\" " +
+          " >" +
+          "<field column=\"author_s\" />" +
+          "<field column=\"title_s\" />" +
+          " <field column=\"text_s\" />" +
+          " <field column=\"generated_id_s\" template=\"generated_${job.id}\" />" +
+          "</entity>" +
+          "</entity>" +
+          "</document>" +
+          "</dataConfig>";
+
+  private final String twoEntitiesWithProcessor =
+
+      "<dataConfig> <dataSource type=\"MockDataSource\"/>\n" +
+          "<document>" +
+          "<entity name=\"job\" query=\"select * from y\"" +
+          " pk=\"id\" \n" +
+          ">" +
+          "<field column=\"id\" />\n" +
+          "<entity name=\"details\" processor=\"TestDocBuilderThreaded$DemoProcessor\" \n" +
+          "worker=\"${job.worker}\" \n" +
+          "query=\"${job.worker}\" \n" +
+          "transformer=\"TemplateTransformer\" " +
+          " >" +
+          "<field column=\"author_s\" />" +
+          "<field column=\"title_s\" />" +
+          " <field column=\"text_s\" />" +
+          " <field column=\"generated_id_s\" template=\"generated_${job.id}\" />" +
+          "</entity>" +
+          "</entity>" +
+          "</document>" +
+          "</dataConfig>";
+
+  private final String twoEntitiesWithEvaluatorProcessor =
+
+      "<dataConfig> <dataSource type=\"MockDataSource\"/>\n" +
+          "<function name=\"concat\" class=\"TestDocBuilderThreaded$DemoEvaluator\" />" +
+          "<document>" +
+          "<entity name=\"job\" query=\"select * from y\"" +
+          " pk=\"id\" \n" +
+          " threads=\"1\" " +
+          ">" +
+          "<field column=\"id\" />\n" +
+          "<entity name=\"details\" processor=\"TestDocBuilderThreaded$DemoProcessor\" \n" +
+          "worker=\"${dataimporter.functions.concat(details.author_s, ':_:' , details.title_s, 9 )}\" \n" +
+          "query=\"${job.worker}\" \n" +
+          "transformer=\"TemplateTransformer\" " +
+          " >" +
+          "<field column=\"author_s\" />" +
+          "<field column=\"title_s\" />" +
+          " <field column=\"text_s\" />" +
+          " <field column=\"generated_id_s\" template=\"generated_${job.id}\" />" +
+          "</entity>" +
+          "</entity>" +
+          "</document>" +
+          "</dataConfig>";
+
+
+  public static class DemoProcessor extends SqlEntityProcessor {
+
+    public static int entitiesInitialized = 0;
+
+    @Override
+    public void init(Context context) {
+      super.init(context);
+      String result = context.getResolvedEntityAttribute("worker");
+      if (result == null || result.trim().length() == 0) {
+        throw new DataImportHandlerException(DataImportHandlerException.SEVERE, "Could not resolve entity attribute");
+      } else entitiesInitialized++;
+    }
+  }
+
+  public static class DemoEvaluator extends Evaluator {
+    public static int evaluated = 0;
+
+    /* (non-Javadoc)
+    * @see org.apache.solr.handler.dataimport.Evaluator#evaluate(java.lang.String, org.apache.solr.handler.dataimport.Context)
+    */
+    @Override
+    @SuppressWarnings("unchecked")
+    public String evaluate(String expression, Context context) {
+      List allParams = EvaluatorBag.parseParams(expression, context.getVariableResolver());
+      StringBuilder result = new StringBuilder();
+      for (Object aVar : allParams) {
+        result.append(aVar.toString());
+      }
+      evaluated++;
+      return result.toString();
+    }
+  }
+}
diff --git a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
index 0f75982..155d76f 100644
--- a/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
@@ -367,7 +367,8 @@
 
     public DocListAndSet getMoreLikeThis( Reader reader, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
     {
-      rawMLTQuery = mlt.like(reader);
+      // analyze with the first field only, which preserves the previous behavior
+      rawMLTQuery = mlt.like(reader, mlt.getFieldNames()[0]);
       boostedMLTQuery = getBoostedQuery( rawMLTQuery );
       if( terms != null ) {
         fillInterestingTermsFromMLTQuery( boostedMLTQuery, terms );
diff --git a/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java b/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java
index 9a7d9cf..36abcaa 100644
--- a/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/TextResponseWriter.java
@@ -203,10 +203,6 @@
   {
     SolrDocument out = new SolrDocument();
     for( Fieldable f : doc.getFields() ) {
-      if( "gack_i".equals( f.name() ) ) {
-        System.out.println( f );
-      }
-      
       // Make sure multivalued fields are represented as lists
       Object existing = out.get(f.name());
       if (existing == null) {
diff --git a/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java b/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java
new file mode 100644
index 0000000..eaf32da
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.schema;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.util.Version;
+import org.apache.solr.common.ResourceLoader;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.DOMUtil;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.core.Config;
+import org.apache.solr.core.SolrResourceLoader;
+import org.apache.solr.analysis.CharFilterFactory;
+import org.apache.solr.analysis.TokenFilterFactory;
+import org.apache.solr.analysis.TokenizerChain;
+import org.apache.solr.analysis.TokenizerFactory;
+import org.apache.solr.util.plugin.AbstractPluginLoader;
+import org.w3c.dom.*;
+
+import javax.xml.xpath.XPath;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpressionException;
+import javax.xml.xpath.XPathFactory;
+import java.util.*;
+import java.lang.reflect.Constructor;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class FieldTypePluginLoader 
+  extends AbstractPluginLoader<FieldType> {
+
+  private static final String LUCENE_MATCH_VERSION_PARAM
+    = IndexSchema.LUCENE_MATCH_VERSION_PARAM;
+
+  protected final static Logger log 
+    = LoggerFactory.getLogger(FieldTypePluginLoader.class);
+
+  private final XPath xpath = XPathFactory.newInstance().newXPath();
+
+  /**
+   * @param schema The schema that will be used to initialize the FieldTypes
+   * @param fieldTypes All FieldTypes that are instantiated by 
+   *        this Plugin Loader will be added to this Map
+   * @param schemaAware Any SchemaAware objects that are instantiated by 
+   *        this Plugin Loader will be added to this collection.
+   */
+  public FieldTypePluginLoader(final IndexSchema schema,
+                               final Map<String, FieldType> fieldTypes,
+                               final Collection<SchemaAware> schemaAware) {
+    super("[schema.xml] fieldType", true, true);
+    this.schema = schema;
+    this.fieldTypes = fieldTypes;
+    this.schemaAware = schemaAware;
+  }
+
+  private final IndexSchema schema;
+  private final Map<String, FieldType> fieldTypes;
+  private final Collection<SchemaAware> schemaAware;
+
+
+  @Override
+  protected FieldType create( ResourceLoader loader, 
+                              String name, 
+                              String className, 
+                              Node node ) throws Exception {
+
+    FieldType ft = (FieldType)loader.newInstance(className);
+    ft.setTypeName(name);
+    
+    String expression = "./analyzer[@type='query']";
+    Node anode = (Node)xpath.evaluate(expression, node, XPathConstants.NODE);
+    Analyzer queryAnalyzer = readAnalyzer(anode);
+    
+    // An analyzer without a type specified, or with type="index"
+    expression = "./analyzer[not(@type)] | ./analyzer[@type='index']";
+    anode = (Node)xpath.evaluate(expression, node, XPathConstants.NODE);
+    Analyzer analyzer = readAnalyzer(anode);
+    
+    // a custom similarity[Factory]
+    expression = "./similarity";
+    anode = (Node)xpath.evaluate(expression, node, XPathConstants.NODE);
+    Similarity similarity = IndexSchema.readSimilarity(loader, anode);
+    
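+    // if only one analyzer is configured, use it for both indexing and querying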
+    if (queryAnalyzer==null) queryAnalyzer=analyzer;
+    if (analyzer==null) analyzer=queryAnalyzer;
+    if (analyzer!=null) {
+      ft.setAnalyzer(analyzer);
+      ft.setQueryAnalyzer(queryAnalyzer);
+    }
+    if (similarity!=null) {
+      ft.setSimilarity(similarity);
+    }
+    if (ft instanceof SchemaAware){
+      schemaAware.add((SchemaAware) ft);
+    }
+    return ft;
+  }
+  
+  @Override
+  protected void init(FieldType plugin, Node node) throws Exception {
+
+    Map<String,String> params = DOMUtil.toMapExcept( node.getAttributes(), 
+                                                     "name","class" );
+    plugin.setArgs(schema, params );
+  }
+  
+  @Override
+  protected FieldType register(String name, 
+                               FieldType plugin) throws Exception {
+
+    log.trace("fieldtype defined: " + plugin );
+    return fieldTypes.put( name, plugin );
+  }
+
+  //
+  // <analyzer><tokenizer class="...."/><tokenizer class="...." arg="....">
+  //
+  //
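+  // A typical analyzer definition in schema.xml looks like (hypothetical example):
+  //   <analyzer>
+  //     <charFilter class="solr.HTMLStripCharFilterFactory"/>
+  //     <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+  //     <filter class="solr.LowerCaseFilterFactory"/>
+  //   </analyzer>
+  //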
+  private Analyzer readAnalyzer(Node node) throws XPathExpressionException {
+                                
+    final SolrResourceLoader loader = schema.getResourceLoader();
+
+    // parent node used to be passed in as "fieldtype"
+    // if (!fieldtype.hasChildNodes()) return null;
+    // Node node = DOMUtil.getChild(fieldtype,"analyzer");
+    
+    if (node == null) return null;
+    NamedNodeMap attrs = node.getAttributes();
+    String analyzerName = DOMUtil.getAttr(attrs,"class");
+    if (analyzerName != null) {
+      try {
+        // No need to be core-aware as Analyzers are not in the core-aware list
+        final Class<? extends Analyzer> clazz = loader.findClass
+          (analyzerName).asSubclass(Analyzer.class);
+        
+        try {
+          // first try to use a ctor with version parameter 
+          // (needed for many new Analyzers that have no default one anymore)
+          Constructor<? extends Analyzer> cnstr 
+            = clazz.getConstructor(Version.class);
+          final String matchVersionStr 
+            = DOMUtil.getAttr(attrs, LUCENE_MATCH_VERSION_PARAM);
+          final Version luceneMatchVersion = (matchVersionStr == null) ?
+            schema.getDefaultLuceneMatchVersion() : 
+            Config.parseLuceneVersionString(matchVersionStr);
+          if (luceneMatchVersion == null) {
+            throw new SolrException
+              ( SolrException.ErrorCode.SERVER_ERROR,
+                "Configuration Error: Analyzer '" + clazz.getName() +
+                "' needs a 'luceneMatchVersion' parameter");
+          }
+          return cnstr.newInstance(luceneMatchVersion);
+        } catch (NoSuchMethodException nsme) {
+          // otherwise use default ctor
+          return clazz.newInstance();
+        }
+      } catch (Exception e) {
+        log.error("Cannot load analyzer: "+analyzerName, e);
+        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
+                                 "Cannot load analyzer: "+analyzerName, e );
+      }
+    }
+
+    // Load the CharFilters
+
+    final ArrayList<CharFilterFactory> charFilters 
+      = new ArrayList<CharFilterFactory>();
+    AbstractPluginLoader<CharFilterFactory> charFilterLoader =
+      new AbstractPluginLoader<CharFilterFactory>
+      ( "[schema.xml] analyzer/charFilter", false, false ) {
+
+      @Override
+      protected void init(CharFilterFactory plugin, Node node) throws Exception {
+        if( plugin != null ) {
+          final Map<String,String> params = DOMUtil.toMapExcept(node.getAttributes(),"class");
+          // copy the luceneMatchVersion from config, if not set
+          if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
+            params.put(LUCENE_MATCH_VERSION_PARAM, 
+                       schema.getDefaultLuceneMatchVersion().toString());
+          plugin.init( params );
+          charFilters.add( plugin );
+        }
+      }
+
+      @Override
+      protected CharFilterFactory register(String name, 
+                                           CharFilterFactory plugin) {
+        return null; // used for map registration
+      }
+    };
+
+    charFilterLoader.load( loader, (NodeList)xpath.evaluate("./charFilter",  node, XPathConstants.NODESET) );
+                            
+
+    // Load the Tokenizer
+    // Although an analyzer only allows a single Tokenizer, we load a list to make sure
+    // the configuration is ok
+
+    final ArrayList<TokenizerFactory> tokenizers 
+      = new ArrayList<TokenizerFactory>(1);
+    AbstractPluginLoader<TokenizerFactory> tokenizerLoader =
+      new AbstractPluginLoader<TokenizerFactory>
+      ( "[schema.xml] analyzer/tokenizer", false, false ) {
+      @Override
+      protected void init(TokenizerFactory plugin, Node node) throws Exception {
+        if( !tokenizers.isEmpty() ) {
+          throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
+              "The schema defines multiple tokenizers for: "+node );
+        }
+        final Map<String,String> params = DOMUtil.toMapExcept(node.getAttributes(),"class");
+
+        // copy the luceneMatchVersion from config, if not set
+        if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
+          params.put(LUCENE_MATCH_VERSION_PARAM, 
+                     schema.getDefaultLuceneMatchVersion().toString());
+        plugin.init( params );
+        tokenizers.add( plugin );
+      }
+
+      @Override
+      protected TokenizerFactory register(String name, TokenizerFactory plugin) {
+        return null; // used for map registration
+      }
+    };
+
+    tokenizerLoader.load( loader, (NodeList)xpath.evaluate("./tokenizer", node, XPathConstants.NODESET) );
+    
+    // Make sure something was loaded
+    if( tokenizers.isEmpty() ) {
+      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"analyzer without class or tokenizer & filter list");
+    }
+    
+
+    // Load the Filters
+
+    final ArrayList<TokenFilterFactory> filters 
+      = new ArrayList<TokenFilterFactory>();
+
+    AbstractPluginLoader<TokenFilterFactory> filterLoader = 
+      new AbstractPluginLoader<TokenFilterFactory>( "[schema.xml] analyzer/filter", false, false )
+    {
+      @Override
+      protected void init(TokenFilterFactory plugin, Node node) throws Exception {
+        if( plugin != null ) {
+          final Map<String,String> params = DOMUtil.toMapExcept(node.getAttributes(),"class");
+          // copy the luceneMatchVersion from config, if not set
+          if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
+            params.put(LUCENE_MATCH_VERSION_PARAM, 
+                       schema.getDefaultLuceneMatchVersion().toString());
+          plugin.init( params );
+          filters.add( plugin );
+        }
+      }
+
+      @Override
+      protected TokenFilterFactory register(String name, TokenFilterFactory plugin) throws Exception {
+        return null; // used for map registration
+      }
+    };
+    filterLoader.load( loader, (NodeList)xpath.evaluate("./filter", node, XPathConstants.NODESET) );
+    
+    return new TokenizerChain(charFilters.toArray(new CharFilterFactory[charFilters.size()]),
+                              tokenizers.get(0), filters.toArray(new TokenFilterFactory[filters.size()]));
+  }
+    
+}
diff --git a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
index ad501ce..89b958b 100644
--- a/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
+++ b/solr/core/src/java/org/apache/solr/schema/IndexSchema.java
@@ -34,13 +34,8 @@
 import org.apache.solr.core.SolrConfig;
 import org.apache.solr.core.Config;
 import org.apache.solr.core.SolrResourceLoader;
-import org.apache.solr.analysis.CharFilterFactory;
-import org.apache.solr.analysis.TokenFilterFactory;
-import org.apache.solr.analysis.TokenizerChain;
-import org.apache.solr.analysis.TokenizerFactory;
 import org.apache.solr.search.SolrQueryParser;
 import org.apache.solr.search.SolrSimilarityProvider;
-import org.apache.solr.util.plugin.AbstractPluginLoader;
 import org.apache.solr.util.plugin.SolrCoreAware;
 import org.w3c.dom.*;
 import org.xml.sax.InputSource;
@@ -53,7 +48,6 @@
 import java.io.Reader;
 import java.io.IOException;
 import java.util.*;
-import java.lang.reflect.Constructor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -138,6 +132,11 @@
     return name;
   }
   
+  /** The Default Lucene Match Version for this IndexSchema */
+  public Version getDefaultLuceneMatchVersion() {
+    return solrConfig.luceneMatchVersion;
+  }
+
   float getVersion() {
     return version;
   }
@@ -376,63 +375,18 @@
 
       version = schemaConf.getFloat("/schema/@version", 1.0f);
 
-      final IndexSchema schema = this;
-      AbstractPluginLoader<FieldType> fieldLoader = new AbstractPluginLoader<FieldType>( "[schema.xml] fieldType", true, true) {
 
-        @Override
-        protected FieldType create( ResourceLoader loader, String name, String className, Node node ) throws Exception
-        {
-          FieldType ft = (FieldType)loader.newInstance(className);
-          ft.setTypeName(name);
+      // load the Field Types
 
-          String expression = "./analyzer[@type='query']";
-          Node anode = (Node)xpath.evaluate(expression, node, XPathConstants.NODE);
-          Analyzer queryAnalyzer = readAnalyzer(anode);
-
-          // An analyzer without a type specified, or with type="index"
-          expression = "./analyzer[not(@type)] | ./analyzer[@type='index']";
-          anode = (Node)xpath.evaluate(expression, node, XPathConstants.NODE);
-          Analyzer analyzer = readAnalyzer(anode);
-          
-          // a custom similarity[Factory]
-          expression = "./similarity";
-          anode = (Node)xpath.evaluate(expression, node, XPathConstants.NODE);
-          Similarity similarity = readSimilarity(anode);
-
-          if (queryAnalyzer==null) queryAnalyzer=analyzer;
-          if (analyzer==null) analyzer=queryAnalyzer;
-          if (analyzer!=null) {
-            ft.setAnalyzer(analyzer);
-            ft.setQueryAnalyzer(queryAnalyzer);
-          }
-          if (similarity!=null) {
-            ft.setSimilarity(similarity);
-          }
-          if (ft instanceof SchemaAware){
-            schemaAware.add((SchemaAware) ft);
-          }
-          return ft;
-        }
-        
-        @Override
-        protected void init(FieldType plugin, Node node) throws Exception {
-          Map<String,String> params = DOMUtil.toMapExcept( node.getAttributes(), "name","class" );
-          plugin.setArgs(schema, params );
-        }
-
-        @Override
-        protected FieldType register(String name, FieldType plugin) throws Exception {
-          log.trace("fieldtype defined: " + plugin );
-          return fieldTypes.put( name, plugin );
-        }
-      };
-      
+      final FieldTypePluginLoader typeLoader 
+        = new FieldTypePluginLoader(this, fieldTypes, schemaAware);
 
       String expression = "/schema/types/fieldtype | /schema/types/fieldType";
-      NodeList nodes = (NodeList) xpath.evaluate(expression, document, XPathConstants.NODESET);
-      fieldLoader.load( loader, nodes );
+      NodeList nodes = (NodeList) xpath.evaluate(expression, document, 
+                                                 XPathConstants.NODESET);
+      typeLoader.load( loader, nodes );
 
-      
+      // load the Fields
 
       // Hang on to the fields that say if they are required -- this lets us set a reasonable default for the unique key
       Map<String,Boolean> explicitRequiredProp = new HashMap<String, Boolean>();
@@ -506,7 +460,7 @@
     dynamicFields = dFields.toArray(new DynamicField[dFields.size()]);
 
     Node node = (Node) xpath.evaluate("/schema/similarity", document, XPathConstants.NODE);
-    Similarity similarity = readSimilarity(node);
+    Similarity similarity = readSimilarity(loader, node);
     fallbackSimilarity = similarity == null ? new DefaultSimilarity() : similarity;
 
     node = (Node) xpath.evaluate("/schema/similarityProvider", document, XPathConstants.NODE);
@@ -759,7 +713,7 @@
     return newArr;
   }
 
-  private Similarity readSimilarity(Node node) throws XPathExpressionException {
+  static Similarity readSimilarity(ResourceLoader loader, Node node) throws XPathExpressionException {
     if (node==null) {
       return null;
     } else {
@@ -783,140 +737,6 @@
     }
   }
 
-  //
-  // <analyzer><tokenizer class="...."/><tokenizer class="...." arg="....">
-  //
-  //
-  private Analyzer readAnalyzer(Node node) throws XPathExpressionException {
-    // parent node used to be passed in as "fieldtype"
-    // if (!fieldtype.hasChildNodes()) return null;
-    // Node node = DOMUtil.getChild(fieldtype,"analyzer");
-
-    if (node == null) return null;
-    NamedNodeMap attrs = node.getAttributes();
-    String analyzerName = DOMUtil.getAttr(attrs,"class");
-    if (analyzerName != null) {
-      try {
-        // No need to be core-aware as Analyzers are not in the core-aware list
-        final Class<? extends Analyzer> clazz = loader.findClass
-          (analyzerName).asSubclass(Analyzer.class);
-
-        try {
-          // first try to use a ctor with version parameter 
-          // (needed for many new Analyzers that have no default one anymore)
-          Constructor<? extends Analyzer> cnstr = clazz.getConstructor(Version.class);
-          final String matchVersionStr = DOMUtil.getAttr(attrs, LUCENE_MATCH_VERSION_PARAM);
-          final Version luceneMatchVersion = (matchVersionStr == null) ?
-            solrConfig.luceneMatchVersion : Config.parseLuceneVersionString(matchVersionStr);
-          if (luceneMatchVersion == null) {
-            throw new SolrException
-              ( SolrException.ErrorCode.SERVER_ERROR,
-                "Configuration Error: Analyzer '" + clazz.getName() +
-                "' needs a 'luceneMatchVersion' parameter");
-          }
-          return cnstr.newInstance(luceneMatchVersion);
-        } catch (NoSuchMethodException nsme) {
-          // otherwise use default ctor
-          return clazz.newInstance();
-        }
-      } catch (Exception e) {
-        log.error("Cannot load analyzer: "+analyzerName, e);
-        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-                                 "Cannot load analyzer: "+analyzerName, e );
-      }
-    }
-
-    XPath xpath = XPathFactory.newInstance().newXPath();
-
-    // Load the CharFilters
-    // --------------------------------------------------------------------------------
-    final ArrayList<CharFilterFactory> charFilters = new ArrayList<CharFilterFactory>();
-    AbstractPluginLoader<CharFilterFactory> charFilterLoader =
-      new AbstractPluginLoader<CharFilterFactory>( "[schema.xml] analyzer/charFilter", false, false )
-    {
-      @Override
-      protected void init(CharFilterFactory plugin, Node node) throws Exception {
-        if( plugin != null ) {
-          final Map<String,String> params = DOMUtil.toMapExcept(node.getAttributes(),"class");
-          // copy the luceneMatchVersion from config, if not set
-          if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
-            params.put(LUCENE_MATCH_VERSION_PARAM, solrConfig.luceneMatchVersion.toString());
-          plugin.init( params );
-          charFilters.add( plugin );
-        }
-      }
-
-      @Override
-      protected CharFilterFactory register(String name, CharFilterFactory plugin) throws Exception {
-        return null; // used for map registration
-      }
-    };
-    charFilterLoader.load( solrConfig.getResourceLoader(), (NodeList)xpath.evaluate("./charFilter", node, XPathConstants.NODESET) );
-
-    // Load the Tokenizer
-    // Although an analyzer only allows a single Tokenizer, we load a list to make sure
-    // the configuration is ok
-    // --------------------------------------------------------------------------------
-    final ArrayList<TokenizerFactory> tokenizers = new ArrayList<TokenizerFactory>(1);
-    AbstractPluginLoader<TokenizerFactory> tokenizerLoader =
-      new AbstractPluginLoader<TokenizerFactory>( "[schema.xml] analyzer/tokenizer", false, false )
-    {
-      @Override
-      protected void init(TokenizerFactory plugin, Node node) throws Exception {
-        if( !tokenizers.isEmpty() ) {
-          throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-              "The schema defines multiple tokenizers for: "+node );
-        }
-        final Map<String,String> params = DOMUtil.toMapExcept(node.getAttributes(),"class");
-        // copy the luceneMatchVersion from config, if not set
-        if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
-          params.put(LUCENE_MATCH_VERSION_PARAM, solrConfig.luceneMatchVersion.toString());
-        plugin.init( params );
-        tokenizers.add( plugin );
-      }
-
-      @Override
-      protected TokenizerFactory register(String name, TokenizerFactory plugin) throws Exception {
-        return null; // used for map registration
-      }
-    };
-    tokenizerLoader.load( loader, (NodeList)xpath.evaluate("./tokenizer", node, XPathConstants.NODESET) );
-    
-    // Make sure something was loaded
-    if( tokenizers.isEmpty() ) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"analyzer without class or tokenizer & filter list");
-    }
-    
-
-    // Load the Filters
-    // --------------------------------------------------------------------------------
-    final ArrayList<TokenFilterFactory> filters = new ArrayList<TokenFilterFactory>();
-    AbstractPluginLoader<TokenFilterFactory> filterLoader = 
-      new AbstractPluginLoader<TokenFilterFactory>( "[schema.xml] analyzer/filter", false, false )
-    {
-      @Override
-      protected void init(TokenFilterFactory plugin, Node node) throws Exception {
-        if( plugin != null ) {
-          final Map<String,String> params = DOMUtil.toMapExcept(node.getAttributes(),"class");
-          // copy the luceneMatchVersion from config, if not set
-          if (!params.containsKey(LUCENE_MATCH_VERSION_PARAM))
-            params.put(LUCENE_MATCH_VERSION_PARAM, solrConfig.luceneMatchVersion.toString());
-          plugin.init( params );
-          filters.add( plugin );
-        }
-      }
-
-      @Override
-      protected TokenFilterFactory register(String name, TokenFilterFactory plugin) throws Exception {
-        return null; // used for map registration
-      }
-    };
-    filterLoader.load( loader, (NodeList)xpath.evaluate("./filter", node, XPathConstants.NODESET) );
-
-    return new TokenizerChain(charFilters.toArray(new CharFilterFactory[charFilters.size()]),
-        tokenizers.get(0), filters.toArray(new TokenFilterFactory[filters.size()]));
-  };
-
 
   static abstract class DynamicReplacement implements Comparable<DynamicReplacement> {
     final static int STARTS_WITH=1;
diff --git a/solr/core/src/java/org/apache/solr/search/QParser.java b/solr/core/src/java/org/apache/solr/search/QParser.java
index a8b7cf8..8b3cabc 100755
--- a/solr/core/src/java/org/apache/solr/search/QParser.java
+++ b/solr/core/src/java/org/apache/solr/search/QParser.java
@@ -259,7 +259,8 @@
   }
 
   public Query getHighlightQuery() throws ParseException {
-    return getQuery();
+    Query query = getQuery();
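+    // if the query was wrapped (e.g. for caching), highlight against the underlying user query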
+    return query instanceof WrappedQuery ? ((WrappedQuery)query).getWrappedQuery() : query;
   }
 
   public void addDebugInfo(NamedList<Object> debugInfo) {
diff --git a/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java
new file mode 100644
index 0000000..5b139f0
--- /dev/null
+++ b/solr/core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java
@@ -0,0 +1,108 @@
+package org.apache.solr.update.processor;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.update.AddUpdateCommand;
+
+/**
+ * An update processor that removes duplicate values from the specified fields.
+ * 
+ * <pre class="prettyprint" >
+ * &lt;updateRequestProcessorChain name="uniq-fields"&gt;
+ *   &lt;processor class="org.apache.solr.update.processor.UniqFieldsUpdateProcessorFactory"&gt;
+ *     &lt;lst name="fields"&gt;
+ *       &lt;str&gt;uniq&lt;/str&gt;
+ *       &lt;str&gt;uniq2&lt;/str&gt;
+ *       &lt;str&gt;uniq3&lt;/str&gt;
+ *     &lt;/lst&gt;      
+ *   &lt;/processor&gt;
+ *   &lt;processor class="solr.RunUpdateProcessorFactory" /&gt;
+ * &lt;/updateRequestProcessorChain&gt;</pre>
+ * 
+ */
+public class UniqFieldsUpdateProcessorFactory extends UpdateRequestProcessorFactory {
+
+  private Set<String> fields;
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public void init(@SuppressWarnings("rawtypes") NamedList args) {
+    NamedList<String> flst = (NamedList<String>)args.get("fields");
+    if(flst != null){
+      fields = new HashSet<String>();
+      for(int i = 0; i < flst.size(); i++){
+        fields.add(flst.getVal(i));
+      }
+    }
+  }
+  
+  @Override
+  public UpdateRequestProcessor getInstance(SolrQueryRequest req,
+                                            SolrQueryResponse rsp,
+                                            UpdateRequestProcessor next) {
+    return new UniqFieldsUpdateProcessor(next, fields);
+  }
+  
+  public class UniqFieldsUpdateProcessor extends UpdateRequestProcessor {
+    
+    private final Set<String> fields;
+
+    public UniqFieldsUpdateProcessor(UpdateRequestProcessor next, 
+                                              Set<String> fields) {
+      super(next);
+      this.fields = fields;
+    }
+    
+    @Override
+    public void processAdd(AddUpdateCommand cmd) throws IOException {
+      if(fields != null){
+        SolrInputDocument solrInputDocument = cmd.getSolrInputDocument();
+        List<Object> uniqList = new ArrayList<Object>();
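+        // for each configured field, keep only the first occurrence of each value, preserving order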
+        for (String field : fields) {
+          uniqList.clear();
+          Collection<Object> col = solrInputDocument.getFieldValues(field);
+          if (col != null) {
+            for (Object o : col) {
+              if(!uniqList.contains(o))
+                uniqList.add(o);
+            }
+            solrInputDocument.remove(field);
+            for (Object o : uniqList) {
+              solrInputDocument.addField(field, o);
+            }
+          }    
+        }
+      }
+      super.processAdd(cmd);
+    }
+  }
+}
+
+
+
diff --git a/solr/core/src/test-files/solr/conf/schema12.xml b/solr/core/src/test-files/solr/conf/schema12.xml
index c998a89..053a86e 100755
--- a/solr/core/src/test-files/solr/conf/schema12.xml
+++ b/solr/core/src/test-files/solr/conf/schema12.xml
@@ -523,6 +523,13 @@
    <field name="pointD" type="xyd" indexed="true" stored="true" multiValued="false"/>
    <field name="point_hash" type="geohash" indexed="true" stored="true" multiValued="false"/>
    <field name="store" type="location" indexed="true" stored="true"/>
+   
+   <!-- to test uniq fields -->   
+   <field name="uniq" type="string" indexed="true" stored="true" multiValued="true"/>
+   <field name="uniq2" type="string" indexed="true" stored="true" multiValued="true"/>
+   <field name="uniq3" type="string" indexed="true" stored="true"/>
+   <field name="nouniq" type="string" indexed="true" stored="true" multiValued="true"/>
+
    <dynamicField name="*_coordinate"  type="tdouble" indexed="true"  stored="false"/>
 
 
diff --git a/solr/core/src/test-files/solr/conf/solrconfig.xml b/solr/core/src/test-files/solr/conf/solrconfig.xml
index 9b40b15..64b5a42 100644
--- a/solr/core/src/test-files/solr/conf/solrconfig.xml
+++ b/solr/core/src/test-files/solr/conf/solrconfig.xml
@@ -491,5 +491,15 @@
     </processor>
     <processor class="solr.RunUpdateProcessorFactory" />
   </updateRequestProcessorChain>
+  <updateRequestProcessorChain name="uniq-fields">
+    <processor class="org.apache.solr.update.processor.UniqFieldsUpdateProcessorFactory">
+      <lst name="fields">
+        <str>uniq</str>
+        <str>uniq2</str>
+        <str>uniq3</str>
+      </lst>      
+    </processor>
+    <processor class="solr.RunUpdateProcessorFactory" />
+  </updateRequestProcessorChain>  
 
 </config>
diff --git a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
index fac042f..6032d91 100644
--- a/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
+++ b/solr/core/src/test/org/apache/solr/core/TestJmxIntegration.java
@@ -112,8 +112,6 @@
   public void testJmxOnCoreReload() throws Exception {
     List<MBeanServer> servers = MBeanServerFactory.findMBeanServer(null);
     MBeanServer mbeanServer = servers.get(0);
-    log.info("Servers in testJmxUpdate: " + servers);
-    log.info(h.getCore().getInfoRegistry().toString());
 
     String coreName = h.getCore().getName();
     if (coreName.length() == 0) {
@@ -121,16 +119,43 @@
     }
 
     Set<ObjectInstance> oldBeans = mbeanServer.queryMBeans(null, null);
-    int oldNumberOfObjects = oldBeans.size();
-    h.getCoreContainer().reload(coreName);
-    
-    // chill for a moment, so our beans can get ready
-    Thread.sleep(1000);
-    
-    Set<ObjectInstance> newBeans = mbeanServer.queryMBeans(null, null);
-    int newNumberOfObjects = newBeans.size();
+    int oldNumberOfObjects = 0;
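+    // count only the MBeans registered by this core, identified via their "coreHashCode" attribute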
+    for (ObjectInstance bean : oldBeans) {
+      try {
+        if (String.valueOf(h.getCore().hashCode()).equals(mbeanServer.getAttribute(bean.getObjectName(), "coreHashCode"))) {
+          oldNumberOfObjects++;
+        }
+      } catch (AttributeNotFoundException e) {
+        // expected
+      }
+    }
 
-    assertEquals("Number of registered MBeans is not the same after Solr core reload", oldNumberOfObjects, newNumberOfObjects);
+    log.info("Before Reload: Size of infoRegistry: " + h.getCore().getInfoRegistry().size() + " MBeans: " + oldNumberOfObjects);
+    assertEquals("Number of registered MBeans is not the same as info registry size", h.getCore().getInfoRegistry().size(), oldNumberOfObjects);
+
+    h.getCoreContainer().reload(coreName);
+
+    Set<ObjectInstance> newBeans = mbeanServer.queryMBeans(null, null);
+    int newNumberOfObjects = 0;
+    int registrySize = 0;
+    SolrCore core = h.getCoreContainer().getCore(coreName);
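+    // getCore() increments the core's reference count, so the core is closed again in the finally block below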
+    try {
+      registrySize = core.getInfoRegistry().size();
+      for (ObjectInstance bean : newBeans) {
+        try {
+          if (String.valueOf(core.hashCode()).equals(mbeanServer.getAttribute(bean.getObjectName(), "coreHashCode"))) {
+            newNumberOfObjects++;
+          }
+        } catch (AttributeNotFoundException e) {
+          // expected
+        }
+      }
+    } finally {
+      core.close();
+    }
+
+    log.info("After Reload: Size of infoRegistry: " + registrySize + " MBeans: " + newNumberOfObjects);
+    assertEquals("Number of registered MBeans is not the same as info registry size", registrySize, newNumberOfObjects);
   }
 
   private ObjectName getObjectName(String key, SolrInfoMBean infoBean)
diff --git a/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
new file mode 100644
index 0000000..f2b62e3
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.search;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.queries.function.DocValues;
+import org.apache.lucene.queries.function.ValueSource;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.noggit.ObjectBuilder;
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.client.solrj.SolrRequest;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.schema.SchemaField;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class TestRealTimeGet extends SolrTestCaseJ4 {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig.xml","schema12.xml");
+  }
+
+  /***
+  @Test
+  public void testGetRealtime() throws Exception {
+    SolrQueryRequest sr1 = req("q","foo");
+    IndexReader r1 = sr1.getCore().getRealtimeReader();
+
+    assertU(adoc("id","1"));
+
+    IndexReader r2 = sr1.getCore().getRealtimeReader();
+    assertNotSame(r1, r2);
+    int refcount = r2.getRefCount();
+
+    // make sure a new reader wasn't opened
+    IndexReader r3 = sr1.getCore().getRealtimeReader();
+    assertSame(r2, r3);
+    assertEquals(refcount+1, r3.getRefCount());
+
+    assertU(commit());
+
+    // this is not critical, but currently a commit does not refresh the reader
+    // if nothing has changed
+    IndexReader r4 = sr1.getCore().getRealtimeReader();
+    assertEquals(refcount+2, r4.getRefCount());
+
+
+    r1.decRef();
+    r2.decRef();
+    r3.decRef();
+    r4.decRef();
+    sr1.close();
+  }
+  ***/
+
+
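+  // model holds the expected value per document id (a negative value marks a deleted doc);
+  // committedModel is a snapshot of model taken at commit time that the query threads verify against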
+  private ConcurrentHashMap<Integer,Long> model = new ConcurrentHashMap<Integer,Long>();
+  private volatile Map<Integer,Long> committedModel = new HashMap<Integer,Long>();
+  volatile int lastId;
+  private final String field = "val_l";
+  private volatile Throwable ex;
+
+  @Test
+  public void testStressGetRealtime() throws Exception {
+    // update variables
+    final int commitPercent = 10;
+    final int softCommitPercent = 50; // what percent of the commits are soft
+    final int deletePercent = 8;
+    final int deleteByQueryPercent = 4;
+    final int ndocs = 100;
+    int nWriteThreads = 10;
+    final int maxConcurrentCommits = 2;   // number of threads allowed to commit at once; limiting this avoids commit errors from exceeding the configured maximum
+
+    // query variables
+    final int percentRealtimeQuery = 0;   // realtime get is not implemented yet
+    final AtomicLong operations = new AtomicLong(5000);  // number of query operations to perform in total
+    int nReadThreads = 10;
+
+
+    for (int i=0; i<ndocs; i++) {
+      model.put(i, -1L);
+    }
+    committedModel.putAll(model);
+
+    final AtomicInteger numCommitting = new AtomicInteger();
+
+    List<Thread> threads = new ArrayList<Thread>();
+
+    for (int i=0; i<nWriteThreads; i++) {
+      Thread thread = new Thread("WRITER"+i) {
+        Random rand = new Random(random.nextInt());
+
+        @Override
+        public void run() {
+          while (operations.get() > 0) {
+            int oper = rand.nextInt(100);
+            int id = rand.nextInt(ndocs);
+            Long val = model.get(id);
+            long nextVal = Math.abs(val)+1;
+
+            // sometimes set lastId before we actually make the change, to try to
+            // uncover more race conditions between writing and reading
+            boolean before = random.nextBoolean();
+            if (before) {
+              lastId = id;
+            }
+
+            if (oper < commitPercent) {
+              if (numCommitting.incrementAndGet() <= maxConcurrentCommits) {
+                if (rand.nextInt(100) < softCommitPercent)
+                  assertU(h.commit("softCommit","true"));
+                else
+                  assertU(commit());
+              }
+
+              committedModel = new HashMap<Integer,Long>(model);  // take a snapshot
+              numCommitting.decrementAndGet();
+            } else if (oper < commitPercent + deletePercent) {
+              assertU("<delete><id>" + id + "</id></delete>");
+              model.put(id, -nextVal);
+            } else if (oper < commitPercent + deletePercent + deleteByQueryPercent) {
+              assertU("<delete><query>id:" + id + "</query></delete>");
+              model.put(id, -nextVal);
+            } else {
+              assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
+            }
+
+            if (!before) {
+              lastId = id;
+            }
+          }
+        }
+      };
+
+      threads.add(thread);
+    }
+
+
+    for (int i=0; i<nReadThreads; i++) {
+      Thread thread = new Thread("READER"+i) {
+        Random rand = new Random(random.nextInt());
+
+        @Override
+        public void run() {
+          try {
+            while (operations.decrementAndGet() >= 0) {
+              int oper = rand.nextInt(100);
+              // bias toward a recently changed doc
+              int id = rand.nextInt(100) < 25 ? lastId : rand.nextInt(ndocs);
+
+              // when indexing, we update the index, then the model
+              // so when querying, we should first check the model, and then the index
+
+              boolean realTime = rand.nextInt(100) < percentRealtimeQuery;
+              long val;
+
+              if (realTime) {
+                val = model.get(id);
+              } else {
+                val = committedModel.get(id);
+              }
+
+              SolrQueryRequest sreq;
+              if (realTime) {
+                sreq = req("wt","json", "qt","/get", "ids",Integer.toString(id));
+              } else {
+                sreq = req("wt","json", "q","id:"+Integer.toString(id), "omitHeader","true");
+              }
+
+              String response = h.query(sreq);
+              Map rsp = (Map)ObjectBuilder.fromJSON(response);
+              List doclist = (List)(((Map)rsp.get("response")).get("docs"));
+              if (doclist.size() == 0) {
+                // there's no info we can get back with a delete, so not much we can check without further synchronization
+              } else {
+                assertEquals(1, doclist.size());
+                long foundVal = (Long)(((Map)doclist.get(0)).get(field));
+                assertTrue(foundVal >= Math.abs(val));
+              }
+            }
+          }
+          catch (Throwable e) {
+            ex = e;
+            operations.set(-1L);
+            SolrException.log(log,e);
+          }
+        }
+      };
+
+      threads.add(thread);
+    }
+
+
+    for (Thread thread : threads) {
+      thread.start();
+    }
+
+    for (Thread thread : threads) {
+      thread.join();
+    }
+
+    assertNull(ex);
+
+  }
+
+}
diff --git a/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java b/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
new file mode 100644
index 0000000..4d3634e
--- /dev/null
+++ b/solr/core/src/test/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactoryTest.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.solr.update.processor;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.solr.SolrTestCaseJ4;
+import org.apache.solr.common.params.MultiMapSolrParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.params.UpdateParams;
+import org.apache.solr.common.util.ContentStream;
+import org.apache.solr.common.util.ContentStreamBase;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.handler.XmlUpdateRequestHandler;
+import org.apache.solr.request.SolrQueryRequest;
+import org.apache.solr.request.SolrQueryRequestBase;
+import org.apache.solr.response.SolrQueryResponse;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests {@link UniqFieldsUpdateProcessorFactory}.
+ */
+public class UniqFieldsUpdateProcessorFactoryTest extends SolrTestCaseJ4 {
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("solrconfig.xml", "schema12.xml");
+  }
+
+  @Override
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+    clearIndex();
+    assertU(commit());
+  }
+
+  @Test
+  public void testUniqFields() throws Exception {
+    SolrCore core = h.getCore();
+    UpdateRequestProcessorChain chained = core
+      .getUpdateProcessingChain("uniq-fields");
+    assertNotNull(chained);
+    UniqFieldsUpdateProcessorFactory factory = ((UniqFieldsUpdateProcessorFactory) chained
+        .getFactories()[0]);
+    assertNotNull(factory);
+
+    addDoc(adoc("id", "1a", 
+                "uniq", "value1", 
+                "uniq", "value1", 
+                "uniq", "value2"));
+    addDoc(adoc("id", "2a", 
+                "uniq2", "value1", 
+                "uniq2", "value2", 
+                "uniq2", "value1", 
+                "uniq2", "value3", 
+                "uniq", "value1", 
+                "uniq", "value1"));
+    addDoc(adoc("id", "1b", 
+                "uniq3", "value1", 
+                "uniq3", "value1"));
+    addDoc(adoc("id", "1c", 
+                "nouniq", "value1", 
+                "nouniq", "value1", 
+                "nouniq", "value2"));
+    addDoc(adoc("id", "2c", 
+                "nouniq", "value1", 
+                "nouniq", "value1", 
+                "nouniq", "value2", 
+                "uniq2", "value1", 
+                "uniq2", "value1"));
+
+    assertU(commit());
+    assertQ(req("id:1a"), "count(//*[@name='uniq']/*)=2",
+        "//arr[@name='uniq']/str[1][.='value1']",
+        "//arr[@name='uniq']/str[2][.='value2']");
+    assertQ(req("id:2a"), "count(//*[@name='uniq2']/*)=3",
+        "//arr[@name='uniq2']/str[1][.='value1']",
+        "//arr[@name='uniq2']/str[2][.='value2']",
+        "//arr[@name='uniq2']/str[3][.='value3']");
+    assertQ(req("id:2a"), "count(//*[@name='uniq']/*)=1");
+    assertQ(req("id:1b"), "count(//*[@name='uniq3'])=1");
+    assertQ(req("id:1c"), "count(//*[@name='nouniq']/*)=3");
+    assertQ(req("id:2c"), "count(//*[@name='nouniq']/*)=3");
+    assertQ(req("id:2c"), "count(//*[@name='uniq2']/*)=1");
+
+  }
+
+  private void addDoc(String doc) throws Exception {
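+    // index the given XML doc through XmlUpdateRequestHandler, forcing the "uniq-fields" update chain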
+    Map<String, String[]> params = new HashMap<String, String[]>();
+    MultiMapSolrParams mmparams = new MultiMapSolrParams(params);
+    params.put(UpdateParams.UPDATE_CHAIN, new String[] { "uniq-fields" });
+    SolrQueryRequestBase req = new SolrQueryRequestBase(h.getCore(),
+        (SolrParams) mmparams) {
+    };
+
+    XmlUpdateRequestHandler handler = new XmlUpdateRequestHandler();
+    handler.init(null);
+    ArrayList<ContentStream> streams = new ArrayList<ContentStream>(2);
+    streams.add(new ContentStreamBase.StringStream(doc));
+    req.setContentStreams(streams);
+    handler.handleRequestBody(req, new SolrQueryResponse());
+    req.close();
+  }
+}
diff --git a/solr/solrj/build.xml b/solr/solrj/build.xml
index a1974e9..fbeb731 100644
--- a/solr/solrj/build.xml
+++ b/solr/solrj/build.xml
@@ -20,6 +20,13 @@
 
   <import file="../common-build.xml"/>
 
+  <!-- Specialized version of common-solr.test.classpath that excludes the Solr core test output -->
+  <path id="test.classpath">
+    <pathelement path="${common-solr.dir}/build/solr-test-framework/classes/java"/>
+    <pathelement path="${tests.userdir}"/>
+    <path refid="test.base.classpath"/>
+  </path>
+
   <target name="compile-test" depends="compile-solr-test-framework,common.compile-test"/>
 
   <target name="test" depends="compile-test,junit-mkdir,junit-sequential,junit-parallel"/>
diff --git a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
index 47908b0..304d7e1 100644
--- a/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
+++ b/solr/test-framework/src/java/org/apache/solr/util/TestHarness.java
@@ -71,8 +71,8 @@
 public class TestHarness {
   protected CoreContainer container;
   private SolrCore core;
-  private XPath xpath = XPathFactory.newInstance().newXPath();
-  private DocumentBuilder builder;
+  private final ThreadLocal<DocumentBuilder> builderTL = new ThreadLocal<DocumentBuilder>();
+  private final ThreadLocal<XPath> xpathTL = new ThreadLocal<XPath>();
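+  // DocumentBuilder and XPath instances are not thread-safe, so keep one of each per thread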
   public XmlUpdateRequestHandler updater;
         
   public static SolrConfig createConfig(String confFile) {
@@ -145,15 +145,40 @@
       core = container.getCore(coreName);
       if (core != null)
         core.close();
-      builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
-      
+
       updater = new XmlUpdateRequestHandler();
       updater.init( null );
     } catch (Exception e) {
       throw new RuntimeException(e);
     }
   }
-  
+
+  private DocumentBuilder getXmlDocumentBuilder() {
+    try {
+      DocumentBuilder builder = builderTL.get();
+      if (builder == null) {
+        builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
+        builderTL.set(builder);
+      }
+      return builder;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private XPath getXpath() {
+    try {
+      XPath xpath = xpathTL.get();
+      if (xpath == null) {
+        xpath = XPathFactory.newInstance().newXPath();
+        xpathTL.set(xpath);
+      }
+      return xpath;
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   // Creates a container based on infos needed to create one core
   static class Initializer extends CoreContainer.Initializer {
     String coreName;
@@ -364,7 +389,7 @@
                 
     Document document=null;
     try {
-      document = builder.parse(new ByteArrayInputStream
+      document = getXmlDocumentBuilder().parse(new ByteArrayInputStream
                                (xml.getBytes("UTF-8")));
     } catch (UnsupportedEncodingException e1) {
       throw new RuntimeException("Totally weird UTF-8 exception", e1);
@@ -374,7 +399,7 @@
                 
     for (String xp : tests) {
       xp=xp.trim();
-      Boolean bool = (Boolean) xpath.evaluate(xp, document,
+      Boolean bool = (Boolean) getXpath().evaluate(xp, document,
                                               XPathConstants.BOOLEAN);
 
       if (!bool) {
diff --git a/solr/webapp/web/css/screen.css b/solr/webapp/web/css/screen.css
index b3a53ef..f022d34 100644
--- a/solr/webapp/web/css/screen.css
+++ b/solr/webapp/web/css/screen.css
@@ -39,6 +39,11 @@
     text-align: left;
 }
 
+abbr
+{
+    cursor: help;
+}
+
 ul
 {
     list-style: none;
@@ -110,7 +115,7 @@
 {
     border: 1px solid #c0c0c0;
     min-height: 600px;
-    min-width:750px;
+    min-width: 750px;
     position: relative;
 }
 
@@ -753,10 +758,36 @@
 
 /* analysis */
 
-#content #analysis .settings
+#content #analysis-holder
+{
+    background-image: url( ../img/div.gif );
+    background-position: 50% 0;
+    background-repeat: repeat-y;
+}
+
+#content #analysis #field-analysis
+{
+    margin-bottom: 0;
+}
+
+#content #analysis #field-analysis .content
+{
+    padding-bottom: 0;
+}
+
+#content #analysis .settings-holder
 {
     clear: both;
+    padding-top: 15px;
+}
+
+#content #analysis .settings
+{
+    background-color: #fff;
+    border-top: 1px solid #fafafa;
+    border-bottom: 1px solid #fafafa;
     padding-top: 10px;
+    padding-bottom: 10px;
 }
 
 #content #analysis .settings select.loader
@@ -779,7 +810,7 @@
 #content #analysis .settings div
 {
     float: right;
-    width: 49%;
+    width: 47%;
 }
 
 #content #analysis .settings button
@@ -787,6 +818,32 @@
     float: right;
 }
 
+#content #analysis .settings button.loader
+{
+    background-position: 2px 50%;
+    padding-left: 21px;
+}
+
+#content #analysis .settings .verbose_output
+{
+    float: left;
+    width: auto;
+}
+
+#content #analysis .settings .verbose_output a
+{
+    background-image: url( ../img/ico/ui-check-box-uncheck.png );
+    background-position: 0 50%;
+    color: #999;
+    display: block;
+    padding-left: 21px;
+}
+
+#content #analysis .settings .verbose_output.active a
+{
+    background-image: url( ../img/ico/ui-check-box.png );
+}
+
 #content #analysis .index label,
 #content #analysis .query label
 {
@@ -804,7 +861,7 @@
 {
     float: left;
     margin-right: 0.5%;
-    min-width: 49%;
+    min-width: 47%;
     max-width: 99%;
 }
 
@@ -812,7 +869,7 @@
 {
     float: right;
     margin-left: 0.5%;
-    min-width: 49%;
+    min-width: 47%;
     max-width: 99%;
 }
 
@@ -829,117 +886,135 @@
     padding-left: 35px;
 }
 
-#content #analysis .analysis-result h2
+#content #analysis #analysis-result
 {
-    position: relative;
+    overflow: auto;
 }
 
-#content #analysis .analysis-result h2 .verbose_output
+#content #analysis #analysis-result .index,
+#content #analysis #analysis-result .query
 {
-    position: absolute;
-    right: 5px;
-    top: 5px;
+    background-color: #fff;
+    padding-top: 20px;
 }
 
-#content #analysis .analysis-result h2 .verbose_output a
+#content #analysis #analysis-result table
 {
-    background-image: url( ../img/ico/ui-check-box-uncheck.png );
-    background-position: 0 50%;
-    color: #999;
-    display: block;
-    padding-left: 21px;
+    border-collapse: collapse;
 }
 
-#content #analysis .analysis-result.verbose_output h2 .verbose_output a
+#content #analysis #analysis-result td
 {
-    background-image: url( ../img/ico/ui-check-box.png );
-    color: #080;
+    vertical-align: top;
+    white-space: nowrap;
 }
 
-#content #analysis .analysis-result .index,
-#content #analysis .analysis-result .query
+#content #analysis #analysis-result td.part.analyzer div,
+#content #analysis #analysis-result td.part.spacer .holder,
+#content #analysis #analysis-result td td td
 {
-    margin-bottom: 10px;
+    padding-top: 1px;
+    padding-bottom: 1px;
 }
 
-#content #analysis .analysis-result .row
-{
-    border-top: 1px solid #f0f0f0;
-    margin-top: 10px;
-    padding-top: 10px;
-}
-
-#content #analysis .analysis-result .row:first-child
-{
-    border-top: 0;
-    margin-top: 0;
-    padding-top: 0;
-}
-
-#content #analysis .analysis-result .row .analyzer
-{
-    color: #c0c0c0;
-}
-
-#content #analysis .analysis-result .row:hover .analyzer
-{
-    color: #333;
-}
-
-#content #analysis .analysis-result .row table tr.verbose_output
+#content #analysis #analysis-result td.legend,
+#content #analysis #analysis-result td.data tr.verbose_output
 {
     display: none;
 }
 
-#content #analysis .analysis-result.verbose_output .row table tr.verbose_output
+#content #analysis #analysis-result.verbose_output td.legend
+{
+    display: table-cell;
+}
+
+#content #analysis #analysis-result.verbose_output td.data tr.verbose_output
 {
     display: table-row;
 }
 
-#content #analysis .analysis-result .row table th,
-#content #analysis .analysis-result .row table td
+#content #analysis #analysis-result .match
 {
-    border-top: 1px solid #f0f0f0;
-    vertical-align: top;
+    background-color: #e9eff7;
+    background-color: #f2f2ff;
 }
 
-#content #analysis .analysis-result .row table th
+#content #analysis #analysis-result td.part
 {
-    background-color: #f8f8f8;
-    color: #999;
-    padding: 2px;
-    padding-bottom: 0;
+    padding-bottom: 10px;
 }
 
-#content #analysis .analysis-result .row table th abbr
+#content #analysis #analysis-result td.part.analyzer div
 {
-    border: 0;
-    cursor: help;
+    border-right: 1px solid #f0f0f0;
+    padding-right: 10px;
 }
 
-#content #analysis .analysis-result .row table td
+#content #analysis #analysis-result td.part.analyzer abbr
 {
-    border-left: 1px solid #dcdcdc;
+    color: #c0c0c0;
 }
 
-#content #analysis .analysis-result .row table td div
+#content #analysis #analysis-result td.part.legend .holder,
+#content #analysis #analysis-result td.part.data .holder
 {
-    border-top: 1px solid #f0f0f0;
-    display: block;
-    padding: 1px 2px;
+    padding-left: 10px;
+    padding-right: 10px;
+    border-right: 1px solid #c0c0c0;
 }
 
-#content #analysis .analysis-result .row table td div.empty
+#content #analysis #analysis-result td.part.legend td
+{
+    color: #c0c0c0;
+}
+
+#content #analysis #analysis-result td.part.legend .holder
+{
+    border-right-color: #f0f0f0;
+}
+
+#content #analysis #analysis-result td.part.data:last-child .holder
+{
+    padding-right: 0;
+    border-right: 0;
+}
+
+#content #analysis #analysis-result td.details 
+{
+    padding-left: 10px;
+    padding-right: 10px;
+    border-left: 1px solid #f0f0f0;
+    border-right: 1px solid #f0f0f0;
+}
+
+#content #analysis #analysis-result td.details:first-child
+{
+    padding-left: 0;
+    border-left: 0;
+}
+
+#content #analysis #analysis-result td.details:last-child
+{
+    padding-right: 0;
+    border-right: 0;
+}
+
+#content #analysis #analysis-result td.details tr.empty td
 {
     color: #f0f0f0;
 }
 
-#content #analysis .analysis-result .row table td div.match
+#content #analysis #analysis-result td.details tr.raw_bytes td
 {
-    background-color: #e9eff7;
+    letter-spacing: -1px;
 }
 
-#content #analysis .analysis-result .row table td div:first-child
+#content #analysis #analysis-result .part table table td
+{
+    border-top: 1px solid #f0f0f0;
+}
+
+#content #analysis #analysis-result .part table table tr:first-child td
 {
     border-top: 0;
 }
diff --git a/solr/webapp/web/index.jsp b/solr/webapp/web/index.jsp
index 1a50a8a..c0df5e2 100644
--- a/solr/webapp/web/index.jsp
+++ b/solr/webapp/web/index.jsp
@@ -38,7 +38,7 @@
             <div id="wip-notice">
                 <p>This interface is work in progress. It works best in Chrome.</p>
                 <p><a href="admin">Use the <span>old admin interface</span> if there are problems with this one.</a></p>
-                <p><a href="https://issues.apache.org/jira/browse/SOLR-2399">Bugs/Requests/Suggestions: <span>SOLR-2399</span></a></p>
+                <p><a href="https://issues.apache.org/jira/browse/SOLR-2667">Bugs/Requests/Suggestions: <span>SOLR-2667</span></a></p>
             </div>
 
             <p id="environment">&nbsp;</p>
diff --git a/solr/webapp/web/js/script.js b/solr/webapp/web/js/script.js
index d986056..d628c02 100644
--- a/solr/webapp/web/js/script.js
+++ b/solr/webapp/web/js/script.js
@@ -14,6 +14,16 @@
     
 };
 
+// minimal HTML-escape helpers used when building markup from response data;
+// note that only '<' and '>' are escaped, ampersands pass through unchanged
+Number.prototype.esc = function()
+{
+    return new String( this ).esc();
+};
+
+String.prototype.esc = function()
+{
+    return this.replace( /</g, '&lt;' ).replace( />/g, '&gt;' );
+};
+
 var sammy = $.sammy
 (
     function()
@@ -712,7 +722,7 @@
 
                             for( var key in response.levels )
                             {
-                                var level = response.levels[key];
+                                var level = response.levels[key].esc();
                                 loglevel += '<li class="' + level + '"><a>' + level + '</a></li>' + "\n";
                             }
 
@@ -754,8 +764,8 @@
                                     logger_content += '<li class="jstree-leaf">';
                                     logger_content += '<ins class="jstree-icon">&nbsp;</ins>';
                                     logger_content += '<a class="trigger ' + classes.join( ' ' ) + '" ' + "\n" +
-                                                         'title="' + logger_name + '"><span>' + "\n" +
-                                                        logger_name.split( '.' ).pop() + "\n" +
+                                                         'title="' + logger_name.esc() + '"><span>' + "\n" +
+                                                        logger_name.split( '.' ).pop().esc() + "\n" +
                                                       '</span></a>';
 
                                     logger_content += loglevel
@@ -800,7 +810,7 @@
                                     function( index, element )
                                     {
                                         var element = $( element );
-                                        var effective_level = $( '.effective_level span', element ).html();
+                                        var effective_level = $( '.effective_level span', element ).text();
 
                                         element
                                             .css( 'z-index', 800 - index );
@@ -891,11 +901,11 @@
                                 }
 
                                 var item_content = '<li><dl class="' + item_class + '">' + "\n" +
-                                                   '<dt>' + displayed_key + '</dt>' + "\n";
+                                                   '<dt>' + displayed_key.esc() + '</dt>' + "\n";
 
                                 for( var i in displayed_value )
                                 {
-                                    item_content += '<dd>' + displayed_value[i] + '</dd>' + "\n";
+                                    item_content += '<dd>' + displayed_value[i].esc() + '</dd>' + "\n";
                                 }
 
                                 item_content += '</dl></li>';
@@ -965,8 +975,8 @@
                                     var c = 0;
                                     for( var i = 1; i < threadDumpData.length; i += 2 )
                                     {
-                                        var state = threadDumpData[i].state;
-                                        var name = '<a><span>' + threadDumpData[i].name + '</span></a>';
+                                        var state = threadDumpData[i].state.esc();
+                                        var name = '<a><span>' + threadDumpData[i].name.esc() + '</span></a>';
 
                                         var classes = [state];
                                         var details = '';
@@ -979,7 +989,7 @@
                                         if( threadDumpData[i].lock )
                                         {
                                             classes.push( 'lock' );
-                                            name += "\n" + '<p title="Waiting on">' + threadDumpData[i].lock + '</p>';
+                                            name += "\n" + '<p title="Waiting on">' + threadDumpData[i].lock.esc() + '</p>';
                                         }
 
                                         if( threadDumpData[i].stackTrace && 0 !== threadDumpData[i].stackTrace.length )
@@ -987,8 +997,10 @@
                                             classes.push( 'stacktrace' );
 
                                             var stack_trace = threadDumpData[i].stackTrace
-                                                                .join( '</li><li>' )
-                                                                .replace( /\(/g, '&#8203;(' );
+                                                                .join( '###' )
+                                                                .esc()
+                                                                .replace( /\(/g, '&#8203;(' )
+                                                                .replace( /###/g, '</li><li>' );
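+                                            // the '###' sentinel lets esc() run over the raw frames first;
+                                            // '&#8203;' adds soft break opportunities before '(' in long signatures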
 
                                             name += '<div>' + "\n"
                                                     + '<ul>' + "\n"
@@ -1000,10 +1012,10 @@
                                         var item = '<tr class="' + classes.join( ' ' ) +'">' + "\n"
 
                                                  + '<td class="ico" title="' + state +'"><span>' + state +'</span></td>' + "\n"
-                                                 + '<td class="id">' + threadDumpData[i].id + '</td>' + "\n"
+                                                 + '<td class="id">' + threadDumpData[i].id.esc() + '</td>' + "\n"
                                                  + '<td class="name">' + name + '</td>' + "\n"
-                                                 + '<td class="time">' + threadDumpData[i].cpuTime + '</td>' + "\n"
-                                                 + '<td class="time">' + threadDumpData[i].userTime + '</td>' + "\n"
+                                                 + '<td class="time">' + threadDumpData[i].cpuTime.esc() + '</td>' + "\n"
+                                                 + '<td class="time">' + threadDumpData[i].userTime.esc() + '</td>' + "\n"
 
                                                  + '</tr>';
                                         
@@ -3402,6 +3414,8 @@
                         
                         var analysis_element = $( '#analysis', content_element );
                         var analysis_form = $( 'form', analysis_element );
+                        var analysis_result = $( '#analysis-result', analysis_element );
+                        analysis_result.hide();
                         
                         $.ajax
                         (
@@ -3465,10 +3479,56 @@
                                 }
                             }
                         );
+                                
+                        $( '.verbose_output a', analysis_element )
+                            .die( 'click' )
+                            .live
+                            (
+                                'click',
+                                function( event )
+                                {
+                                    $( this ).parent()
+                                        .toggleClass( 'active' );
+                                    
+                                    analysis_result
+                                        .toggleClass( 'verbose_output' );
+                                    
+                                    check_empty_spacer();
+                                }
+                            );
                         
-                        var analysis_result = $( '.analysis-result', analysis_element );
-                        analysis_result_tpl = analysis_result.clone();
-                        analysis_result.remove();
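+                        // spacer cells mark empty token positions; in verbose output their holder is
+                        // stretched to the row height so the cell borders stay continuous, otherwise
+                        // any inline height is cleared again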
+                        var check_empty_spacer = function()
+                        {
+                            var spacer_holder = $( 'td.part.data.spacer .holder', analysis_result );
+
+                            if( 0 === spacer_holder.size() )
+                            {
+                                return false;
+                            }
+
+                            var verbose_output = analysis_result.hasClass( 'verbose_output' );
+
+                            spacer_holder
+                                .each
+                                (
+                                    function( index, element )
+                                    {
+                                        element = $( element );
+
+                                        if( verbose_output )
+                                        {
+                                            var cell = element.parent();
+                                            element.height( cell.height() );
+                                        }
+                                        else
+                                        {
+                                            element.removeAttr( 'style' );
+                                        }
+                                    }
+                                );
+                        };
+
+                        var button = $( 'button', analysis_form );
                         
                         analysis_form
                             .ajaxForm
@@ -3478,10 +3538,8 @@
                                     dataType : 'json',
                                     beforeSubmit : function( array, form, options )
                                     {
-                                        //loader
-                                        
-                                        $( '.analysis-result', analysis_element )
-                                            .remove();
+                                        loader.show( button );
+                                        button.attr( 'disabled', true );
                                         
                                         array.push( { name: 'analysis.showmatch', value: 'true' } );
                                         
@@ -3491,6 +3549,10 @@
                                     },
                                     success : function( response, status_text, xhr, form )
                                     {
+                                        analysis_result
+                                            .empty()
+                                            .show();
+                                        
                                         for( var name in response.analysis.field_names )
                                         {
                                             build_analysis_table( 'name', name, response.analysis.field_names[name] );
@@ -3500,6 +3562,8 @@
                                         {
                                             build_analysis_table( 'type', name, response.analysis.field_types[name] );
                                         }
+
+                                        check_empty_spacer();
                                     },
                                     error : function( xhr, text_status, error_thrown )
                                     {
@@ -3508,136 +3572,208 @@
                                     },
                                     complete : function()
                                     {
-                                        //loader
+                                        loader.hide( $( 'button', analysis_form ) );
+                                        button.removeAttr( 'disabled' );
                                     }
                                 }
                             );
+
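+                            // every attribute row except the token text is tagged "verbose_output",
+                            // so it is hidden unless verbose mode is active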
+                            var generate_class_name = function( type )
+                            {
+                                var classes = [type];
+                                if( 'text' !== type )
+                                {
+                                    classes.push( 'verbose_output' );
+                                }
+                                return classes.join( ' ' );
+                            };
                             
                             var build_analysis_table = function( field_or_name, name, analysis_data )
-                            {                                
-                                var analysis_result_data = analysis_result_tpl.clone();
-                                var content = [];
-                                
+                            {
                                 for( var type in analysis_data )
                                 {
                                     var type_length = analysis_data[type].length;
                                     if( 0 !== type_length )
                                     {
-                                        var type_content = '<div class="' + type + '">' + "\n";
-                                        for( var i = 0; i < type_length; i += 2 )
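+                                        // first pass: wrap plain-string stages in a token list and find the
+                                        // widest stage, so every row gets the same number of data columns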
+                                        var global_elements_count = 0;
+                                        for( var i = 0; i < analysis_data[type].length; i += 2 )
                                         {
-                                            type_content += '<div class="row">' + "\n";
-                                        
-                                            var analyzer_parts = analysis_data[type][i].split( '.' );
-                                            var analyzer_parts_name = analyzer_parts.pop();
-                                            var analyzer_parts_namespace = analyzer_parts.join( '.' ) + '.';
-                                                                                        
-                                            type_content += '<div class="analyzer" title="' + analysis_data[type][i] +'">' + 
-                                                            analyzer_parts_name + '</div>' + "\n";
-
-                                            var raw_parts = {
-                                                'position' : [],
-                                                'text' : [],
-                                                'type' : [],
-                                                'start-end' : []
-                                            };
-                                            
-                                            for( var k in analysis_data[type][i+1] )
+                                            if( 'string' === typeof analysis_data[type][i+1] )
                                             {
-                                                var pos = analysis_data[type][i+1][k]['position'] - 1;
-                                                var is_match = !!analysis_data[type][i+1][k]['match'];
-                                            
-                                                if( 'undefined' === typeof raw_parts['text'][pos] )
-                                                {
-                                                    raw_parts['position'][pos] = [];
-                                                    raw_parts['text'][pos] = [];
-                                                    raw_parts['type'][pos] = [];
-                                                    raw_parts['start-end'][pos] = [];
-
-                                                    raw_parts['position'][pos].push( '<div>' + analysis_data[type][i+1][k]['position'] + '</div>' );
-                                                }
-
-                                                raw_parts['text'][pos].push( '<div class="' + ( is_match ? 'match' : '' ) + '">' + analysis_data[type][i+1][k]['text'] + '</div>' );
-                                                raw_parts['type'][pos].push( '<div>' + analysis_data[type][i+1][k]['type'] + '</div>' );
-                                                raw_parts['start-end'][pos].push( '<div>' + analysis_data[type][i+1][k]['start'] + '–' + analysis_data[type][i+1][k]['end'] + '</div>' );
+                                                analysis_data[type][i+1] = [{ 'text': analysis_data[type][i+1] }];
                                             }
-
-                                            var parts = {
-                                                'position' : [],
-                                                'text' : [],
-                                                'type' : [],
-                                                'start-end' : []
-                                            };
-
-                                            for( var key in raw_parts )
-                                            {
-                                                var length = raw_parts[key].length;
-                                                for( var j = 0; j < length; j++ )
-                                                {
-                                                    if( raw_parts[key][j] )
-                                                    {
-                                                        parts[key].push( '<td>' + raw_parts[key][j].join( "\n" ) + '</td>' );
-                                                    }
-                                                    else
-                                                    {
-                                                        parts[key].push( '<td><div class="empty">&empty;</div></td>' );
-                                                    }
-                                                }
-                                            }
-
-                                            type_content += '<div class="result">' + "\n";
-                                            type_content += '<table border="0" cellspacing="0" cellpadding="0">' + "\n";
-                                            
-                                            type_content += '<tr class="verbose_output">' + "\n";
-                                            type_content += '<th><abbr title="Position">P</abbr></th>' + "\n";
-                                            type_content += parts['position'].join( "\n" ) + "\n";
-                                            type_content += '</tr>' + "\n";
-                                                                                        
-                                            type_content += '<tr>' + "\n";
-                                            type_content += '<th><abbr title="Text">T</abbr></th>' + "\n";
-                                            type_content += parts['text'].join( "\n" ) + "\n";
-                                            type_content += '</tr>' + "\n";
-
-                                            type_content += '<tr class="verbose_output">' + "\n";
-                                            type_content += '<th><abbr title="Type">T</abbr></th>' + "\n";
-                                            type_content += parts['type'].join( "\n" ) + "\n";
-                                            type_content += '</tr>' + "\n";
-
-                                            type_content += '<tr class="verbose_output">' + "\n";
-                                            type_content += '<th><abbr title="Range (Start, End)">R</abbr></th>' + "\n";
-                                            type_content += parts['start-end'].join( "\n" ) + "\n";
-                                            type_content += '</tr>' + "\n";
-                                            
-                                            type_content += '</table>' + "\n";
-                                            type_content += '</div>' + "\n";
-                                            
-                                            type_content += '</div>' + "\n";
+                                            global_elements_count = Math.max( global_elements_count,
+                                                                              analysis_data[type][i+1].length );
                                         }
-                                        type_content += '</div>';
-                                        content.push( $.trim( type_content ) );
+
+                                        var content = '<div class="' + type + '">' + "\n";
+                                        content += '<table border="0" cellspacing="0" cellpadding="0">' + "\n";
+                                        
+                                        for( var i = 0; i < analysis_data[type].length; i += 2 )
+                                        {
+                                            var colspan = 1;
+                                            var elements = analysis_data[type][i+1];
+                                            var elements_count = global_elements_count;
+                                            
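+                                            // a stage whose tokens carry no positionHistory gets a single
+                                            // cell spanning all data columns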
+                                            if( !elements[0].positionHistory )
+                                            {
+                                                colspan = elements_count;
+                                                elements_count = 1;
+                                            }
+
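+                                            // legend rows come from the first token's keys; keys namespaced
+                                            // with '#' are shortened, the full key becomes an abbr tooltip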
+                                            var legend = [];
+                                            for( var key in elements[0] )
+                                            {
+                                                var key_parts = key.split( '#' );
+                                                var used_key = key_parts.pop();
+                                                var short_key = used_key;
+
+                                                if( 1 === key_parts.length )
+                                                {
+                                                    used_key = '<abbr title="' + key + '">' + used_key + '</abbr>';
+                                                }
+
+                                                if( 'positionHistory' === short_key || 'match' === short_key )
+                                                {
+                                                    continue;
+                                                }
+
+                                                legend.push
+                                                (
+                                                    '<tr class="' + generate_class_name( short_key ) + '">' +
+                                                    '<td>' + used_key + '</td>' +
+                                                    '</tr>'
+                                                );
+                                            }
+
+                                            content += '<tbody>' + "\n";
+                                            content += '<tr class="step">' + "\n";
+
+                                                // analyzer
+                                                var analyzer_name = analysis_data[type][i]
+                                                                        .replace( /(\$1)+$/g, '' );
+
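+                                                // abbreviate the component class to its capital letters,
+                                                // e.g. "WhitespaceTokenizerFactory" becomes "WTF"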
+                                                var analyzer_short = -1 !== analyzer_name.indexOf( '$' )
+                                                                   ? analyzer_name.split( '$' )[1]
+                                                                   : analyzer_name.split( '.' ).pop();
+                                                analyzer_short = analyzer_short.match( /[A-Z]/g ).join( '' );
+
+                                                content += '<td class="part analyzer"><div>' + "\n";
+                                                content += '<abbr title="' + analysis_data[type][i] + '">' + "\n";
+                                                content += analyzer_short + '</abbr></div></td>' + "\n";
+
+                                                // legend
+                                                content += '<td class="part legend"><div class="holder">' + "\n";
+                                                content += '<table border="0" cellspacing="0" cellpadding="0">' + "\n";
+                                                content += '<tr><td>' + "\n";
+                                                content += '<table border="0" cellspacing="0" cellpadding="0">' + "\n";
+                                                content += legend.join( "\n" ) + "\n";
+                                                content += '</table></td></tr></table></td>' + "\n";
+
+                                                // data
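+                                                // one placeholder (spacer) cell per column; the Array.join
+                                                // trick repeats cell_content elements_count times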
+                                                var cell_content = '<td class="part data spacer" '
+                                                                 + '    colspan="' + colspan + '">'
+                                                                 + '<div class="holder">&nbsp;</div>'
+                                                                 + '</td>';
+                                                var cells = new Array( elements_count + 1 ).join( cell_content );
+                                                content += cells + "\n";
+
+                                            content += '</tr>' + "\n";
+                                            content += '</tbody>' + "\n";
+                                        }
+                                        content += '</table>' + "\n";
+                                        content += '</div>' + "\n";
+
+                                        $( '.' + type, analysis_result )
+                                            .remove();
+
+                                        analysis_result
+                                            .append( content );
+                                        
+                                        var analysis_result_type = $( '.' + type, analysis_result );
+
+                                        for( var i = 0; i < analysis_data[type].length; i += 2 )
+                                        {
+                                            for( var j = 0; j < analysis_data[type][i+1].length; j += 1 )
+                                            {
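+                                                // second pass: drop each token into the data cell of its first
+                                                // recorded position (positionHistory[0], defaulting to 1)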
+                                                var pos = analysis_data[type][i+1][j].positionHistory
+                                                        ? analysis_data[type][i+1][j].positionHistory[0]
+                                                        : 1;
+                                                var selector = 'tr.step:eq(' + ( i / 2 ) +') '
+                                                             + 'td.data:eq(' + ( pos - 1 ) + ') '
+                                                             + '.holder';
+                                                var cell = $( selector, analysis_result_type );
+
+                                                cell.parent()
+                                                    .removeClass( 'spacer' );
+
+                                                var table = $( 'table tr.details', cell );
+                                                if( 0 === table.size() )
+                                                {
+                                                    cell
+                                                        .html
+                                                        (
+                                                            '<table border="0" cellspacing="0" cellpadding="0">' + 
+                                                            '<tr class="details"></tr></table>'
+                                                        );
+                                                    table = $( 'table tr.details', cell );
+                                                }
+
+                                                var tokens = [];
+                                                for( var key in analysis_data[type][i+1][j] )
+                                                {
+                                                    var short_key = key.split( '#' ).pop();
+                                                    
+                                                    if( 'positionHistory' === short_key || 'match' === short_key )
+                                                    {
+                                                        continue;
+                                                    }
+
+                                                    var classes = [];
+                                                    classes.push( generate_class_name( short_key ) );
+
+                                                    var data = analysis_data[type][i+1][j][key];
+                                                    if( 'object' === typeof data && data instanceof Array )
+                                                    {
+                                                        data = data.join( ' ' );
+                                                    }
+                                                    if( 'string' === typeof data )
+                                                    {
+                                                        data = data.esc();
+                                                    }
+
+                                                    if( null === data || 0 === data.length )
+                                                    {
+                                                        classes.push( 'empty' );
+                                                        data = '&empty;';
+                                                    }
+
+                                                    if( analysis_data[type][i+1][j].match && 
+                                                        ( 'text' === short_key || 'raw_bytes' === short_key ) )
+                                                    {
+                                                        classes.push( 'match' );
+                                                    }
+
+                                                    tokens.push
+                                                    (
+                                                        '<tr class="' + classes.join( ' ' ) + '">' +
+                                                        '<td>' + data + '</td>' +
+                                                        '</tr>'
+                                                    );
+                                                }
+                                                table
+                                                    .append
+                                                    (
+                                                        '<td class="details">' +
+                                                        '<table border="0" cellspacing="0" cellpadding="0">' +
+                                                        tokens.join( "\n" ) +
+                                                        '</table></td>'
+                                                    );
+                                            }
+                                        }
+                        
                                     }
                                 }
-                                
-                                $( 'h2 span', analysis_result_data )
-                                    .html( field_or_name + ': ' + name );
-                                
-                                $( 'h2 .verbose_output a', analysis_result_data )
-                                    .die( 'click' )
-                                    .live
-                                    (
-                                        'click',
-                                        function( event )
-                                        {
-                                            $( this ).parents( '.block' )
-                                                .toggleClass( 'verbose_output' );
-                                        }
-                                    );
-                                
-                                $( '.analysis-result-content', analysis_result_data )
-                                    .html( content.join( "\n" ) );
-                                
-                                analysis_element.append( analysis_result_data );
-                                
                             }
                             
                     }
@@ -4161,7 +4297,7 @@
                             for( var key in memory_data )
                             {                                                        
                                 $( '.value.' + key, this )
-                                    .html( memory_data[key] );
+                                    .text( memory_data[key] );
                             }
             
                             var data = {
@@ -4184,7 +4320,7 @@
                                 var value_element = $( '.' + key + ' dd', this );
 
                                 value_element
-                                    .html( data[key] );
+                                    .text( data[key] );
                                 
                                 value_element.closest( 'li' )
                                     .show();
@@ -4200,7 +4336,7 @@
                                 for( var key in commandLineArgs )
                                 {
                                     cmd_arg_element = cmd_arg_element.clone();
-                                    cmd_arg_element.html( commandLineArgs[key] );
+                                    cmd_arg_element.text( commandLineArgs[key] );
 
                                     cmd_arg_key_element
                                         .after( cmd_arg_element );
@@ -4247,7 +4383,7 @@
                             var headline = $( '#memory h2 span', this );
                                 
                             headline
-                                .html( headline.html() + ' (' + memory_percentage + '%)' );
+                                .text( headline.text() + ' (' + memory_percentage + '%)' );
 
                             $( '#memory-bar .value', this )
                                 .each
@@ -4266,7 +4402,7 @@
                                         byte_value = byte_value.toFixed( 2 ) + ' MB';
 
                                         self
-                                            .html( byte_value );
+                                            .text( byte_value );
                                     }
                                 );
                         },
diff --git a/solr/webapp/web/tpl/analysis.html b/solr/webapp/web/tpl/analysis.html
index c801326..0c5e18c 100644
--- a/solr/webapp/web/tpl/analysis.html
+++ b/solr/webapp/web/tpl/analysis.html
@@ -6,70 +6,55 @@
 
     </div>
 
-    <div class="block" id="field-analysis">
-    
-        <h2><span>Field Analysis</span></h2>
-        <div class="content">
-    
-        <div class="message-container">
-            <div class="message"></div>
-        </div>
-        
-        <form method="get">
+    <div id="analysis-holder">
+
+        <div id="field-analysis">
+              
+            <form method="get">
+                
+                <ul class="clearfix">
+                    
+                    <li class="index">
+                        
+                        <label for="analysis_fieldvalue_index">Field Value (Index)</label>
+                        <textarea name="analysis.fieldvalue" id="analysis_fieldvalue_index"></textarea>
+                        
+                    </li>
+                    
+                    <li class="query">
+                        
+                        <label for="analysis_fieldvalue_query">Field Value (Query)</label>
+                        <textarea name="analysis.query" id="analysis_fieldvalue_query"></textarea>
+                        
+                    </li>
+
+                    <li class="settings-holder clearfix">
+                        <div class="settings">
+
+                            <label for="type_or_name">Analyse Fieldname / FieldType:</label>
+                            <select id="type_or_name"></select>
+
+                            <div>
+
+                                <button type="submit">Analyse Values</button>
+
+                                <div class="verbose_output active">
+                                    <a>Verbose Output</a>
+                                </div>
+
+                            </div>
+
+                        </div>
+                    </li>
+                    
+                </ul>
+                
+            </form>
             
-            <ul class="clearfix">
-                
-                <li class="index">
-                    
-                    <label for="analysis_fieldvalue_index">Field Value (Index)</label>
-                    <textarea name="analysis.fieldvalue" id="analysis_fieldvalue_index"></textarea>
-                    
-                </li>
-                
-                <li class="query">
-                    
-                    <label for="analysis_fieldvalue_query">Field Value (Query)</label>
-                    <textarea name="analysis.query" id="analysis_fieldvalue_query"></textarea>
-                    
-                </li>
-
-                <li class="settings clearfix">
-
-                    <label for="type_or_name">Analyse Fieldname / FieldType:</label>
-                    <select id="type_or_name"></select>
-
-                    <div>
-
-                        <button type="submit">Analyse Values</button>
-
-                    </div>
-
-                </li>
-                
-            </ul>
-            
-        </form>
-        
-        </div>
-    </div>
-    
-    <div class="block analysis-result">
-        
-        <h2>
-            <span>{headline}</span>
-            <div class="verbose_output">
-                <a>Verbose Output</a>
-            </div>
-        </h2>
-        <div class="content">
-        
-        <div class="message-container">
-            <div class="message"></div>
         </div>
         
-        <div class="analysis-result-content clearfix">{content}</div>
-        
-        </div>
+        <div id="analysis-result" class="clearfix verbose_output"></div>
+
     </div>
 
 </div>
\ No newline at end of file