merged with trunk

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/LUCENE2793@1144189 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index 58f2a38..7d76af0 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -156,6 +156,12 @@
   the queries module and can be found at o.a.l.queries.function.  See MIGRATE.txt
   for more information (Chris Male)
 
+* LUCENE-2392: Decoupled vector space scoring from Query/Weight/Scorer. If you
+  extended Similarity directly before, you should extend TFIDFSimilarity instead.
+  Similarity is now a lower-level API to implement other scoring algorithms.
+  See MIGRATE.txt for more details.
+  (David Nemeskey, Simon Willnauer, Mike McCandless, Robert Muir)
+
 Changes in Runtime Behavior
 
 * LUCENE-2846: omitNorms now behaves like omitTermFrequencyAndPositions, if you
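
A minimal sketch of the new extension point named above, modeled on the
DefaultSimilarity overrides that appear later in this patch (the class name and
norm formula are illustrative only):

    import org.apache.lucene.index.FieldInvertState;
    import org.apache.lucene.search.DefaultSimilarity;

    // Extend a TFIDFSimilarity subclass such as DefaultSimilarity rather than
    // Similarity itself; computeNorm now returns the encoded byte directly.
    public class MySimilarity extends DefaultSimilarity {
      @Override
      public byte computeNorm(FieldInvertState state) {
        final float norm = state.getBoost() / (float) Math.sqrt(state.getLength());
        return encodeNormValue(norm); // encoding now happens inside computeNorm
      }
    }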
diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt
index ffbdef4..268ca52 100644
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@ -382,3 +382,13 @@
    - o.a.l.search.function.ShortFieldSource -> o.a.l.queries.function.valuesource.ShortFieldSource
    - o.a.l.search.function.ValueSource -> o.a.l.queries.function.ValueSource
    - o.a.l.search.function.ValueSourceQuery -> o.a.l.queries.function.FunctionQuery
+
+* LUCENE-2392: Enable flexible scoring:
+
+  The existing "Similarity" API is now TFIDFSimilarity. If you were extending
+  Similarity before, you should likely extend TFIDFSimilarity instead.
+
+  Weight.normalize no longer takes a norm value that incorporates the top-level
+  boost from outer queries such as BooleanQuery. Instead it takes two parameters:
+  the outer boost (topLevelBoost) and the norm. Weight.sumOfSquaredWeights has
+  been renamed to Weight.getValueForNormalization().
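
A before/after sketch of the Weight change, assuming a hypothetical custom
Weight that keeps its normalized weight in a queryWeight field:

    // before:
    public float sumOfSquaredWeights() throws IOException {
      return queryWeight * queryWeight;
    }
    public void normalize(float norm) {
      queryWeight *= norm; // norm already had the top-level boost folded in
    }

    // after:
    public float getValueForNormalization() throws IOException {
      return queryWeight * queryWeight;
    }
    public void normalize(float norm, float topLevelBoost) {
      queryWeight *= norm * topLevelBoost; // outer boost is now applied explicitly
    }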
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 0de68e1..2af53fe 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -331,7 +331,7 @@
       <artifact:deploy file="${build.dir}/${maven.project.build.finalName}.jar">
         <artifact-attachments/>
       	<remoteRepository url="${m2.repository.url}">
-          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}"/>
+          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}" password="${m2.repository.password}"/>
         </remoteRepository>
         <pom refid="maven.project"/>
       </artifact:deploy>
@@ -351,7 +351,7 @@
       <artifact:pom id="maven.project" file="${maven.build.dir}/@{pom.xml}" />
       <artifact:deploy file="@{jar.file}">
         <remoteRepository url="${m2.repository.url}">
-          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}"/>
+          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}" password="${m2.repository.password}"/>
         </remoteRepository>
         <pom refid="maven.project"/>
       </artifact:deploy>
diff --git a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
index 26c166e..8b26350 100644
--- a/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
+++ b/lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
@@ -240,8 +240,7 @@
           final FieldInvertState invertState = new FieldInvertState();
           invertState.setBoost(eFieldTermDocInfoFactoriesByTermText.getKey().boost * document.getDocument().getBoost());
           invertState.setLength(eFieldTermDocInfoFactoriesByTermText.getKey().fieldLength);
-          final float norm = similarityProvider.get(fieldName).computeNorm(invertState);
-          normsByFieldNameAndDocumentNumber.get(fieldName)[document.getDocumentNumber()] = similarityProvider.get(fieldName).encodeNormValue(norm);
+          normsByFieldNameAndDocumentNumber.get(fieldName)[document.getDocumentNumber()] = similarityProvider.get(fieldName).computeNorm(invertState);
         } else {
           System.currentTimeMillis();
         }
diff --git a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index 9e383eb..727d47c 100644
--- a/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -51,7 +51,6 @@
 import org.apache.lucene.index.TermPositionVector;
 import org.apache.lucene.index.TermVectorMapper;
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.IndexSearcher;
@@ -1202,19 +1201,18 @@
         int numOverlapTokens = info != null ? info.numOverlapTokens : 0;
         float boost = info != null ? info.getBoost() : 1.0f; 
         FieldInvertState invertState = new FieldInvertState(0, numTokens, numOverlapTokens, 0, boost);
-        float n = fieldSim.computeNorm(invertState);
-        byte norm = fieldSim.encodeNormValue(n);
+        byte norm = fieldSim.computeNorm(invertState);
         norms = new byte[] {norm};
         
         // cache it for future reuse
         cachedNorms = norms;
         cachedFieldName = fieldName;
         cachedSimilarity = sim;
-        if (DEBUG) System.err.println("MemoryIndexReader.norms: " + fieldName + ":" + n + ":" + norm + ":" + numTokens);
+        if (DEBUG) System.err.println("MemoryIndexReader.norms: " + fieldName + ":" + norm + ":" + numTokens);
       }
       return norms;
     }
-  
+
     @Override
     protected void doSetNorm(int doc, String fieldName, byte value) {
       throw new UnsupportedOperationException();
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
index d367324..c17ac02 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/FieldNormModifier.java
@@ -147,7 +147,7 @@
         for (int d = 0; d < termCounts.length; d++) {
           if (liveDocs == null || liveDocs.get(d)) {
             invertState.setLength(termCounts[d]);
-            subReader.setNorm(d, field, fieldSim.encodeNormValue(fieldSim.computeNorm(invertState)));
+            subReader.setNorm(d, field, fieldSim.computeNorm(invertState));
           }
         }
       }
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java
index 57915da..f39ce3d 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/codecs/appending/AppendingCodec.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DocValuesConsumer;
 import org.apache.lucene.index.codecs.DefaultDocValuesProducer;
 import org.apache.lucene.index.codecs.FieldsConsumer;
 import org.apache.lucene.index.codecs.FieldsProducer;
@@ -58,7 +57,7 @@
   public static String CODEC_NAME = "Appending";
   
   public AppendingCodec() {
-    name = CODEC_NAME;
+    super(CODEC_NAME);
   }
 
   @Override
@@ -138,22 +137,22 @@
     StandardPostingsReader.files(dir, segmentInfo, codecId, files);
     BlockTermsReader.files(dir, segmentInfo, codecId, files);
     FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files, getDocValuesUseCFS());
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     StandardCodec.getStandardExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
   
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java b/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
index c5c454a..f1ac1459 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
@@ -106,7 +106,7 @@
    * discountOverlaps is true by default or true for this
    * specific field. */
   @Override
-  public float computeNorm(FieldInvertState state) {
+  public byte computeNorm(FieldInvertState state) {
     final int numTokens;
 
     if (discountOverlaps)
@@ -114,7 +114,7 @@
     else
       numTokens = state.getLength();
 
-    return state.getBoost() * computeLengthNorm(numTokens);
+    return encodeNormValue(state.getBoost() * computeLengthNorm(numTokens));
   }
 
   /**
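
Because computeNorm now returns the already-encoded byte, callers that
previously compared raw float norms have to round-trip through the encoder, as
the test changes further below do. A sketch of the pattern (length and boost
values are arbitrary):

    SweetSpotSimilarity ss = new SweetSpotSimilarity();
    FieldInvertState state = new FieldInvertState();
    state.setBoost(1.0f);
    state.setLength(5);
    byte encoded = ss.computeNorm(state);       // quantized, index-ready norm
    float approx = ss.decodeNormValue(encoded); // lossy inverse, for comparisons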
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java b/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
index 0e435c5..29d8998 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/store/WindowsDirectory.java
@@ -70,7 +70,7 @@
   @Override
   public IndexInput openInput(String name, IOContext context) throws IOException {
     ensureOpen();
-    return new WindowsIndexInput(new File(getDirectory(), name), DEFAULT_BUFFERSIZE);
+    return new WindowsIndexInput(new File(getDirectory(), name), Math.max(BufferedIndexInput.bufferSize(context), DEFAULT_BUFFERSIZE));
   }
   
   protected static class WindowsIndexInput extends BufferedIndexInput {
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
index 9af69bb..d5896a4 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
@@ -49,8 +49,8 @@
     public Similarity get(String field) {
       return new DefaultSimilarity() {
         @Override
-        public float computeNorm(FieldInvertState state) {
-          return state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength());
+        public byte computeNorm(FieldInvertState state) {
+          return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
         }
       };
     }
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java b/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
index f7f33f3..0e9732c 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
@@ -21,6 +21,7 @@
 import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.DefaultSimilarityProvider;
 import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
 import org.apache.lucene.search.SimilarityProvider;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.index.FieldInvertState;
@@ -58,15 +59,15 @@
       invertState.setLength(i);
       assertEquals("3,10: spot i="+i,
                    1.0f,
-                   s.computeNorm(invertState),
+                   ss.decodeNormValue(s.computeNorm(invertState)),
                    0.0f);
     }
   
     for (int i = 10; i < 1000; i++) {
       invertState.setLength(i-9);
-      final float normD = d.computeNorm(invertState);
+      final byte normD = d.computeNorm(invertState);
       invertState.setLength(i);
-      final float normS = s.computeNorm(invertState);
+      final byte normS = s.computeNorm(invertState);
       assertEquals("3,10: 10<x : i="+i,
                    normD,
                    normS,
@@ -104,14 +105,14 @@
       invertState.setLength(i);
       assertEquals("f: 3,10: spot i="+i,
                    1.0f,
-                   sp.get("foo").computeNorm(invertState),
+                   ss.decodeNormValue(sp.get("foo").computeNorm(invertState)),
                    0.0f);
     }
     for (int i = 10; i < 1000; i++) {
       invertState.setLength(i-9);
-      final float normD = d.computeNorm(invertState);
+      final byte normD = d.computeNorm(invertState);
       invertState.setLength(i);
-      final float normS = sp.get("foo").computeNorm(invertState);
+      final byte normS = sp.get("foo").computeNorm(invertState);
       assertEquals("f: 3,10: 10<x : i="+i,
                    normD,
                    normS,
@@ -121,21 +122,21 @@
       invertState.setLength(i);
       assertEquals("f: 8,13: spot i="+i,
                    1.0f,
-                   sp.get("bar").computeNorm(invertState),
+                   ss.decodeNormValue(sp.get("bar").computeNorm(invertState)),
                    0.0f);
     }
     for (int i = 6; i <=9; i++) {
       invertState.setLength(i);
       assertEquals("f: 6,9: spot i="+i,
                    1.0f,
-                   sp.get("yak").computeNorm(invertState),
+                   ss.decodeNormValue(sp.get("yak").computeNorm(invertState)),
                    0.0f);
     }
     for (int i = 13; i < 1000; i++) {
       invertState.setLength(i-12);
-      final float normD = d.computeNorm(invertState);
+      final byte normD = d.computeNorm(invertState);
       invertState.setLength(i);
-      final float normS = sp.get("bar").computeNorm(invertState);
+      final byte normS = sp.get("bar").computeNorm(invertState);
       assertEquals("f: 8,13: 13<x : i="+i,
                    normD,
                    normS,
@@ -143,9 +144,9 @@
     }
     for (int i = 9; i < 1000; i++) {
       invertState.setLength(i-8);
-      final float normD = d.computeNorm(invertState);
+      final byte normD = d.computeNorm(invertState);
       invertState.setLength(i);
-      final float normS = sp.get("yak").computeNorm(invertState);
+      final byte normS = sp.get("yak").computeNorm(invertState);
       assertEquals("f: 6,9: 9<x : i="+i,
                    normD,
                    normS,
@@ -157,8 +158,8 @@
 
     for (int i = 9; i < 1000; i++) {
       invertState.setLength(i);
-      final float normSS = sp.get("a").computeNorm(invertState);
-      final float normS = sp.get("b").computeNorm(invertState);
+      final byte normSS = sp.get("a").computeNorm(invertState);
+      final byte normS = sp.get("b").computeNorm(invertState);
       assertTrue("s: i="+i+" : a="+normSS+
                  " < b="+normS,
                  normSS < normS);
@@ -170,8 +171,8 @@
   
     SweetSpotSimilarity ss = new SweetSpotSimilarity();
 
-    Similarity d = new DefaultSimilarity();
-    Similarity s = ss;
+    TFIDFSimilarity d = new DefaultSimilarity();
+    TFIDFSimilarity s = ss;
     
     // tf equal
 
@@ -222,7 +223,7 @@
       };
     ss.setHyperbolicTfFactors(3.3f, 7.7f, Math.E, 5.0f);
     
-    Similarity s = ss;
+    TFIDFSimilarity s = ss;
 
     for (int i = 1; i <=1000; i++) {
       assertTrue("MIN tf: i="+i+" : s="+s.tf(i),
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
index 0313e83..1ab8e7c 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
@@ -54,8 +54,8 @@
       public Similarity get(String field) {
         return new DefaultSimilarity() {
           @Override
-          public float computeNorm(FieldInvertState state) {
-            return state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength());
+          public byte computeNorm(FieldInvertState state) {
+            return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
           }
         };
       }
@@ -175,8 +175,8 @@
     public Similarity get(String field) {
       return new DefaultSimilarity() {
         @Override
-        public float computeNorm(FieldInvertState state) {
-          return state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength());
+        public byte computeNorm(FieldInvertState state) {
+          return encodeNormValue(state.getBoost() * (discountOverlaps ? state.getLength() - state.getNumOverlap() : state.getLength()));
         }
       };
     }
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
index ae54875..2a0099b 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
@@ -51,7 +51,11 @@
  */
 public class FuzzyLikeThisQuery extends Query
 {
-    static Similarity sim=new DefaultSimilarity();
+    // TODO: generalize this query (at least it should not reuse this static sim!).
+    // A better way might be to convert this into MultiTermQuery rewrite methods.
+    // The rewrite method can 'average' the TermContext's term statistics (docFreq, totalTermFreq)
+    // provided to TermQuery, so that the general idea is agnostic to any scoring system...
+    static TFIDFSimilarity sim=new DefaultSimilarity();
     Query rewrittenQuery=null;
     ArrayList<FieldVals> fieldVals=new ArrayList<FieldVals>();
     Analyzer analyzer;
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
index 0b97af5..3f83090 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
@@ -44,6 +44,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.FSDirectory;
@@ -285,7 +286,7 @@
     /**
      * For idf() calculations.
      */
-    private Similarity similarity;// = new DefaultSimilarity();
+    private TFIDFSimilarity similarity;// = new DefaultSimilarity();
 
     /**
      * IndexReader to use
@@ -320,17 +321,17 @@
         this(ir, new DefaultSimilarity());
     }
 
-    public MoreLikeThis(IndexReader ir, Similarity sim){
+    public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim){
       this.ir = ir;
       this.similarity = sim;
     }
 
 
-  public Similarity getSimilarity() {
+  public TFIDFSimilarity getSimilarity() {
     return similarity;
   }
 
-  public void setSimilarity(Similarity similarity) {
+  public void setSimilarity(TFIDFSimilarity similarity) {
     this.similarity = similarity;
   }
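
Since idf() is a TF/IDF-specific notion, MoreLikeThis now requires a
TFIDFSimilarity rather than any Similarity. A brief usage sketch (reader is
assumed to be an open IndexReader):

    MoreLikeThis mlt = new MoreLikeThis(reader); // still defaults to DefaultSimilarity
    mlt.setSimilarity(new DefaultSimilarity());  // any TFIDFSimilarity subclass works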
 
diff --git a/lucene/src/java/org/apache/lucene/document/AbstractField.java b/lucene/src/java/org/apache/lucene/document/AbstractField.java
index 8cf624d..e15e525 100755
--- a/lucene/src/java/org/apache/lucene/document/AbstractField.java
+++ b/lucene/src/java/org/apache/lucene/document/AbstractField.java
@@ -81,13 +81,13 @@
    * default, in the {@link
    * org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)} method, the boost value is multiplied
    * by the length normalization factor and then
-   * rounded by {@link org.apache.lucene.search.Similarity#encodeNormValue(float)} before it is stored in the
+   * rounded by {@link org.apache.lucene.search.DefaultSimilarity#encodeNormValue(float)} before it is stored in the
    * index.  One should attempt to ensure that this product does not overflow
    * the range of that encoding.
    *
    * @see org.apache.lucene.document.Document#setBoost(float)
    * @see org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)
-   * @see org.apache.lucene.search.Similarity#encodeNormValue(float)
+   * @see org.apache.lucene.search.DefaultSimilarity#encodeNormValue(float)
    */
   public void setBoost(float boost) {
     this.boost = boost;
diff --git a/lucene/src/java/org/apache/lucene/document/Fieldable.java b/lucene/src/java/org/apache/lucene/document/Fieldable.java
index 45396db..5c25371 100755
--- a/lucene/src/java/org/apache/lucene/document/Fieldable.java
+++ b/lucene/src/java/org/apache/lucene/document/Fieldable.java
@@ -48,13 +48,13 @@
    * default, in the {@link
    * org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)} method, the boost value is multiplied
    * by the length normalization factor
-   * and then rounded by {@link org.apache.lucene.search.Similarity#encodeNormValue(float)} before it is stored in the
+   * and then rounded by {@link org.apache.lucene.search.DefaultSimilarity#encodeNormValue(float)} before it is stored in the
    * index.  One should attempt to ensure that this product does not overflow
    * the range of that encoding.
    *
    * @see org.apache.lucene.document.Document#setBoost(float)
    * @see org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)
-   * @see org.apache.lucene.search.Similarity#encodeNormValue(float)
+   * @see org.apache.lucene.search.DefaultSimilarity#encodeNormValue(float)
    */
   void setBoost(float boost);
 
diff --git a/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index d10cdc7..1cd79da 100644
--- a/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -32,7 +32,6 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IOContext.Context;
 import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.ByteBlockPool.Allocator;
 import org.apache.lucene.util.ByteBlockPool.DirectTrackingAllocator;
@@ -546,7 +545,7 @@
 
   PerDocWriteState newPerDocWriteState(int codecId) {
     assert segment != null;
-    return new PerDocWriteState(infoStream, directory, segment, fieldInfos, bytesUsed, codecId);
+    return new PerDocWriteState(infoStream, directory, segment, fieldInfos, bytesUsed, codecId, IOContext.DEFAULT);
   }
   
   void setInfoStream(PrintStream infoStream) {
diff --git a/lucene/src/java/org/apache/lucene/index/FieldsReader.java b/lucene/src/java/org/apache/lucene/index/FieldsReader.java
index 1d23a72..f56769b 100644
--- a/lucene/src/java/org/apache/lucene/index/FieldsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/FieldsReader.java
@@ -31,9 +31,12 @@
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.CloseableThreadLocal;
+import org.apache.lucene.util.IOUtils;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 
 /**
  * Class responsible for access to stored document fields.
@@ -42,7 +45,7 @@
  * 
  * @lucene.internal
  */
-public final class FieldsReader implements Cloneable {
+public final class FieldsReader implements Cloneable, Closeable {
   private final static int FORMAT_SIZE = 4;
 
   private final FieldInfos fieldInfos;
@@ -180,21 +183,11 @@
    */
   public final void close() throws IOException {
     if (!closed) {
-      if (fieldsStream != null) {
-        fieldsStream.close();
-      }
       if (isOriginal) {
-        if (cloneableFieldsStream != null) {
-          cloneableFieldsStream.close();
-        }
-        if (cloneableIndexStream != null) {
-          cloneableIndexStream.close();
-        }
+        IOUtils.closeSafely(false, fieldsStream, indexStream, fieldsStreamTL, cloneableFieldsStream, cloneableIndexStream);
+      } else {
+        IOUtils.closeSafely(false, fieldsStream, indexStream, fieldsStreamTL);
       }
-      if (indexStream != null) {
-        indexStream.close();
-      }
-      fieldsStreamTL.close();
       closed = true;
     }
   }
diff --git a/lucene/src/java/org/apache/lucene/index/IndexReader.java b/lucene/src/java/org/apache/lucene/index/IndexReader.java
index f843409..6d94250 100644
--- a/lucene/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/src/java/org/apache/lucene/index/IndexReader.java
@@ -1025,7 +1025,7 @@
   public abstract byte[] norms(String field) throws IOException;
 
   /** Expert: Resets the normalization factor for the named field of the named
-   * document.  The norm represents the product of the field's {@link
+   * document.  By default, the norm represents the product of the field's {@link
    * org.apache.lucene.document.Fieldable#setBoost(float) boost} and its
   * length normalization.  Thus, to preserve the length normalization
    * values when resetting this, one should base the new value upon the old.
@@ -1034,7 +1034,8 @@
    * this method throws {@link IllegalStateException}.
    *
    * @see #norms(String)
-   * @see Similarity#decodeNormValue(byte)
+   * @see Similarity#computeNorm(FieldInvertState)
+   * @see org.apache.lucene.search.DefaultSimilarity#decodeNormValue(byte)
    * @throws StaleReaderException if the index has changed
    *  since this reader was opened
    * @throws CorruptIndexException if the index is corrupt
diff --git a/lucene/src/java/org/apache/lucene/index/MultiPerDocValues.java b/lucene/src/java/org/apache/lucene/index/MultiPerDocValues.java
index 6e6b6d4..b94da88 100644
--- a/lucene/src/java/org/apache/lucene/index/MultiPerDocValues.java
+++ b/lucene/src/java/org/apache/lucene/index/MultiPerDocValues.java
@@ -28,6 +28,7 @@
 import org.apache.lucene.index.values.MultiIndexDocValues;
 import org.apache.lucene.index.values.ValueType;
 import org.apache.lucene.index.values.MultiIndexDocValues.DocValuesIndex;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.ReaderUtil.Gather;
 
@@ -151,20 +152,7 @@
   }
 
   public void close() throws IOException {
-    final PerDocValues[] perDocValues = this.subs;
-    IOException ex = null;
-    for (PerDocValues values : perDocValues) {
-      try {
-        values.close();
-      } catch (IOException e) {
-        if (ex == null) {
-          ex = e;
-        }
-      }
-    }
-    if (ex != null) {
-      throw ex;
-    }
+    IOUtils.closeSafely(false, this.subs);
   }
 
   @Override
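
The hand-rolled close loops removed here, in FieldsReader, and in
TermVectorsReader all had the same shape; IOUtils.closeSafely centralizes it.
Roughly the contract being relied on (a sketch, not the actual IOUtils source):

    static void closeSafely(boolean suppressExceptions, Closeable... objects) throws IOException {
      IOException first = null;
      for (Closeable object : objects) {
        try {
          if (object != null) object.close(); // null-safe; keeps closing the rest
        } catch (IOException e) {
          if (first == null) first = e;
        }
      }
      // with suppressExceptions=false, the first failure is rethrown after all closes
      if (!suppressExceptions && first != null) throw first;
    }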
diff --git a/lucene/src/java/org/apache/lucene/index/NormsWriterPerField.java b/lucene/src/java/org/apache/lucene/index/NormsWriterPerField.java
index 8b9cc3b..9a0612c 100644
--- a/lucene/src/java/org/apache/lucene/index/NormsWriterPerField.java
+++ b/lucene/src/java/org/apache/lucene/index/NormsWriterPerField.java
@@ -72,8 +72,7 @@
         assert norms.length == upto;
         norms = ArrayUtil.grow(norms, 1+upto);
       }
-      final float norm = similarity.computeNorm(fieldState);
-      norms[upto] = similarity.encodeNormValue(norm);
+      norms[upto] = similarity.computeNorm(fieldState);
       docIDs[upto] = docState.docID;
       upto++;
     }
diff --git a/lucene/src/java/org/apache/lucene/index/PerDocWriteState.java b/lucene/src/java/org/apache/lucene/index/PerDocWriteState.java
index e7b1d93..8bf08f3 100644
--- a/lucene/src/java/org/apache/lucene/index/PerDocWriteState.java
+++ b/lucene/src/java/org/apache/lucene/index/PerDocWriteState.java
@@ -20,6 +20,7 @@
 
 import org.apache.lucene.index.codecs.PerDocConsumer;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
 
 /**
  * Encapsulates all necessary state to initiate a {@link PerDocConsumer} and
@@ -35,10 +36,11 @@
   public final AtomicLong bytesUsed;
   public final SegmentCodecs segmentCodecs;
   public final int codecId;
+  public final IOContext context;
 
   PerDocWriteState(PrintStream infoStream, Directory directory,
       String segmentName, FieldInfos fieldInfos, AtomicLong bytesUsed,
-      int codecId) {
+      int codecId, IOContext context) {
     this.infoStream = infoStream;
     this.directory = directory;
     this.segmentName = segmentName;
@@ -46,6 +48,7 @@
     this.segmentCodecs = fieldInfos.buildSegmentCodecs(false);
     this.codecId = codecId;
     this.bytesUsed = bytesUsed;
+    this.context = context;
   }
 
   PerDocWriteState(SegmentWriteState state) {
@@ -56,6 +59,7 @@
     fieldInfos = state.fieldInfos;
     codecId = state.codecId;
     bytesUsed = new AtomicLong(0);
+    context = state.context;
   }
 
   PerDocWriteState(PerDocWriteState state, int codecId) {
@@ -66,5 +70,6 @@
     this.segmentCodecs = state.segmentCodecs;
     this.codecId = codecId;
     this.bytesUsed = state.bytesUsed;
+    this.context = state.context;
   }
 }
diff --git a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
index 78cf40a..8159230 100644
--- a/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
+++ b/lucene/src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
@@ -52,7 +52,7 @@
   private final SegmentCodecs segmentCodecs;
 
   PerFieldCodecWrapper(SegmentCodecs segmentCodecs) {
-    name = "PerField";
+    super("PerField");
     this.segmentCodecs = segmentCodecs;
   }
 
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java b/lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java
index d478f85..2e392e5 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentCoreReaders.java
@@ -26,6 +26,7 @@
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.IOUtils;
 
 /** Holds core readers that are shared (unchanged) when
  * SegmentReader is cloned or reopened */
@@ -120,33 +121,9 @@
   }
   
   synchronized void decRef() throws IOException {
-    
     if (ref.decrementAndGet() == 0) {
-      
-      if (fields != null) {
-        fields.close();
-      }
-      
-      if (perDocProducer != null) {
-        perDocProducer.close();
-      }
-      
-      if (termVectorsReaderOrig != null) {
-        termVectorsReaderOrig.close();
-      }
-      
-      if (fieldsReaderOrig != null) {
-        fieldsReaderOrig.close();
-      }
-      
-      if (cfsReader != null) {
-        cfsReader.close();
-      }
-      
-      if (storeCFSReader != null) {
-        storeCFSReader.close();
-      }
-      
+      IOUtils.closeSafely(false, fields, perDocProducer, termVectorsReaderOrig,
+          fieldsReaderOrig, cfsReader, storeCFSReader);
       // Now, notify any ReaderFinished listeners:
       if (owner != null) {
         owner.notifyReaderFinishedListeners();
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
index 6291651..f2180b0 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentInfo.java
@@ -30,7 +30,6 @@
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
-import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -248,7 +247,7 @@
       }
       final Directory dirToTest;
       if (isCompoundFile) {
-        dirToTest = dir.openCompoundInput(IndexFileNames.segmentFileName(storesSegment, "", ext), IOContext.READONCE );
+        dirToTest = dir.openCompoundInput(IndexFileNames.segmentFileName(storesSegment, "", ext), IOContext.READONCE);
       } else {
         dirToTest = dir;
       }
diff --git a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
index beb7361..7f75463 100644
--- a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -119,6 +119,7 @@
 
     mergedDocs = mergeFields();
     mergeTerms();
+    mergePerDoc();
     mergeNorms();
 
     if (fieldInfos.hasVectors())
@@ -482,18 +483,11 @@
     // the new segment:
 
     int docBase = 0;
-
+    
     final List<Fields> fields = new ArrayList<Fields>();
-
     final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
     final List<Bits> bits = new ArrayList<Bits>();
     final List<Integer> bitsStarts = new ArrayList<Integer>();
-    
-    // TODO: move this into its own method - this merges currently only docvalues
-    final List<PerDocValues> perDocProducers = new ArrayList<PerDocValues>();    
-    final List<ReaderUtil.Slice> perDocSlices = new ArrayList<ReaderUtil.Slice>();
-    final List<Bits> perDocBits = new ArrayList<Bits>();
-    final List<Integer> perDocBitsStarts = new ArrayList<Integer>();
 
     for(IndexReader r : readers) {
       final Fields f = r.fields();
@@ -504,18 +498,10 @@
         bits.add(r.getLiveDocs());
         bitsStarts.add(docBase);
       }
-      final PerDocValues producer = r.perDocValues();
-      if (producer != null) {
-        perDocSlices.add(new ReaderUtil.Slice(docBase, maxDoc, fields.size()));
-        perDocProducers.add(producer);
-        perDocBits.add(r.getLiveDocs());
-        perDocBitsStarts.add(docBase);
-      }
       docBase += maxDoc;
     }
 
     bitsStarts.add(docBase);
-    perDocBitsStarts.add(docBase);
 
     // we may gather more readers than mergeState.readerCount
     mergeState = new MergeState();
@@ -581,19 +567,45 @@
     } finally {
       consumer.close();
     }
+  }
+
+  private void mergePerDoc() throws IOException {
+    final List<PerDocValues> perDocProducers = new ArrayList<PerDocValues>();    
+    final List<ReaderUtil.Slice> perDocSlices = new ArrayList<ReaderUtil.Slice>();
+    final List<Bits> perDocBits = new ArrayList<Bits>();
+    final List<Integer> perDocBitsStarts = new ArrayList<Integer>();
+    int docBase = 0;
+    for (IndexReader r : readers) {
+      final int maxDoc = r.maxDoc();
+      final PerDocValues producer = r.perDocValues();
+      if (producer != null) {
+        perDocSlices.add(new ReaderUtil.Slice(docBase, maxDoc, perDocProducers
+            .size()));
+        perDocProducers.add(producer);
+        perDocBits.add(r.getLiveDocs());
+        perDocBitsStarts.add(docBase);
+      }
+      docBase += maxDoc;
+    }
+    perDocBitsStarts.add(docBase);
     if (!perDocSlices.isEmpty()) {
-      mergeState.multiLiveDocs = new MultiBits(perDocBits, perDocBitsStarts, true);
+      mergeState.multiLiveDocs = new MultiBits(perDocBits, perDocBitsStarts,
+          true);
       final PerDocConsumer docsConsumer = codec
           .docsConsumer(new PerDocWriteState(segmentWriteState));
+      boolean success = false;
       try {
-        final MultiPerDocValues multiPerDocValues = new MultiPerDocValues(perDocProducers
-            .toArray(PerDocValues.EMPTY_ARRAY), perDocSlices
-            .toArray(ReaderUtil.Slice.EMPTY_ARRAY));
+        final MultiPerDocValues multiPerDocValues = new MultiPerDocValues(
+            perDocProducers.toArray(PerDocValues.EMPTY_ARRAY),
+            perDocSlices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
         docsConsumer.merge(mergeState, multiPerDocValues);
+        success = true;
       } finally {
-        docsConsumer.close();
+        IOUtils.closeSafely(!success, docsConsumer);
       }
     }
+    /* don't close the perDocProducers here since they are private segment producers
+     * and will be closed once the SegmentReader goes out of scope */ 
   }
 
   private MergeState mergeState;
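
The new mergePerDoc uses the success-flag idiom so that an exception thrown by
merge() is not masked by a secondary failure in close(). Generalized, the
pattern is:

    boolean success = false;
    try {
      doWork(); // may throw
      success = true;
    } finally {
      // on the failure path, close() exceptions are suppressed so the original propagates
      IOUtils.closeSafely(!success, resource);
    }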
diff --git a/lucene/src/java/org/apache/lucene/index/TermVectorsReader.java b/lucene/src/java/org/apache/lucene/index/TermVectorsReader.java
index ca9b628..b034637 100644
--- a/lucene/src/java/org/apache/lucene/index/TermVectorsReader.java
+++ b/lucene/src/java/org/apache/lucene/index/TermVectorsReader.java
@@ -25,11 +25,13 @@
 import org.apache.lucene.store.IOContext.Context;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
 
+import java.io.Closeable;
 import java.io.IOException;
 import java.util.Arrays;
 
-class TermVectorsReader implements Cloneable {
+class TermVectorsReader implements Cloneable, Closeable {
 
   // NOTE: if you make a new format, it must be larger than
   // the current format
@@ -190,14 +192,8 @@
     return format;
   }
 
-  void close() throws IOException {
-    // make all effort to close up. Keep the first exception
-    // and throw it as a new one.
-    IOException keep = null;
-    if (tvx != null) try { tvx.close(); } catch (IOException e) { keep = e; }
-    if (tvd != null) try { tvd.close(); } catch (IOException e) { if (keep == null) keep = e; }
-    if (tvf  != null) try {  tvf.close(); } catch (IOException e) { if (keep == null) keep = e; }
-    if (keep != null) throw (IOException) keep.fillInStackTrace();
+  public void close() throws IOException {
+    IOUtils.closeSafely(false, tvx, tvd, tvf);
   }
 
   /**
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/Codec.java b/lucene/src/java/org/apache/lucene/index/codecs/Codec.java
index 736ceed..bd0af40 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/Codec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/Codec.java
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Comparator;
 import java.util.Set;
 
 import org.apache.lucene.index.PerDocWriteState;
@@ -25,13 +26,21 @@
 import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 
 /** @lucene.experimental */
 public abstract class Codec {
   public static final Codec[] EMPTY = new Codec[0];
   /** Unique name that's used to retrieve this codec when
    *  reading the index */
-  public String name;
+  public final String name;
+  private boolean dvUseCompoundFile = true;
+  private Comparator<BytesRef> docValuesSortComparator = BytesRef
+      .getUTF8SortedAsUnicodeComparator();
+  
+  protected Codec(String name) {
+    this.name = name;
+  }
 
   /** Writes a new segment */
   public abstract FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException;
@@ -68,7 +77,48 @@
 
   /** Records all file extensions this codec uses */
   public abstract void getExtensions(Set<String> extensions);
+  
 
+  /**
+   * If set to <code>true</code> this codec will use a compound file for
+   * IndexDocValues, otherwise each IndexDocValues field will create up to 2
+   * files per segment.
+   * <p>
+   * NOTE: The default value is <code>true</code>.
+   */
+  public void setDocValuesUseCFS(boolean docValuesUseCFS) {
+    this.dvUseCompoundFile = docValuesUseCFS;
+  }
+
+  /**
+   * Returns <code>true</code> iff compound file should be used for
+   * IndexDocValues, otherwise <code>false</code>.
+   * 
+   * @see #setDocValuesUseCFS(boolean)
+   * @return <code>true</code> iff compound file should be used for
+   *         IndexDocValues, otherwise <code>false</code>.
+   */
+  public boolean getDocValuesUseCFS() {
+    return dvUseCompoundFile;
+  }
+  
+  /**
+   * Sets the {@link BytesRef} comparator for sorted IndexDocValue variants. The
+   * default is {@link BytesRef#getUTF8SortedAsUnicodeComparator()}.
+   */
+  public void setDocValuesSortComparator(
+      Comparator<BytesRef> docValuesSortComparator) {
+    this.docValuesSortComparator = docValuesSortComparator;
+  }
+
+  /**
+   * Returns the {@link BytesRef} comparator for sorted IndexDocValue variants.
+   * The default is {@link BytesRef#getUTF8SortedAsUnicodeComparator()}.
+   */
+  public Comparator<BytesRef> getDocValuesSortComparator() {
+    return docValuesSortComparator;
+  }
+  
   @Override
   public String toString() {
     return name;
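
With name now final and set through the constructor, codec subclasses follow
the super(name) pattern applied throughout this patch. A hypothetical subclass
(name and settings illustrative; the abstract fieldsConsumer/fieldsProducer/
files/getExtensions methods are elided):

    public class MyCodec extends Codec {
      public MyCodec() {
        super("MyCodec");          // name is final and must be set here
        setDocValuesUseCFS(false); // opt out of the per-codec doc-values compound file
        setDocValuesSortComparator(BytesRef.getUTF8SortedAsUnicodeComparator());
      }
      // abstract methods omitted for brevity
    }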
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java
index 6f3934b..74d547b 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/CoreCodecProvider.java
@@ -44,7 +44,7 @@
   public CoreCodecProvider() {
     register(new StandardCodec());
     register(new PreFlexCodec());
-    register(new PulsingCodec(1));
+    register(new PulsingCodec());
     register(new SimpleTextCodec());
     register(new MemoryCodec());
   }
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesConsumer.java b/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesConsumer.java
index b02c9c9..d1749fb 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesConsumer.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesConsumer.java
@@ -32,79 +32,106 @@
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.BytesRef;
 
+/**
+ * Default {@link PerDocConsumer} implementation that writes the IndexDocValues
+ * for a segment/codec pair, optionally packed into a per-codec compound file.
+ * @lucene.experimental
+ */
 public class DefaultDocValuesConsumer extends PerDocConsumer {
   private final String segmentName;
   private final int codecId;
   private final Directory directory;
   private final AtomicLong bytesUsed;
   private final Comparator<BytesRef> comparator;
-
-  public DefaultDocValuesConsumer(PerDocWriteState state, Comparator<BytesRef> comparator) {
+  private boolean useCompoundFile;
+  private final IOContext context;
+  
+  public DefaultDocValuesConsumer(PerDocWriteState state, Comparator<BytesRef> comparator, boolean useCompoundFile) throws IOException {
     this.segmentName = state.segmentName;
     this.codecId = state.codecId;
     this.bytesUsed = state.bytesUsed;
-    this.directory = state.directory;
+    this.context = state.context;
+    // TODO: maybe we should enable a global CFS that all codecs can pull on demand to further reduce the number of files?
+    this.directory = useCompoundFile ? state.directory.createCompoundOutput(
+        IndexFileNames.segmentFileName(segmentName, codecId,
+            IndexFileNames.COMPOUND_FILE_EXTENSION), context) : state.directory;
     this.comparator = comparator;
+    this.useCompoundFile = useCompoundFile;
   }
-  
+
   public void close() throws IOException {
+    if (useCompoundFile) {
+      this.directory.close();
+    }
   }
 
   @Override
   public DocValuesConsumer addValuesField(FieldInfo field) throws IOException {
     return Writer.create(field.getDocValues(),
         docValuesId(segmentName, codecId, field.number),
-        // TODO can we have a compound file per segment and codec for
-        // docvalues?
-        directory, comparator, bytesUsed, IOContext.DEFAULT);
+        directory, comparator, bytesUsed, context);
   }
   
   @SuppressWarnings("fallthrough")
   public static void files(Directory dir, SegmentInfo segmentInfo, int codecId,
-      Set<String> files) throws IOException {
+      Set<String> files, boolean useCompoundFile) throws IOException {
     FieldInfos fieldInfos = segmentInfo.getFieldInfos();
     for (FieldInfo fieldInfo : fieldInfos) {
       if (fieldInfo.getCodecId() == codecId && fieldInfo.hasDocValues()) {
         String filename = docValuesId(segmentInfo.name, codecId,
             fieldInfo.number);
-        switch (fieldInfo.getDocValues()) {
-        case BYTES_FIXED_DEREF:
-        case BYTES_VAR_DEREF:
-        case BYTES_VAR_SORTED:
-        case BYTES_FIXED_SORTED:
-        case BYTES_VAR_STRAIGHT:
-          files.add(IndexFileNames.segmentFileName(filename, "",
-              Writer.INDEX_EXTENSION));
-          assert dir.fileExists(IndexFileNames.segmentFileName(filename, "",
-              Writer.INDEX_EXTENSION));
-          // until here all types use an index
-        case BYTES_FIXED_STRAIGHT:
-        case FLOAT_32:
-        case FLOAT_64:
-        case VAR_INTS:
-        case FIXED_INTS_16:
-        case FIXED_INTS_32:
-        case FIXED_INTS_64:
-        case FIXED_INTS_8:
-          files.add(IndexFileNames.segmentFileName(filename, "",
-              Writer.DATA_EXTENSION));
-          assert dir.fileExists(IndexFileNames.segmentFileName(filename, "",
-              Writer.DATA_EXTENSION));
-          break;
-      
-        default:
-          assert false;
+        if (useCompoundFile) {
+          files.add(IndexFileNames.segmentFileName(segmentInfo.name, codecId, IndexFileNames.COMPOUND_FILE_EXTENSION));
+          files.add(IndexFileNames.segmentFileName(segmentInfo.name, codecId, IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION));
+          assert dir.fileExists(IndexFileNames.segmentFileName(segmentInfo.name, codecId, IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION)); 
+          assert dir.fileExists(IndexFileNames.segmentFileName(segmentInfo.name, codecId, IndexFileNames.COMPOUND_FILE_EXTENSION)); 
+          return;
+        } else {
+          switch (fieldInfo.getDocValues()) {
+          case BYTES_FIXED_DEREF:
+          case BYTES_VAR_DEREF:
+          case BYTES_VAR_SORTED:
+          case BYTES_FIXED_SORTED:
+          case BYTES_VAR_STRAIGHT:
+            files.add(IndexFileNames.segmentFileName(filename, "",
+                Writer.INDEX_EXTENSION));
+            assert dir.fileExists(IndexFileNames.segmentFileName(filename, "",
+                Writer.INDEX_EXTENSION));
+            // until here all types use an index
+          case BYTES_FIXED_STRAIGHT:
+          case FLOAT_32:
+          case FLOAT_64:
+          case VAR_INTS:
+          case FIXED_INTS_16:
+          case FIXED_INTS_32:
+          case FIXED_INTS_64:
+          case FIXED_INTS_8:
+            files.add(IndexFileNames.segmentFileName(filename, "",
+                Writer.DATA_EXTENSION));
+            assert dir.fileExists(IndexFileNames.segmentFileName(filename, "",
+                Writer.DATA_EXTENSION));
+            break;
+        
+          default:
+            assert false;
+          }
         }
       }
     }
   }
   
+
   static String docValuesId(String segmentsName, int codecID, int fieldId) {
     return segmentsName + "_" + codecID + "-" + fieldId;
   }
-
-  public static void getDocValuesExtensions(Set<String> extensions) {
-    extensions.add(Writer.DATA_EXTENSION);
-    extensions.add(Writer.INDEX_EXTENSION);
+  
+  public static void getDocValuesExtensions(Set<String> extensions, boolean useCompoundFile) {
+    if (useCompoundFile) {
+      extensions.add(IndexFileNames.COMPOUND_FILE_ENTRIES_EXTENSION);
+      extensions.add(IndexFileNames.COMPOUND_FILE_EXTENSION);
+    } else {
+      extensions.add(Writer.DATA_EXTENSION);
+      extensions.add(Writer.INDEX_EXTENSION);
+    }
   }
+
 }
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesProducer.java b/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesProducer.java
index 663df35..6a3207d 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesProducer.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/DefaultDocValuesProducer.java
@@ -16,12 +16,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+import java.io.Closeable;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Comparator;
 import java.util.TreeMap;
 
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.values.Bytes;
 import org.apache.lucene.index.values.IndexDocValues;
@@ -30,6 +34,8 @@
 import org.apache.lucene.index.values.ValueType;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
 
 /**
  * Abstract base class for FieldsProducer implementations supporting
@@ -40,8 +46,12 @@
 public class DefaultDocValuesProducer extends PerDocValues {
 
   protected final TreeMap<String, IndexDocValues> docValues;
+  private final boolean useCompoundFile;
+  private final Closeable cfs;
+  private final Comparator<BytesRef> sortComparator;
 
   /**
+   * 
    * Creates a new {@link DefaultDocValuesProducer} instance and loads all
    * {@link IndexDocValues} instances for this segment and codec.
    * 
@@ -53,12 +63,27 @@
    *          the {@link FieldInfos}
    * @param codecId
    *          the codec ID
+   * @param useCompoundFile
+   *          if <code>true</code> this producer opens a compound file to read
+   *          IndexDocValues fields, otherwise each field defines its own set of
+   *          files.
+   * @param sortComparator
+   *          defines the sort order for sorted IndexDocValues variants
    * @throws IOException
    *           if an {@link IOException} occurs
    */
-  public DefaultDocValuesProducer(SegmentInfo si, Directory dir,
-      FieldInfos fieldInfo, int codecId, IOContext context) throws IOException {
-    docValues = load(fieldInfo, si.name, si.docCount, dir, codecId, context);
+  public DefaultDocValuesProducer(SegmentInfo si, Directory dir, 
+      FieldInfos fieldInfo, int codecId, boolean useCompoundFile, Comparator<BytesRef> sortComparator, IOContext context) throws IOException {
+    this.useCompoundFile = useCompoundFile;
+    this.sortComparator = sortComparator;
+    final Directory directory;
+    if (useCompoundFile) {
+      cfs = directory = dir.openCompoundInput(IndexFileNames.segmentFileName(si.name, codecId, IndexFileNames.COMPOUND_FILE_EXTENSION), context);
+    } else {
+      cfs = null;
+      directory = dir;
+    }
+    docValues = load(fieldInfo, si.name, si.docCount, directory, codecId, context);
   }
 
   /**
@@ -86,14 +111,14 @@
           final String id = DefaultDocValuesConsumer.docValuesId(segment,
               codecId, fieldInfo.number);
           values.put(field,
-              loadDocValues(docCount, dir, id, fieldInfo.getDocValues(), context));
+              loadDocValues(docCount, dir, id, fieldInfo.getDocValues(), sortComparator, context));
         }
       }
       success = true;
     } finally {
       if (!success) {
         // if we fail we must close all opened resources if there are any
-        closeDocValues(values.values());
+        closeInternal(values.values());
       }
     }
     return values;
@@ -113,6 +138,7 @@
    *          the unique file ID within the segment
    * @param type
    *          the type to load
+   * @param sortComparator byte comparator used by sorted variants
    * @return a {@link IndexDocValues} instance for the given type
    * @throws IOException
    *           if an {@link IOException} occurs
@@ -120,7 +146,7 @@
    *           if the given {@link ValueType} is not supported
    */
   protected IndexDocValues loadDocValues(int docCount, Directory dir, String id,
-      ValueType type, IOContext context) throws IOException {
+      ValueType type, Comparator<BytesRef> sortComparator, IOContext context) throws IOException {
     switch (type) {
     case FIXED_INTS_16:
     case FIXED_INTS_32:
@@ -133,39 +159,36 @@
     case FLOAT_64:
       return Floats.getValues(dir, id, docCount, context);
     case BYTES_FIXED_STRAIGHT:
-      return Bytes.getValues(dir, id, Bytes.Mode.STRAIGHT, true, docCount, context);
+      return Bytes.getValues(dir, id, Bytes.Mode.STRAIGHT, true, docCount, sortComparator, context);
     case BYTES_FIXED_DEREF:
-      return Bytes.getValues(dir, id, Bytes.Mode.DEREF, true, docCount, context);
+      return Bytes.getValues(dir, id, Bytes.Mode.DEREF, true, docCount, sortComparator, context);
     case BYTES_FIXED_SORTED:
-      return Bytes.getValues(dir, id, Bytes.Mode.SORTED, true, docCount, context);
+      return Bytes.getValues(dir, id, Bytes.Mode.SORTED, true, docCount, sortComparator, context);
     case BYTES_VAR_STRAIGHT:
-      return Bytes.getValues(dir, id, Bytes.Mode.STRAIGHT, false, docCount, context);
+      return Bytes.getValues(dir, id, Bytes.Mode.STRAIGHT, false, docCount, sortComparator, context);
     case BYTES_VAR_DEREF:
-      return Bytes.getValues(dir, id, Bytes.Mode.DEREF, false, docCount, context);
+      return Bytes.getValues(dir, id, Bytes.Mode.DEREF, false, docCount, sortComparator, context);
     case BYTES_VAR_SORTED:
-      return Bytes.getValues(dir, id, Bytes.Mode.SORTED, false, docCount, context);
+      return Bytes.getValues(dir, id, Bytes.Mode.SORTED, false, docCount, sortComparator, context);
     default:
       throw new IllegalStateException("unrecognized index values mode " + type);
     }
   }
 
   public void close() throws IOException {
-    closeDocValues(docValues.values());
+    closeInternal(docValues.values());
   }
 
-  private void closeDocValues(final Collection<IndexDocValues> values)
-      throws IOException {
-    IOException ex = null;
-    for (IndexDocValues docValues : values) {
-      try {
-        docValues.close();
-      } catch (IOException e) {
-        ex = e;
-      }
-    }
-    if (ex != null) {
-      throw ex;
-    }
+  private void closeInternal(Collection<? extends Closeable> closeables) throws IOException {
+    final Collection<? extends Closeable> toClose;
+    if (useCompoundFile) {
+      final ArrayList<Closeable> list = new ArrayList<Closeable>(closeables);
+      list.add(cfs);
+      toClose = list; 
+    } else {
+      toClose = closeables;
+    } 
+    IOUtils.closeSafely(false, toClose);
   }
 
   @Override
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java
index 79c1cab..b4fed2d 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/memory/MemoryCodec.java
@@ -77,9 +77,9 @@
  * @lucene.experimental */
 
 public class MemoryCodec extends Codec {
-
+  
   public MemoryCodec() {
-    name = "Memory";
+    super("Memory");
   }
 
   private static final boolean VERBOSE = false;
@@ -779,22 +779,22 @@
   @Override
   public void files(Directory dir, SegmentInfo segmentInfo, int id, Set<String> files) throws IOException {
     files.add(IndexFileNames.segmentFileName(segmentInfo.name, id, EXTENSION));
-    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files, getDocValuesUseCFS());
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     extensions.add(EXTENSION);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
 
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, IOContext.READONCE);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), IOContext.READONCE);
   }
 }
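
Two mechanical changes recur through the codec files in this patch: the codec
name moves into the Codec super-constructor (the formerly assignable "name"
field can no longer be set after construction), and the doc values
consumer/producer are wired to getDocValuesUseCFS() and
getDocValuesSortComparator() from the Codec base class instead of hard-coding
the UTF-8 comparator. A hedged fragment of the new constructor contract;
"MyCodec" is hypothetical and the remaining abstract Codec methods are
omitted:

    // hypothetical subclass; fieldsConsumer(), fieldsProducer() etc. omitted
    public class MyCodec extends Codec {
      public MyCodec() {
        super("MyCodec");   // was: name = "MyCodec";
      }
    }
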
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java
index 087be5e..e5ce0b6 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexCodec.java
@@ -55,7 +55,7 @@
   public static final String PROX_EXTENSION = "prx";
 
   public PreFlexCodec() {
-    name = "PreFlex";
+    super("PreFlex");
   }
   
   @Override
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java
index e5c5283..2bb6d97 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingCodec.java
@@ -43,7 +43,6 @@
 import org.apache.lucene.index.codecs.TermsIndexWriterBase;
 import org.apache.lucene.index.codecs.standard.StandardCodec;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
 /** This codec "inlines" the postings for terms that have
@@ -58,10 +57,19 @@
 
   private final int freqCutoff;
 
+  /**
+   * Creates a {@link PulsingCodec} with <tt>freqCutoff = 1</tt>
+   * 
+   * @see PulsingCodec#PulsingCodec(int)
+   */
+  public PulsingCodec() {
+    this(1);
+  }
+  
   /** Terms with freq <= freqCutoff are inlined into terms
    *  dict. */
   public PulsingCodec(int freqCutoff) {
-    name = "Pulsing";
+    super("Pulsing");
     this.freqCutoff = freqCutoff;
   }
 
@@ -157,22 +165,22 @@
     StandardPostingsReader.files(dir, segmentInfo, id, files);
     BlockTermsReader.files(dir, segmentInfo, id, files);
     VariableGapTermsIndexReader.files(dir, segmentInfo, id, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files, getDocValuesUseCFS());
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     StandardCodec.getStandardExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
   
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
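
Per the javadoc added above, the no-arg constructor is shorthand for a cutoff
of 1, so only terms with frequency at most 1 are inlined into the terms
dictionary. A small usage fragment:

    import org.apache.lucene.index.codecs.Codec;
    import org.apache.lucene.index.codecs.pulsing.PulsingCodec;

    Codec byDefault = new PulsingCodec();    // freqCutoff = 1
    Codec explicit  = new PulsingCodec(1);   // identical behavior, spelled out
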
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextCodec.java
index 832b7e3..1bdb88f 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextCodec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/simpletext/SimpleTextCodec.java
@@ -33,7 +33,6 @@
 import org.apache.lucene.index.codecs.DefaultDocValuesConsumer;
 import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 
 /** For debugging, curiosity, transparency only!!  Do not
  *  use this codec in production.
@@ -44,11 +43,12 @@
  *
  *  @lucene.experimental */
 public class SimpleTextCodec extends Codec {
-
+  
   public SimpleTextCodec() {
-    name = "SimpleText";
+    super("SimpleText");
   }
 
+
   @Override
   public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
     return new SimpleTextFieldsWriter(state);
@@ -69,23 +69,23 @@
   @Override
   public void files(Directory dir, SegmentInfo segmentInfo, int id, Set<String> files) throws IOException {
     files.add(getPostingsFileName(segmentInfo.name, id));
-    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files, getDocValuesUseCFS());
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     extensions.add(POSTINGS_EXTENSION);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
   
   // TODO: would be great if these used a plain text impl
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java
index 47def92..eed2648 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/standard/StandardCodec.java
@@ -40,14 +40,13 @@
 import org.apache.lucene.index.codecs.BlockTermsReader;
 import org.apache.lucene.index.codecs.DefaultDocValuesProducer;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
 
 /** Default codec. 
  *  @lucene.experimental */
 public class StandardCodec extends Codec {
 
   public StandardCodec() {
-    name = "Standard";
+    super("Standard");
   }
 
   @Override
@@ -140,13 +139,13 @@
     StandardPostingsReader.files(dir, segmentInfo, id, files);
     BlockTermsReader.files(dir, segmentInfo, id, files);
     VariableGapTermsIndexReader.files(dir, segmentInfo, id, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, id, files, getDocValuesUseCFS());
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     getStandardExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
 
   public static void getStandardExtensions(Set<String> extensions) {
@@ -158,11 +157,11 @@
 
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
diff --git a/lucene/src/java/org/apache/lucene/index/values/Bytes.java b/lucene/src/java/org/apache/lucene/index/values/Bytes.java
index 9735c69..f92e657 100644
--- a/lucene/src/java/org/apache/lucene/index/values/Bytes.java
+++ b/lucene/src/java/org/apache/lucene/index/values/Bytes.java
@@ -153,12 +153,13 @@
    *          otherwise <code>false</code>
    * @param maxDoc
    *          the number of document values stored for the given ID
+   * @param sortComparator {@link BytesRef} comparator used by the sorted variants
    * @return an initialized {@link IndexDocValues} instance.
    * @throws IOException
    *           if an {@link IOException} occurs
    */
   public static IndexDocValues getValues(Directory dir, String id, Mode mode,
-      boolean fixedSize, int maxDoc, IOContext context) throws IOException {
+      boolean fixedSize, int maxDoc, Comparator<BytesRef> sortComparator, IOContext context) throws IOException {
 
     // TODO -- I can peek @ header to determine fixed/mode?
     if (fixedSize) {
@@ -175,7 +176,7 @@
       } else if (mode == Mode.DEREF) {
         return new VarDerefBytesImpl.Reader(dir, id, maxDoc, context);
       } else if (mode == Mode.SORTED) {
-        return new VarSortedBytesImpl.Reader(dir, id, maxDoc, context);
+        return new VarSortedBytesImpl.Reader(dir, id, maxDoc, sortComparator, context);
       }
     }
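
With the comparator threaded through the factory, the sorted readers can apply
the codec's configured sort order when load() is called (see the
VarSortedBytesImpl hunk below) instead of always falling back to null. A
hypothetical call under the new signature; "dir" and "maxDoc" stand for the
values the surrounding producer already has:

    IndexDocValues values = Bytes.getValues(dir, "myField", Bytes.Mode.SORTED,
        false /* fixedSize */, maxDoc,
        BytesRef.getUTF8SortedAsUnicodeComparator(), IOContext.READONCE);
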
 
diff --git a/lucene/src/java/org/apache/lucene/index/values/IndexDocValues.java b/lucene/src/java/org/apache/lucene/index/values/IndexDocValues.java
index e9bde3f..305a076 100644
--- a/lucene/src/java/org/apache/lucene/index/values/IndexDocValues.java
+++ b/lucene/src/java/org/apache/lucene/index/values/IndexDocValues.java
@@ -130,6 +130,18 @@
       throws IOException {
     return cache.loadSorted(this, comparator);
   }
+  
+  /**
+   * Returns a {@link SortedSource} instance for this {@link IndexDocValues}
+   * field, using the default {@link BytesRef} comparator, analogous to
+   * {@link #getSource()}.
+   * <p>
+   * This method returns null iff this {@link IndexDocValues} represents a
+   * {@link Source} instead of a {@link SortedSource}.
+   */
+  public SortedSource getSortedSorted() throws IOException {
+    return getSortedSorted(null);
+  }
 
   /**
    * Loads and returns a {@link SortedSource} instance for this
@@ -142,7 +154,19 @@
       throws IOException {
     throw new UnsupportedOperationException();
   }
-
+  
+  /**
+   * Loads and returns a {@link SortedSource} instance for this
+   * {@link IndexDocValues} field, using the default {@link BytesRef}
+   * comparator, analogous to {@link #load()}.
+   * <p>
+   * This method returns null iff this {@link IndexDocValues} represents a
+   * {@link Source} instead of a {@link SortedSource}.
+   */
+  public SortedSource loadSorted() throws IOException {
+    return loadSorted(null);
+  }
+  
   /**
    * Returns the {@link ValueType} of this {@link IndexDocValues} instance
    */
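
A hypothetical caller of the two convenience overloads added above;
"docValues" stands for a sorted IndexDocValues instance obtained elsewhere:

    SortedSource cached = docValues.getSortedSorted();  // cached, default BytesRef order
    SortedSource fresh  = docValues.loadSorted();       // freshly loaded, default order
    // per the javadocs above, both return null iff the field only offers a
    // plain Source rather than a SortedSource
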
diff --git a/lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java b/lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java
index b452f45..3e884b3 100644
--- a/lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java
+++ b/lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java
@@ -168,14 +168,17 @@
 
   public static class Reader extends BytesReaderBase {
 
-    Reader(Directory dir, String id, int maxDoc, IOContext context) throws IOException {
+    private final Comparator<BytesRef> defaultComp;
+        
+    Reader(Directory dir, String id, int maxDoc, Comparator<BytesRef> comparator, IOContext context) throws IOException {
       super(dir, id, CODEC_NAME, VERSION_START, true, context);
+      this.defaultComp = comparator;
     }
 
     @Override
     public org.apache.lucene.index.values.IndexDocValues.Source load()
         throws IOException {
-      return loadSorted(null);
+      return loadSorted(defaultComp);
     }
 
     @Override
diff --git a/lucene/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/src/java/org/apache/lucene/search/BooleanQuery.java
index ecddb66..983fb4a 100644
--- a/lucene/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -183,14 +183,11 @@
     public Query getQuery() { return BooleanQuery.this; }
 
     @Override
-    public float getValue() { return getBoost(); }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       float sum = 0.0f;
       for (int i = 0 ; i < weights.size(); i++) {
        // call getValueForNormalization for all clauses in case of side effects
-        float s = weights.get(i).sumOfSquaredWeights();         // sum sub weights
+        float s = weights.get(i).getValueForNormalization();         // sum sub weights
         if (!clauses.get(i).isProhibited())
           // only add to sum for non-prohibited clauses
           sum += s;
@@ -206,11 +203,11 @@
     }
 
     @Override
-    public void normalize(float norm) {
-      norm *= getBoost();                         // incorporate boost
+    public void normalize(float norm, float topLevelBoost) {
+      topLevelBoost *= getBoost();                         // incorporate boost
       for (Weight w : weights) {
        // normalize all clauses (even if prohibited, in case of side effects)
-        w.normalize(norm);
+        w.normalize(norm, topLevelBoost);
       }
     }
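
The two-argument normalize() keeps the query norm constant down the tree while
each compound weight folds its own boost into topLevelBoost before recursing:
a clause with boost 2 inside a BooleanQuery with boost 3 receives
normalize(norm, 3.0f) and combines 2 * 3 * norm itself. A hedged sketch of the
leaf-side counterpart, following the pattern the ConstantScoreQuery and
MatchAllDocsQuery hunks below use ("value" is a hypothetical field):

    @Override
    public void normalize(float norm, float topLevelBoost) {
      // final factor = own boost * product of enclosing boosts * query norm
      value = getBoost() * topLevelBoost * norm;
    }
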
 
diff --git a/lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java b/lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java
index abe1ff6..69be445 100644
--- a/lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java
+++ b/lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 
@@ -77,7 +77,7 @@
   }
   
   @Override
-  protected void addClause(BooleanQuery topLevel, Term term, int docFreq, float boost /*ignored*/, PerReaderTermState states) {
+  protected void addClause(BooleanQuery topLevel, Term term, int docFreq, float boost /*ignored*/, TermContext states) {
     topLevel.add(new TermQuery(term, states), BooleanClause.Occur.SHOULD);
   }
 
@@ -140,9 +140,9 @@
       assert termState != null;
       if (pos < 0) {
         pos = (-pos)-1;
-        array.termState[pos].register(termState, readerContext.ord, termsEnum.docFreq());
+        array.termState[pos].register(termState, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
       } else {
-        array.termState[pos] = new PerReaderTermState(topReaderContext, termState, readerContext.ord, termsEnum.docFreq());
+        array.termState[pos] = new TermContext(topReaderContext, termState, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
       }
       return true;
     }
@@ -183,9 +183,9 @@
     return true;
   }
   
-  /** Special implementation of BytesStartArray that keeps parallel arrays for {@link PerReaderTermState} */
+  /** Special implementation of BytesStartArray that keeps parallel arrays for {@link TermContext} */
   static final class TermStateByteStart extends DirectBytesStartArray  {
-    PerReaderTermState[] termState;
+    TermContext[] termState;
     
     public TermStateByteStart(int initSize) {
       super(initSize);
@@ -194,7 +194,7 @@
     @Override
     public int[] init() {
       final int[] ord = super.init();
-      termState = new PerReaderTermState[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+      termState = new TermContext[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
       assert termState.length >= ord.length;
       return ord;
     }
@@ -203,7 +203,7 @@
     public int[] grow() {
       final int[] ord = super.grow();
       if (termState.length < ord.length) {
-        PerReaderTermState[] tmpTermState = new PerReaderTermState[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+        TermContext[] tmpTermState = new TermContext[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
         System.arraycopy(termState, 0, tmpTermState, 0, termState.length);
         termState = tmpTermState;
       }      
diff --git a/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index 85007df..4c1e6c7 100644
--- a/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -110,24 +110,19 @@
     }
 
     @Override
-    public float getValue() {
-      return queryWeight;
-    }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
        // we calculate the inner weight's value-for-normalization, but ignore it (just to initialize everything)
-      if (innerWeight != null) innerWeight.sumOfSquaredWeights();
+      if (innerWeight != null) innerWeight.getValueForNormalization();
       queryWeight = getBoost();
       return queryWeight * queryWeight;
     }
 
     @Override
-    public void normalize(float norm) {
-      this.queryNorm = norm;
+    public void normalize(float norm, float topLevelBoost) {
+      this.queryNorm = norm * topLevelBoost;
       queryWeight *= this.queryNorm;
       // we normalize the inner weight, but ignore it (just to initialize everything)
-      if (innerWeight != null) innerWeight.normalize(norm);
+      if (innerWeight != null) innerWeight.normalize(norm, topLevelBoost);
     }
 
     @Override
@@ -148,7 +143,7 @@
       if (disi == null) {
         return null;
       }
-      return new ConstantScorer(disi, this);
+      return new ConstantScorer(disi, this, queryWeight);
     }
     
     @Override
@@ -181,9 +176,9 @@
     final DocIdSetIterator docIdSetIterator;
     final float theScore;
 
-    public ConstantScorer(DocIdSetIterator docIdSetIterator, Weight w) throws IOException {
+    public ConstantScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) throws IOException {
       super(w);
-      theScore = w.getValue();
+      this.theScore = theScore;
       this.docIdSetIterator = docIdSetIterator;
     }
 
@@ -212,7 +207,7 @@
         @Override
         public void setScorer(Scorer scorer) throws IOException {
           // we must wrap again here, but using the scorer passed in as parameter:
-          collector.setScorer(new ConstantScorer(scorer, ConstantScorer.this.weight));
+          collector.setScorer(new ConstantScorer(scorer, ConstantScorer.this.weight, ConstantScorer.this.theScore));
         }
         
         @Override
diff --git a/lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java b/lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java
index 4b89f8e..5e0ab44 100644
--- a/lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java
+++ b/lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java
@@ -20,7 +20,7 @@
  */
 
 /** Expert: Default scoring implementation. */
-public class DefaultSimilarity extends Similarity {
+public class DefaultSimilarity extends TFIDFSimilarity {
 
   /** Implemented as
    *  <code>state.getBoost()*lengthNorm(numTerms)</code>, where
@@ -31,15 +31,15 @@
    *
    *  @lucene.experimental */
   @Override
-  public float computeNorm(FieldInvertState state) {
+  public byte computeNorm(FieldInvertState state) {
     final int numTerms;
     if (discountOverlaps)
       numTerms = state.getLength() - state.getNumOverlap();
     else
       numTerms = state.getLength();
-    return state.getBoost() * ((float) (1.0 / Math.sqrt(numTerms)));
+    return encodeNormValue(state.getBoost() * ((float) (1.0 / Math.sqrt(numTerms))));
   }
-  
+
   /** Implemented as <code>sqrt(freq)</code>. */
   @Override
   public float tf(float freq) {
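
Since computeNorm() now returns the quantized byte itself, a subclass controls
both the statistic and its encoding. A hedged sketch of a variant that keeps
only the field boost and drops length normalization entirely
("NoLengthNormSimilarity" is hypothetical):

    import org.apache.lucene.index.FieldInvertState;
    import org.apache.lucene.search.DefaultSimilarity;

    public class NoLengthNormSimilarity extends DefaultSimilarity {
      @Override
      public byte computeNorm(FieldInvertState state) {
        // keep the boost, drop the 1/sqrt(numTerms) length factor
        return encodeNormValue(state.getBoost());
      }
    }
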
diff --git a/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index 0434232..c50edfd 100644
--- a/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -110,16 +110,12 @@
     @Override
     public Query getQuery() { return DisjunctionMaxQuery.this; }
 
-    /** Return our boost */
-    @Override
-    public float getValue() { return getBoost(); }
-
     /** Compute the sum of squared weights of our subqueries.  Used for normalization. */
     @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       float max = 0.0f, sum = 0.0f;
       for (Weight currentWeight : weights) {
-        float sub = currentWeight.sumOfSquaredWeights();
+        float sub = currentWeight.getValueForNormalization();
         sum += sub;
         max = Math.max(max, sub);
         
@@ -130,10 +126,10 @@
 
     /** Apply the computed normalization factor to our subqueries */
     @Override
-    public void normalize(float norm) {
-      norm *= getBoost();  // Incorporate our boost
+    public void normalize(float norm, float topLevelBoost) {
+      topLevelBoost *= getBoost();  // Incorporate our boost
       for (Weight wt : weights) {
-        wt.normalize(norm);
+        wt.normalize(norm, topLevelBoost);
       }
     }
 
diff --git a/lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index 153821d..08cf2c3 100644
--- a/lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -23,12 +23,6 @@
 import org.apache.lucene.index.*;
 
 final class ExactPhraseScorer extends Scorer {
-  private final byte[] norms;
-  private final float value;
-
-  private static final int SCORE_CACHE_SIZE = 32;
-  private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
-
   private final int endMinus1;
 
   private final static int CHUNK = 4096;
@@ -60,14 +54,12 @@
   private int docID = -1;
   private int freq;
 
-  private final Similarity similarity;
+  private final Similarity.ExactDocScorer docScorer;
   
   ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
-                    Similarity similarity, byte[] norms) throws IOException {
+                    Similarity.ExactDocScorer docScorer) throws IOException {
     super(weight);
-    this.similarity = similarity;
-    this.norms = norms;
-    this.value = weight.getValue();
+    this.docScorer = docScorer;
 
     chunkStates = new ChunkState[postings.length];
 
@@ -88,10 +80,6 @@
         return;
       }
     }
-
-    for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
-      scoreCache[i] = similarity.tf((float) i) * value;
-    }
   }
 
   @Override
@@ -206,13 +194,7 @@
 
   @Override
   public float score() throws IOException {
-    final float raw; // raw score
-    if (freq < SCORE_CACHE_SIZE) {
-      raw = scoreCache[freq];
-    } else {
-      raw = similarity.tf((float) freq) * value;
-    }
-    return norms == null ? raw : raw * similarity.decodeNormValue(norms[docID]); // normalize
+    return docScorer.score(docID, freq);
   }
 
   private int phraseFreq() throws IOException {
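
All per-document math (tf, weight value, norm decoding, and the removed score
cache) now lives behind Similarity.ExactDocScorer; the scorer only supplies
docID and freq. The two calls below are the entire surface this file relies
on, taken from the PhraseQuery and MultiPhraseQuery hunks later in this patch:

    Similarity.ExactDocScorer docScorer =
        similarity.exactDocScorer(stats, field, context);  // created once per segment
    float score = docScorer.score(docID, freq);            // replaces tf(freq) * value * norm
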
diff --git a/lucene/src/java/org/apache/lucene/search/Explanation.java b/lucene/src/java/org/apache/lucene/search/Explanation.java
index 2997528..0425f24 100644
--- a/lucene/src/java/org/apache/lucene/search/Explanation.java
+++ b/lucene/src/java/org/apache/lucene/search/Explanation.java
@@ -125,25 +125,4 @@
 
     return buffer.toString();
   }
-  
-  /**
-   * Small Util class used to pass both an idf factor as well as an
-   * explanation for that factor.
-   * 
-   * This class will likely be held on a {@link Weight}, so be aware 
-   * before storing any large or un-serializable fields.
-   *
-   */
-  public static abstract class IDFExplanation {
-    /**
-     * @return the idf factor
-     */
-    public abstract float getIdf();
-    /**
-     * This should be calculated lazily if possible.
-     * 
-     * @return the explanation for the idf factor.
-     */
-    public abstract String explain();
-  }
 }
diff --git a/lucene/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
index 1bcd845..0630846 100644
--- a/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -63,21 +63,15 @@
   public Weight createWeight(final IndexSearcher searcher) throws IOException {
     final Weight weight = query.createWeight (searcher);
     return new Weight() {
-      private float value;
-        
-      // pass these methods through to enclosed query's weight
-      @Override
-      public float getValue() { return value; }
       
       @Override
-      public float sumOfSquaredWeights() throws IOException { 
-        return weight.sumOfSquaredWeights() * getBoost() * getBoost(); 
+      public float getValueForNormalization() throws IOException { 
+        return weight.getValueForNormalization() * getBoost() * getBoost(); 
       }
 
       @Override
-      public void normalize (float v) { 
-        weight.normalize(v);
-        value = weight.getValue() * getBoost();
+      public void normalize (float norm, float topLevelBoost) { 
+        weight.normalize(norm, topLevelBoost * getBoost()); // incorporate boost
       }
 
       @Override
diff --git a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
index ce404f9..81b90f2 100644
--- a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -674,11 +674,11 @@
   public Weight createNormalizedWeight(Query query) throws IOException {
     query = rewrite(query);
     Weight weight = query.createWeight(this);
-    float sum = weight.sumOfSquaredWeights();
-    float norm = getSimilarityProvider().queryNorm(sum);
+    float v = weight.getValueForNormalization();
+    float norm = getSimilarityProvider().queryNorm(v);
     if (Float.isInfinite(norm) || Float.isNaN(norm))
       norm = 1.0f;
-    weight.normalize(norm);
+    weight.normalize(norm, 1.0f);
     return weight;
   }
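
Spelled out, the renamed handshake keeps its old shape: collect a
normalization value bottom-up, turn it into a query norm, and push it back
down with no enclosing boost at the root. A restatement of the method body
above for reference:

    Weight weight = query.createWeight(searcher);
    float v = weight.getValueForNormalization();            // was sumOfSquaredWeights()
    float norm = searcher.getSimilarityProvider().queryNorm(v);
    if (Float.isInfinite(norm) || Float.isNaN(norm)) norm = 1.0f;
    weight.normalize(norm, 1.0f);                           // 1.0f: no outer boost at the root
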
   
diff --git a/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index bc66237..39b9841 100644
--- a/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -32,35 +32,17 @@
  */
 public class MatchAllDocsQuery extends Query {
 
-  public MatchAllDocsQuery() {
-    this(null);
-  }
-
-  private final String normsField;
-
-  /**
-   * @param normsField Field used for normalization factor (document boost). Null if nothing.
-   */
-  public MatchAllDocsQuery(String normsField) {
-    this.normsField = normsField;
-  }
-
   private class MatchAllScorer extends Scorer {
     final float score;
-    final byte[] norms;
     private int doc = -1;
     private final int maxDoc;
     private final Bits liveDocs;
-    private final Similarity similarity;
-    
-    MatchAllScorer(IndexReader reader, Similarity similarity, Weight w,
-        byte[] norms) throws IOException {
+
+    MatchAllScorer(IndexReader reader, Weight w, float score) throws IOException {
       super(w);
-      this.similarity = similarity;
       liveDocs = reader.getLiveDocs();
-      score = w.getValue();
+      this.score = score;
       maxDoc = reader.maxDoc();
-      this.norms = norms;
     }
 
     @Override
@@ -82,7 +64,7 @@
     
     @Override
     public float score() {
-      return norms == null ? score : score * similarity.decodeNormValue(norms[docID()]);
+      return score;
     }
 
     @Override
@@ -93,12 +75,10 @@
   }
 
   private class MatchAllDocsWeight extends Weight {
-    private Similarity similarity;
     private float queryWeight;
     private float queryNorm;
 
     public MatchAllDocsWeight(IndexSearcher searcher) {
-      this.similarity = normsField == null ? null : searcher.getSimilarityProvider().get(normsField);
     }
 
     @Override
@@ -112,33 +92,27 @@
     }
 
     @Override
-    public float getValue() {
-      return queryWeight;
-    }
-
-    @Override
-    public float sumOfSquaredWeights() {
+    public float getValueForNormalization() {
       queryWeight = getBoost();
       return queryWeight * queryWeight;
     }
 
     @Override
-    public void normalize(float queryNorm) {
-      this.queryNorm = queryNorm;
+    public void normalize(float queryNorm, float topLevelBoost) {
+      this.queryNorm = queryNorm * topLevelBoost;
       queryWeight *= this.queryNorm;
     }
 
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-      return new MatchAllScorer(context.reader, similarity, this,
-          normsField != null ? context.reader.norms(normsField) : null);
+      return new MatchAllScorer(context.reader, this, queryWeight);
     }
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) {
       // explain query weight
       Explanation queryExpl = new ComplexExplanation
-        (true, getValue(), "MatchAllDocsQuery, product of:");
+        (true, queryWeight, "MatchAllDocsQuery, product of:");
       if (getBoost() != 1.0f) {
         queryExpl.addDetail(new Explanation(getBoost(),"boost"));
       }
diff --git a/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index 6ae3c69..bed29c7 100644
--- a/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -22,12 +22,14 @@
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.search.Similarity.SloppyDocScorer;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.util.PriorityQueue;
 import org.apache.lucene.util.Bits;
@@ -129,45 +131,35 @@
 
 
   private class MultiPhraseWeight extends Weight {
-    private Similarity similarity;
-    private float value;
-    private final IDFExplanation idfExp;
-    private float idf;
-    private float queryNorm;
-    private float queryWeight;
+    private final Similarity similarity;
+    private final Similarity.Stats stats;
 
     public MultiPhraseWeight(IndexSearcher searcher)
       throws IOException {
       this.similarity = searcher.getSimilarityProvider().get(field);
-
+      final ReaderContext context = searcher.getTopReaderContext();
+      
       // compute idf
-      ArrayList<Term> allTerms = new ArrayList<Term>();
+      ArrayList<TermContext> allTerms = new ArrayList<TermContext>();
       for(final Term[] terms: termArrays) {
         for (Term term: terms) {
-          allTerms.add(term);
+          allTerms.add(TermContext.build(context, term, true));
         }
       }
-      idfExp = similarity.idfExplain(allTerms, searcher);
-      idf = idfExp.getIdf();
+      stats = similarity.computeStats(searcher, field, getBoost(), allTerms.toArray(new TermContext[allTerms.size()]));
     }
 
     @Override
     public Query getQuery() { return MultiPhraseQuery.this; }
 
     @Override
-    public float getValue() { return value; }
-
-    @Override
-    public float sumOfSquaredWeights() {
-      queryWeight = idf * getBoost();             // compute query weight
-      return queryWeight * queryWeight;           // square it
+    public float getValueForNormalization() {
+      return stats.getValueForNormalization();
     }
 
     @Override
-    public void normalize(float queryNorm) {
-      this.queryNorm = queryNorm;
-      queryWeight *= queryNorm;                   // normalize query weight
-      value = queryWeight * idf;                  // idf for document 
+    public void normalize(float queryNorm, float topLevelBoost) {
+      stats.normalize(queryNorm, topLevelBoost);
     }
 
     @Override
@@ -222,8 +214,7 @@
       }
 
       if (slop == 0) {
-        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity,
-            reader.norms(field));
+        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.exactDocScorer(stats, field, context));
         if (s.noDocs) {
           return null;
         } else {
@@ -231,84 +222,29 @@
         }
       } else {
         return new SloppyPhraseScorer(this, postingsFreqs, similarity,
-                                      slop, reader.norms(field));
+                                      slop, similarity.sloppyDocScorer(stats, field, context));
       }
     }
 
     @Override
-    public Explanation explain(AtomicReaderContext context, int doc)
-      throws IOException {
-      ComplexExplanation result = new ComplexExplanation();
-      result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
-
-      Explanation idfExpl = new Explanation(idf, "idf(" + field + ":" + idfExp.explain() +")");
-
-      // explain query weight
-      Explanation queryExpl = new Explanation();
-      queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");
-
-      Explanation boostExpl = new Explanation(getBoost(), "boost");
-      if (getBoost() != 1.0f)
-        queryExpl.addDetail(boostExpl);
-
-      queryExpl.addDetail(idfExpl);
-
-      Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
-      queryExpl.addDetail(queryNormExpl);
-
-      queryExpl.setValue(boostExpl.getValue() *
-                         idfExpl.getValue() *
-                         queryNormExpl.getValue());
-
-      result.addDetail(queryExpl);
-
-      // explain field weight
-      ComplexExplanation fieldExpl = new ComplexExplanation();
-      fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+
-                               "), product of:");
-
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
       Scorer scorer = scorer(context, ScorerContext.def());
-      if (scorer == null) {
-        return new Explanation(0.0f, "no matching docs");
+      if (scorer != null) {
+        int newDoc = scorer.advance(doc);
+        if (newDoc == doc) {
+          float freq = scorer.freq();
+          SloppyDocScorer docScorer = similarity.sloppyDocScorer(stats, field, context);
+          ComplexExplanation result = new ComplexExplanation();
+          result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+          result.addDetail(scoreExplanation);
+          result.setValue(scoreExplanation.getValue());
+          result.setMatch(true);          
+          return result;
+        }
       }
-
-      Explanation tfExplanation = new Explanation();
-      int d = scorer.advance(doc);
-      float phraseFreq;
-      if (d == doc) {
-        phraseFreq = scorer.freq();
-      } else {
-        phraseFreq = 0.0f;
-      }
-
-      tfExplanation.setValue(similarity.tf(phraseFreq));
-      tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
-      fieldExpl.addDetail(tfExplanation);
-      fieldExpl.addDetail(idfExpl);
-
-      Explanation fieldNormExpl = new Explanation();
-      byte[] fieldNorms = context.reader.norms(field);
-      float fieldNorm =
-        fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
-      fieldNormExpl.setValue(fieldNorm);
-      fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
-      fieldExpl.addDetail(fieldNormExpl);
-
-      fieldExpl.setMatch(Boolean.valueOf(tfExplanation.isMatch()));
-      fieldExpl.setValue(tfExplanation.getValue() *
-                         idfExpl.getValue() *
-                         fieldNormExpl.getValue());
-
-      result.addDetail(fieldExpl);
-      result.setMatch(fieldExpl.getMatch());
-
-      // combine them
-      result.setValue(queryExpl.getValue() * fieldExpl.getValue());
-
-      if (queryExpl.getValue() == 1.0f)
-        return fieldExpl;
-
-      return result;
+      
+      return new ComplexExplanation(false, 0.0f, "no matching term");
     }
   }
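
MultiPhraseWeight shows the full lifecycle of the Similarity.Stats object that
replaces the old idf/queryNorm/queryWeight fields: computed once per query,
normalized once, then consulted per segment. A sketch using only calls visible
in this patch; the variable names are placeholders:

    Similarity.Stats stats =
        similarity.computeStats(searcher, field, getBoost(), termContexts);  // at Weight creation
    float v = stats.getValueForNormalization();   // bubbles up to IndexSearcher
    stats.normalize(queryNorm, topLevelBoost);    // pushed back down the query tree
    Similarity.ExactDocScorer exact =
        similarity.exactDocScorer(stats, field, readerContext);   // slop == 0
    Similarity.SloppyDocScorer sloppy =
        similarity.sloppyDocScorer(stats, field, readerContext);  // slop > 0
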
 
diff --git a/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java b/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
index 3c8c267..e8e7020 100644
--- a/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
@@ -25,7 +25,7 @@
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.util.AttributeSource;
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 
 /**
  * An abstract {@link Query} that matches documents
@@ -154,7 +154,7 @@
     }
     
     @Override
-    protected void addClause(BooleanQuery topLevel, Term term, int docCount, float boost, PerReaderTermState states) {
+    protected void addClause(BooleanQuery topLevel, Term term, int docCount, float boost, TermContext states) {
       final TermQuery tq = new TermQuery(term, states);
       tq.setBoost(boost);
       topLevel.add(tq, BooleanClause.Occur.SHOULD);
@@ -195,7 +195,7 @@
     }
     
     @Override
-    protected void addClause(BooleanQuery topLevel, Term term, int docFreq, float boost, PerReaderTermState states) {
+    protected void addClause(BooleanQuery topLevel, Term term, int docFreq, float boost, TermContext states) {
       final Query q = new ConstantScoreQuery(new TermQuery(term, states));
       q.setBoost(boost);
       topLevel.add(q, BooleanClause.Occur.SHOULD);
diff --git a/lucene/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/src/java/org/apache/lucene/search/PhraseQuery.java
index 300e63f..470cc66 100644
--- a/lucene/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -22,10 +22,16 @@
 import java.util.ArrayList;
 
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.index.TermState;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.Similarity.SloppyDocScorer;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ToStringUtils;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
@@ -171,18 +177,17 @@
 
   private class PhraseWeight extends Weight {
     private final Similarity similarity;
-    private float value;
-    private float idf;
-    private float queryNorm;
-    private float queryWeight;
-    private IDFExplanation idfExp;
+    private final Similarity.Stats stats;
+    private transient TermContext states[];
 
     public PhraseWeight(IndexSearcher searcher)
       throws IOException {
       this.similarity = searcher.getSimilarityProvider().get(field);
-
-      idfExp = similarity.idfExplain(terms, searcher);
-      idf = idfExp.getIdf();
+      final ReaderContext context = searcher.getTopReaderContext();
+      states = new TermContext[terms.size()];
+      for (int i = 0; i < terms.size(); i++)
+        states[i] = TermContext.build(context, terms.get(i), true);
+      stats = similarity.computeStats(searcher, field, getBoost(), states);
     }
 
     @Override
@@ -192,19 +197,13 @@
     public Query getQuery() { return PhraseQuery.this; }
 
     @Override
-    public float getValue() { return value; }
-
-    @Override
-    public float sumOfSquaredWeights() {
-      queryWeight = idf * getBoost();             // compute query weight
-      return queryWeight * queryWeight;           // square it
+    public float getValueForNormalization() {
+      return stats.getValueForNormalization();
     }
 
     @Override
-    public void normalize(float queryNorm) {
-      this.queryNorm = queryNorm;
-      queryWeight *= queryNorm;                   // normalize query weight
-      value = queryWeight * idf;                  // idf for document 
+    public void normalize(float queryNorm, float topLevelBoost) {
+      stats.normalize(queryNorm, topLevelBoost);
     }
 
     @Override
@@ -216,21 +215,26 @@
       PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()];
       for (int i = 0; i < terms.size(); i++) {
         final Term t = terms.get(i);
+        final TermState state = states[i].get(context.ord);
+        if (state == null) { /* term doesn't exist in this segment */
+          assert termNotInReader(reader, field, t.bytes()) : "no termstate found but term exists in reader";
+          return null;
+        }
         DocsAndPositionsEnum postingsEnum = reader.termPositionsEnum(liveDocs,
                                                                      t.field(),
-                                                                     t.bytes());
+                                                                     t.bytes(),
+                                                                     state);
         // PhraseQuery on a field that did not index
         // positions.
         if (postingsEnum == null) {
-          if (reader.termDocsEnum(liveDocs, t.field(), t.bytes()) != null) {
-            // term does exist, but has no positions
-            throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")");
-          } else {
-            // term does not exist
-            return null;
-          }
+          assert (reader.termDocsEnum(liveDocs, t.field(), t.bytes(), state) != null) : "termstate found but no term exists in reader";
+          // term does exist, but has no positions
+          throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")");
         }
-        postingsFreqs[i] = new PostingsAndFreq(postingsEnum, reader.docFreq(t.field(), t.bytes()), positions.get(i).intValue(), t);
+        // get the docFreq without seeking
+        TermsEnum te = reader.fields().terms(field).getThreadTermsEnum();
+        te.seekExact(t.bytes(), state);
+        postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue(), t);
       }
 
       // sort by increasing docFreq order
@@ -239,8 +243,7 @@
       }
 
       if (slop == 0) {				  // optimize exact case
-        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity,
-            reader.norms(field));
+        ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.exactDocScorer(stats, field, context));
         if (s.noDocs) {
           return null;
         } else {
@@ -248,96 +251,35 @@
         }
       } else {
         return
-          new SloppyPhraseScorer(this, postingsFreqs, similarity, slop,
-              reader.norms(field));
+          new SloppyPhraseScorer(this, postingsFreqs, similarity, slop, similarity.sloppyDocScorer(stats, field, context));
       }
     }
+    
+    private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
+      // only called from assert
+      final Terms terms = reader.terms(field);
+      return terms == null || terms.docFreq(bytes) == 0;
+    }
 
     @Override
-    public Explanation explain(AtomicReaderContext context, int doc)
-      throws IOException {
-
-      ComplexExplanation result = new ComplexExplanation();
-      result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
-
-      StringBuilder docFreqs = new StringBuilder();
-      StringBuilder query = new StringBuilder();
-      query.append('\"');
-      docFreqs.append(idfExp.explain());
-      for (int i = 0; i < terms.size(); i++) {
-        if (i != 0) {
-          query.append(" ");
-        }
-
-        Term term = terms.get(i);
-
-        query.append(term.text());
-      }
-      query.append('\"');
-
-      Explanation idfExpl =
-        new Explanation(idf, "idf(" + field + ":" + docFreqs + ")");
-
-      // explain query weight
-      Explanation queryExpl = new Explanation();
-      queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");
-
-      Explanation boostExpl = new Explanation(getBoost(), "boost");
-      if (getBoost() != 1.0f)
-        queryExpl.addDetail(boostExpl);
-      queryExpl.addDetail(idfExpl);
-
-      Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
-      queryExpl.addDetail(queryNormExpl);
-
-      queryExpl.setValue(boostExpl.getValue() *
-                         idfExpl.getValue() *
-                         queryNormExpl.getValue());
-
-      result.addDetail(queryExpl);
-
-      // explain field weight
-      Explanation fieldExpl = new Explanation();
-      fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+
-                               "), product of:");
-
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
       Scorer scorer = scorer(context, ScorerContext.def());
-      if (scorer == null) {
-        return new Explanation(0.0f, "no matching docs");
+      if (scorer != null) {
+        int newDoc = scorer.advance(doc);
+        if (newDoc == doc) {
+          float freq = scorer.freq();
+          SloppyDocScorer docScorer = similarity.sloppyDocScorer(stats, field, context);
+          ComplexExplanation result = new ComplexExplanation();
+          result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+          result.addDetail(scoreExplanation);
+          result.setValue(scoreExplanation.getValue());
+          result.setMatch(true);
+          return result;
+        }
       }
-      Explanation tfExplanation = new Explanation();
-      int d = scorer.advance(doc);
-      float phraseFreq;
-      if (d == doc) {
-        phraseFreq = scorer.freq();
-      } else {
-        phraseFreq = 0.0f;
-      }
-
-      tfExplanation.setValue(similarity.tf(phraseFreq));
-      tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
       
-      fieldExpl.addDetail(tfExplanation);
-      fieldExpl.addDetail(idfExpl);
-
-      Explanation fieldNormExpl = new Explanation();
-      byte[] fieldNorms = context.reader.norms(field);
-      float fieldNorm =
-        fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
-      fieldNormExpl.setValue(fieldNorm);
-      fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
-      fieldExpl.addDetail(fieldNormExpl);
-
-      fieldExpl.setValue(tfExplanation.getValue() *
-                         idfExpl.getValue() *
-                         fieldNormExpl.getValue());
-
-      result.addDetail(fieldExpl);
-
-      // combine them
-      result.setValue(queryExpl.getValue() * fieldExpl.getValue());
-      result.setMatch(tfExplanation.isMatch());
-      return result;
+      return new ComplexExplanation(false, 0.0f, "no matching term");
     }
   }
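
TermContext (the renamed PerReaderTermState) is built once against the
top-level reader context and then hands each segment its TermState directly,
which is how the scorer above avoids a second term-dictionary seek. A sketch
using only calls visible in this patch; the meaning of the boolean flag to
build() is an assumption here:

    ReaderContext top = searcher.getTopReaderContext();
    TermContext states = TermContext.build(top, term, true);  // true: also gather stats (assumed)
    TermState state = states.get(readerContext.ord);          // per-segment lookup
    if (state == null) {
      // term does not occur in this segment; skip it
    }
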
 
diff --git a/lucene/src/java/org/apache/lucene/search/PhraseScorer.java b/lucene/src/java/org/apache/lucene/search/PhraseScorer.java
index da84dbc..f50ae07 100644
--- a/lucene/src/java/org/apache/lucene/search/PhraseScorer.java
+++ b/lucene/src/java/org/apache/lucene/search/PhraseScorer.java
@@ -30,9 +30,6 @@
  * means a match. 
  */
 abstract class PhraseScorer extends Scorer {
-  protected byte[] norms;
-  protected float value;
-
   private boolean firstTime = true;
   private boolean more = true;
   protected PhraseQueue pq;
@@ -40,14 +37,12 @@
 
   private float freq; //phrase frequency in current doc as computed by phraseFreq().
 
-  protected final Similarity similarity;
+  protected final Similarity.SloppyDocScorer docScorer;
 
   PhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
-      Similarity similarity, byte[] norms) {
+      Similarity.SloppyDocScorer docScorer) throws IOException {
     super(weight);
-    this.similarity = similarity;
-    this.norms = norms;
-    this.value = weight.getValue();
+    this.docScorer = docScorer;
 
     // convert tps to a list of phrase positions.
     // note: phrase-position differs from term-position in that its position
@@ -107,9 +102,7 @@
 
   @Override
   public float score() throws IOException {
-    //System.out.println("scoring " + first.doc);
-    float raw = similarity.tf(freq) * value; // raw score
-    return norms == null ? raw : raw * similarity.decodeNormValue(norms[first.doc]); // normalize
+    return docScorer.score(first.doc, freq);
   }
 
   @Override
diff --git a/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java b/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java
index f945116..098d8b4 100644
--- a/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java
+++ b/lucene/src/java/org/apache/lucene/search/ScoringRewrite.java
@@ -28,7 +28,7 @@
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefHash;
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 
@@ -56,7 +56,7 @@
     
     @Override
     protected void addClause(BooleanQuery topLevel, Term term, int docCount,
-        float boost, PerReaderTermState states) {
+        float boost, TermContext states) {
       final TermQuery tq = new TermQuery(term, states);
       tq.setBoost(boost);
       topLevel.add(tq, BooleanClause.Occur.SHOULD);
@@ -117,7 +117,7 @@
     if (size > 0) {
       final int sort[] = col.terms.sort(col.termsEnum.getComparator());
       final float[] boost = col.array.boost;
-      final PerReaderTermState[] termStates = col.array.termState;
+      final TermContext[] termStates = col.array.termState;
       for (int i = 0; i < size; i++) {
         final int pos = sort[i];
         final Term term = new Term(query.getField(), col.terms.get(pos, new BytesRef()));
@@ -150,12 +150,12 @@
       if (e < 0 ) {
         // duplicate term: update docFreq
         final int pos = (-e)-1;
-        array.termState[pos].register(state, readerContext.ord, termsEnum.docFreq());
+        array.termState[pos].register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
         assert array.boost[pos] == boostAtt.getBoost() : "boost should be equal in all segment TermsEnums";
       } else {
         // new entry: we populate the entry initially
         array.boost[e] = boostAtt.getBoost();
-        array.termState[e] = new PerReaderTermState(topReaderContext, state, readerContext.ord, termsEnum.docFreq());
+        array.termState[e] = new TermContext(topReaderContext, state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
         ScoringRewrite.this.checkMaxClauseCount(terms.size());
       }
       return true;
@@ -165,7 +165,7 @@
  /** Special implementation of BytesStartArray that keeps parallel arrays for boost and per-term {@link TermContext} */
   static final class TermFreqBoostByteStart extends DirectBytesStartArray  {
     float[] boost;
-    PerReaderTermState[] termState;
+    TermContext[] termState;
     
     public TermFreqBoostByteStart(int initSize) {
       super(initSize);
@@ -175,7 +175,7 @@
     public int[] init() {
       final int[] ord = super.init();
       boost = new float[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_FLOAT)];
-      termState = new PerReaderTermState[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+      termState = new TermContext[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
       assert termState.length >= ord.length && boost.length >= ord.length;
       return ord;
     }
@@ -185,7 +185,7 @@
       final int[] ord = super.grow();
       boost = ArrayUtil.grow(boost, ord.length);
       if (termState.length < ord.length) {
-        PerReaderTermState[] tmpTermState = new PerReaderTermState[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
+        TermContext[] tmpTermState = new TermContext[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
         System.arraycopy(termState, 0, tmpTermState, 0, termState.length);
         termState = tmpTermState;
       }     
diff --git a/lucene/src/java/org/apache/lucene/search/Similarity.java b/lucene/src/java/org/apache/lucene/search/Similarity.java
index e8ae33f..5a907fc 100644
--- a/lucene/src/java/org/apache/lucene/search/Similarity.java
+++ b/lucene/src/java/org/apache/lucene/search/Similarity.java
@@ -19,594 +19,111 @@
 
 
 import java.io.IOException;
-import java.util.Collection;
 
+import org.apache.lucene.document.IndexDocValuesField; // javadoc
 import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Explanation.IDFExplanation;
-import org.apache.lucene.util.SmallFloat;
+import org.apache.lucene.index.IndexReader; // javadoc
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.Terms; // javadoc
+import org.apache.lucene.search.spans.SpanQuery; // javadoc
+import org.apache.lucene.util.SmallFloat; // javadoc
+import org.apache.lucene.util.TermContext;
 
 
 /** 
+ * Similarity defines the components of Lucene scoring.
+ * <p>
  * Expert: Scoring API.
- *
- * <p>Similarity defines the components of Lucene scoring.
- * Overriding computation of these components is a convenient
- * way to alter Lucene scoring.
- *
- * <p>Suggested reading:
- * <a href="http://nlp.stanford.edu/IR-book/html/htmledition/queries-as-vectors-1.html">
- * Introduction To Information Retrieval, Chapter 6</a>.
- *
- * <p>The following describes how Lucene scoring evolves from
- * underlying information retrieval models to (efficient) implementation.
- * We first brief on <i>VSM Score</i>, 
- * then derive from it <i>Lucene's Conceptual Scoring Formula</i>,
- * from which, finally, evolves <i>Lucene's Practical Scoring Function</i> 
- * (the latter is connected directly with Lucene classes and methods).    
- *
- * <p>Lucene combines
- * <a href="http://en.wikipedia.org/wiki/Standard_Boolean_model">
- * Boolean model (BM) of Information Retrieval</a>
- * with
- * <a href="http://en.wikipedia.org/wiki/Vector_Space_Model">
- * Vector Space Model (VSM) of Information Retrieval</a> -
- * documents "approved" by BM are scored by VSM.
- *
- * <p>In VSM, documents and queries are represented as
- * weighted vectors in a multi-dimensional space,
- * where each distinct index term is a dimension,
- * and weights are
- * <a href="http://en.wikipedia.org/wiki/Tfidf">Tf-idf</a> values.
- *
- * <p>VSM does not require weights to be <i>Tf-idf</i> values,
- * but <i>Tf-idf</i> values are believed to produce search results of high quality,
- * and so Lucene is using <i>Tf-idf</i>.
- * <i>Tf</i> and <i>Idf</i> are described in more detail below,
- * but for now, for completion, let's just say that
- * for given term <i>t</i> and document (or query) <i>x</i>,
- * <i>Tf(t,x)</i> varies with the number of occurrences of term <i>t</i> in <i>x</i>
- * (when one increases so does the other) and
- * <i>idf(t)</i> similarly varies with the inverse of the
- * number of index documents containing term <i>t</i>.
- *
- * <p><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the
- * <a href="http://en.wikipedia.org/wiki/Cosine_similarity">
- * Cosine Similarity</a>
- * of the weighted query vectors <i>V(q)</i> and <i>V(d)</i>:
- *
- *  <br>&nbsp;<br>
- *  <table cellpadding="2" cellspacing="2" border="0" align="center">
- *    <tr><td>
- *    <table cellpadding="1" cellspacing="0" border="1" align="center">
- *      <tr><td>
- *      <table cellpadding="2" cellspacing="2" border="0" align="center">
- *        <tr>
- *          <td valign="middle" align="right" rowspan="1">
- *            cosine-similarity(q,d) &nbsp; = &nbsp;
- *          </td>
- *          <td valign="middle" align="center">
- *            <table>
- *               <tr><td align="center"><small>V(q)&nbsp;&middot;&nbsp;V(d)</small></td></tr>
- *               <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
- *               <tr><td align="center"><small>|V(q)|&nbsp;|V(d)|</small></td></tr>
- *            </table>
- *          </td>
- *        </tr>
- *      </table>
- *      </td></tr>
- *    </table>
- *    </td></tr>
- *    <tr><td>
- *    <center><font=-1><u>VSM Score</u></font></center>
- *    </td></tr>
- *  </table>
- *  <br>&nbsp;<br>
- *   
- *
- * Where <i>V(q)</i> &middot; <i>V(d)</i> is the
- * <a href="http://en.wikipedia.org/wiki/Dot_product">dot product</a>
- * of the weighted vectors,
- * and <i>|V(q)|</i> and <i>|V(d)|</i> are their
- * <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norms</a>.
- *
- * <p>Note: the above equation can be viewed as the dot product of
- * the normalized weighted vectors, in the sense that dividing
- * <i>V(q)</i> by its euclidean norm is normalizing it to a unit vector.
- *
- * <p>Lucene refines <i>VSM score</i> for both search quality and usability:
- * <ul>
- *  <li>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that 
- *  it removes all document length information. 
- *  For some documents removing this info is probably ok, 
- *  e.g. a document made by duplicating a certain paragraph <i>10</i> times,
- *  especially if that paragraph is made of distinct terms. 
- *  But for a document which contains no duplicated paragraphs, 
- *  this might be wrong. 
- *  To avoid this problem, a different document length normalization 
- *  factor is used, which normalizes to a vector equal to or larger 
- *  than the unit vector: <i>doc-len-norm(d)</i>.
- *  </li>
- *
- *  <li>At indexing, users can specify that certain documents are more
- *  important than others, by assigning a document boost.
- *  For this, the score of each document is also multiplied by its boost value
- *  <i>doc-boost(d)</i>.
- *  </li>
- *
- *  <li>Lucene is field based, hence each query term applies to a single
- *  field, document length normalization is by the length of the certain field,
- *  and in addition to document boost there are also document fields boosts.
- *  </li>
- *
- *  <li>The same field can be added to a document during indexing several times,
- *  and so the boost of that field is the multiplication of the boosts of
- *  the separate additions (or parts) of that field within the document.
- *  </li>
- *
- *  <li>At search time users can specify boosts to each query, sub-query, and
- *  each query term, hence the contribution of a query term to the score of
- *  a document is multiplied by the boost of that query term <i>query-boost(q)</i>.
- *  </li>
- *
- *  <li>A document may match a multi term query without containing all
- *  the terms of that query (this is correct for some of the queries),
- *  and users can further reward documents matching more query terms
- *  through a coordination factor, which is usually larger when
- *  more terms are matched: <i>coord-factor(q,d)</i>.
- *  </li>
- * </ul>
- *
- * <p>Under the simplifying assumption of a single field in the index,
- * we get <i>Lucene's Conceptual scoring formula</i>:
- *
- *  <br>&nbsp;<br>
- *  <table cellpadding="2" cellspacing="2" border="0" align="center">
- *    <tr><td>
- *    <table cellpadding="1" cellspacing="0" border="1" align="center">
- *      <tr><td>
- *      <table cellpadding="2" cellspacing="2" border="0" align="center">
- *        <tr>
- *          <td valign="middle" align="right" rowspan="1">
- *            score(q,d) &nbsp; = &nbsp;
- *            <font color="#FF9933">coord-factor(q,d)</font> &middot; &nbsp;
- *            <font color="#CCCC00">query-boost(q)</font> &middot; &nbsp;
- *          </td>
- *          <td valign="middle" align="center">
- *            <table>
- *               <tr><td align="center"><small><font color="#993399">V(q)&nbsp;&middot;&nbsp;V(d)</font></small></td></tr>
- *               <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
- *               <tr><td align="center"><small><font color="#FF33CC">|V(q)|</font></small></td></tr>
- *            </table>
- *          </td>
- *          <td valign="middle" align="right" rowspan="1">
- *            &nbsp; &middot; &nbsp; <font color="#3399FF">doc-len-norm(d)</font>
- *            &nbsp; &middot; &nbsp; <font color="#3399FF">doc-boost(d)</font>
- *          </td>
- *        </tr>
- *      </table>
- *      </td></tr>
- *    </table>
- *    </td></tr>
- *    <tr><td>
- *    <center><font=-1><u>Lucene Conceptual Scoring Formula</u></font></center>
- *    </td></tr>
- *  </table>
- *  <br>&nbsp;<br>
- *
- * <p>The conceptual formula is a simplification in the sense that (1) terms and documents
- * are fielded and (2) boosts are usually per query term rather than per query.
- *
- * <p>We now describe how Lucene implements this conceptual scoring formula, and
- * derive from it <i>Lucene's Practical Scoring Function</i>.
- *  
- * <p>For efficient score computation some scoring components
- * are computed and aggregated in advance:
- *
- * <ul>
- *  <li><i>Query-boost</i> for the query (actually for each query term)
- *  is known when search starts.
- *  </li>
- *
- *  <li>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts,
- *  as it is independent of the document being scored.
- *  From search optimization perspective, it is a valid question
- *  why bother to normalize the query at all, because all
- *  scored documents will be multiplied by the same <i>|V(q)|</i>,
- *  and hence documents ranks (their order by score) will not
- *  be affected by this normalization.
- *  There are two good reasons to keep this normalization:
- *  <ul>
- *   <li>Recall that
- *   <a href="http://en.wikipedia.org/wiki/Cosine_similarity">
- *   Cosine Similarity</a> can be used find how similar
- *   two documents are. One can use Lucene for e.g.
- *   clustering, and use a document as a query to compute
- *   its similarity to other documents.
- *   In this use case it is important that the score of document <i>d3</i>
- *   for query <i>d1</i> is comparable to the score of document <i>d3</i>
- *   for query <i>d2</i>. In other words, scores of a document for two
- *   distinct queries should be comparable.
- *   There are other applications that may require this.
- *   And this is exactly what normalizing the query vector <i>V(q)</i>
- *   provides: comparability (to a certain extent) of two or more queries.
- *   </li>
- *
- *   <li>Applying query normalization on the scores helps to keep the
- *   scores around the unit vector, hence preventing loss of score data
- *   because of floating point precision limitations.
- *   </li>
- *  </ul>
- *  </li>
- *
- *  <li>Document length norm <i>doc-len-norm(d)</i> and document
- *  boost <i>doc-boost(d)</i> are known at indexing time.
- *  They are computed in advance and their multiplication
- *  is saved as a single value in the index: <i>norm(d)</i>.
- *  (In the equations below, <i>norm(t in d)</i> means <i>norm(field(t) in doc d)</i>
- *  where <i>field(t)</i> is the field associated with term <i>t</i>.)
- *  </li>
- * </ul>
- *
- * <p><i>Lucene's Practical Scoring Function</i> is derived from the above.
- * The color codes demonstrate how it relates
- * to those of the <i>conceptual</i> formula:
- *
- * <P>
- * <table cellpadding="2" cellspacing="2" border="0" align="center">
- *  <tr><td>
- *  <table cellpadding="" cellspacing="2" border="2" align="center">
- *  <tr><td>
- *   <table cellpadding="2" cellspacing="2" border="0" align="center">
- *   <tr>
- *     <td valign="middle" align="right" rowspan="1">
- *       score(q,d) &nbsp; = &nbsp;
- *       <A HREF="#formula_coord"><font color="#FF9933">coord(q,d)</font></A> &nbsp;&middot;&nbsp;
- *       <A HREF="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></A> &nbsp;&middot;&nbsp;
- *     </td>
- *     <td valign="bottom" align="center" rowspan="1">
- *       <big><big><big>&sum;</big></big></big>
- *     </td>
- *     <td valign="middle" align="right" rowspan="1">
- *       <big><big>(</big></big>
- *       <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> &nbsp;&middot;&nbsp;
- *       <A HREF="#formula_idf"><font color="#993399">idf(t)</font></A><sup>2</sup> &nbsp;&middot;&nbsp;
- *       <A HREF="#formula_termBoost"><font color="#CCCC00">t.getBoost()</font></A>&nbsp;&middot;&nbsp;
- *       <A HREF="#formula_norm"><font color="#3399FF">norm(t,d)</font></A>
- *       <big><big>)</big></big>
- *     </td>
- *   </tr>
- *   <tr valigh="top">
- *    <td></td>
- *    <td align="center"><small>t in q</small></td>
- *    <td></td>
- *   </tr>
- *   </table>
- *  </td></tr>
- *  </table>
- * </td></tr>
- * <tr><td>
- *  <center><font=-1><u>Lucene Practical Scoring Function</u></font></center>
- * </td></tr>
- * </table>
- *
- * <p> where
+ * <p>
+ * This is a low-level API; you should only extend it if you want to implement 
+ * an information retrieval <i>model</i>.  If you are instead looking for a convenient way 
+ * to alter Lucene's scoring, consider extending a higher-level implementation
+ * such as {@link TFIDFSimilarity}, which implements the vector space model with this API, or 
+ * just tweaking the default implementation: {@link DefaultSimilarity}.
+ * <p>
+ * Similarity determines how Lucene weights terms, and Lucene interacts with
+ * this class at both <a href="#indextime">index-time</a> and 
+ * <a href="#querytime">query-time</a>.
+ * <p>
+ * <a name="indextime"/>
+ * At indexing time, the indexer calls {@link #computeNorm(FieldInvertState)}, allowing
+ * the Similarity implementation to return a per-document byte for the field that will 
+ * be later accessible via {@link IndexReader#norms(String)}.  Lucene makes no assumption
+ * about what is in this byte, but it is most useful for encoding length normalization 
+ * information.
+ * <p>
+ * Implementations should carefully consider how the normalization byte is encoded: while
+ * Lucene's classical {@link TFIDFSimilarity} encodes a combination of index-time boost
+ * and length normalization information with {@link SmallFloat}, this might not be suitable
+ * for all purposes.
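+ * <p>
+ * As a hypothetical sketch, a norm computation that encodes pure length
+ * normalization (ignoring index-time boosts) might look like:
+ * <pre>
+ *   public byte computeNorm(FieldInvertState state) {
+ *     // shorter fields get larger norms; SmallFloat packs the float into one byte
+ *     float lengthNorm = (float) (1.0 / Math.sqrt(state.getLength()));
+ *     return SmallFloat.floatToByte315(lengthNorm);
+ *   }
+ * </pre>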
+ * <p>
+ * Many formulas require the use of average document length, which can be computed via a 
+ * combination of {@link Terms#getSumTotalTermFreq()} and {@link IndexReader#maxDoc()}.
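+ * <p>
+ * For example, assuming an {@link IndexReader} {@code reader} and using
+ * {@code MultiFields} to obtain a {@link Terms} instance (a hypothetical
+ * sketch; the field name is illustrative):
+ * <pre>
+ *   Terms terms = MultiFields.getTerms(reader, "body");
+ *   long totalTokens = terms.getSumTotalTermFreq(); // total tokens in the field
+ *   float avgFieldLength = (float) totalTokens / reader.maxDoc();
+ * </pre>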
+ * <p>
+ * Because index-time boost is handled entirely at the application level anyway,
+ * an application can alternatively store the index-time boost separately using an 
+ * {@link IndexDocValuesField}, and access this at query-time with 
+ * {@link IndexReader#docValues(String)}.
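+ * <p>
+ * A hypothetical sketch of that approach (the field name is illustrative, and
+ * the exact setter/accessor signatures are assumptions; consult the docvalues
+ * API on this branch for the precise method names):
+ * <pre>
+ *   // index-time: store the boost as a per-document float value
+ *   IndexDocValuesField boostField = new IndexDocValuesField("boost");
+ *   boostField.setFloat(1.5f);
+ *   doc.add(boostField);
+ *
+ *   // query-time: read it back, e.g. inside a DocScorer
+ *   float boost = reader.docValues("boost").getSource().getFloat(docID);
+ * </pre>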
+ * <p>
+ * Finally, using index-time boosts (either by folding them into the normalization byte or
+ * via IndexDocValues) is an inefficient way to boost the scores of different fields if the
+ * boost will be the same for every document. Instead, the Similarity can simply take a constant
+ * boost parameter <i>C</i>, and the SimilarityProvider can return different instances with
+ * different boosts depending upon field name.
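+ * <p>
+ * A hypothetical sketch of the provider approach ({@code BoostedSimilarity}
+ * is an assumed Similarity subclass that multiplies its scores by the constant
+ * passed here; field names are illustrative):
+ * <pre>
+ *   public class PerFieldProvider implements SimilarityProvider {
+ *     private final Similarity title = new BoostedSimilarity(5f); // C = 5
+ *     private final Similarity body  = new BoostedSimilarity(1f); // C = 1
+ *
+ *     public Similarity get(String field) {
+ *       return "title".equals(field) ? title : body;
+ *     }
+ *     public float coord(int overlap, int maxOverlap) { return 1f; }
+ *     public float queryNorm(float valueForNormalization) { return 1f; }
+ *   }
+ * </pre>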
+ * <p>
+ * <a name="querytime"/>
+ * At query-time, Queries interact with the Similarity via these steps:
  * <ol>
- *    <li>
- *      <A NAME="formula_tf"></A>
- *      <b><i>tf(t in d)</i></b>
- *      correlates to the term's <i>frequency</i>,
- *      defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>.
- *      Documents that have more occurrences of a given term receive a higher score.
- *      Note that <i>tf(t in q)</i> is assumed to be <i>1</i> and therefore it does not appear in this equation,
- *      However if a query contains twice the same term, there will be
- *      two term-queries with that same term and hence the computation would still be correct (although
- *      not very efficient).
- *      The default computation for <i>tf(t in d)</i> in
- *      {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is:
- *
- *      <br>&nbsp;<br>
- *      <table cellpadding="2" cellspacing="2" border="0" align="center">
- *        <tr>
- *          <td valign="middle" align="right" rowspan="1">
- *            {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)} &nbsp; = &nbsp;
- *          </td>
- *          <td valign="top" align="center" rowspan="1">
- *               frequency<sup><big>&frac12;</big></sup>
- *          </td>
- *        </tr>
- *      </table>
- *      <br>&nbsp;<br>
- *    </li>
- *
- *    <li>
- *      <A NAME="formula_idf"></A>
- *      <b><i>idf(t)</i></b> stands for Inverse Document Frequency. This value
- *      correlates to the inverse of <i>docFreq</i>
- *      (the number of documents in which the term <i>t</i> appears).
- *      This means rarer terms give higher contribution to the total score.
- *      <i>idf(t)</i> appears for <i>t</i> in both the query and the document,
- *      hence it is squared in the equation.
- *      The default computation for <i>idf(t)</i> in
- *      {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is:
- *
- *      <br>&nbsp;<br>
- *      <table cellpadding="2" cellspacing="2" border="0" align="center">
- *        <tr>
- *          <td valign="middle" align="right">
- *            {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)}&nbsp; = &nbsp;
- *          </td>
- *          <td valign="middle" align="center">
- *            1 + log <big>(</big>
- *          </td>
- *          <td valign="middle" align="center">
- *            <table>
- *               <tr><td align="center"><small>numDocs</small></td></tr>
- *               <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
- *               <tr><td align="center"><small>docFreq+1</small></td></tr>
- *            </table>
- *          </td>
- *          <td valign="middle" align="center">
- *            <big>)</big>
- *          </td>
- *        </tr>
- *      </table>
- *      <br>&nbsp;<br>
- *    </li>
- *
- *    <li>
- *      <A NAME="formula_coord"></A>
- *      <b><i>coord(q,d)</i></b>
- *      is a score factor based on how many of the query terms are found in the specified document.
- *      Typically, a document that contains more of the query's terms will receive a higher score
- *      than another document with fewer query terms.
- *      This is a search time factor computed in
- *      {@link SimilarityProvider#coord(int, int) coord(q,d)}
- *      by the SimilarityProvider in effect at search time.
- *      <br>&nbsp;<br>
- *    </li>
- *
- *    <li><b>
- *      <A NAME="formula_queryNorm"></A>
- *      <i>queryNorm(q)</i>
- *      </b>
- *      is a normalizing factor used to make scores between queries comparable.
- *      This factor does not affect document ranking (since all ranked documents are multiplied by the same factor),
- *      but rather just attempts to make scores from different queries (or even different indexes) comparable.
- *      This is a search time factor computed by the SimilarityProvider in effect at search time.
- *
- *      The default computation in
- *      {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) DefaultSimilarityProvider}
- *      produces a <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norm</a>:
- *      <br>&nbsp;<br>
- *      <table cellpadding="1" cellspacing="0" border="0" align="center">
- *        <tr>
- *          <td valign="middle" align="right" rowspan="1">
- *            queryNorm(q)  &nbsp; = &nbsp;
- *            {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) queryNorm(sumOfSquaredWeights)}
- *            &nbsp; = &nbsp;
- *          </td>
- *          <td valign="middle" align="center" rowspan="1">
- *            <table>
- *               <tr><td align="center"><big>1</big></td></tr>
- *               <tr><td align="center"><big>
- *                  &ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;
- *               </big></td></tr>
- *               <tr><td align="center">sumOfSquaredWeights<sup><big>&frac12;</big></sup></td></tr>
- *            </table>
- *          </td>
- *        </tr>
- *      </table>
- *      <br>&nbsp;<br>
- *
- *      The sum of squared weights (of the query terms) is
- *      computed by the query {@link org.apache.lucene.search.Weight} object.
- *      For example, a {@link org.apache.lucene.search.BooleanQuery}
- *      computes this value as:
- *
- *      <br>&nbsp;<br>
- *      <table cellpadding="1" cellspacing="0" border="0"n align="center">
- *        <tr>
- *          <td valign="middle" align="right" rowspan="1">
- *            {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights} &nbsp; = &nbsp;
- *            {@link org.apache.lucene.search.Query#getBoost() q.getBoost()} <sup><big>2</big></sup>
- *            &nbsp;&middot;&nbsp;
- *          </td>
- *          <td valign="bottom" align="center" rowspan="1">
- *            <big><big><big>&sum;</big></big></big>
- *          </td>
- *          <td valign="middle" align="right" rowspan="1">
- *            <big><big>(</big></big>
- *            <A HREF="#formula_idf">idf(t)</A> &nbsp;&middot;&nbsp;
- *            <A HREF="#formula_termBoost">t.getBoost()</A>
- *            <big><big>) <sup>2</sup> </big></big>
- *          </td>
- *        </tr>
- *        <tr valigh="top">
- *          <td></td>
- *          <td align="center"><small>t in q</small></td>
- *          <td></td>
- *        </tr>
- *      </table>
- *      <br>&nbsp;<br>
- *
- *    </li>
- *
- *    <li>
- *      <A NAME="formula_termBoost"></A>
- *      <b><i>t.getBoost()</i></b>
- *      is a search time boost of term <i>t</i> in the query <i>q</i> as
- *      specified in the query text
- *      (see <A HREF="../../../../../../queryparsersyntax.html#Boosting a Term">query syntax</A>),
- *      or as set by application calls to
- *      {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}.
- *      Notice that there is really no direct API for accessing a boost of one term in a multi term query,
- *      but rather multi terms are represented in a query as multi
- *      {@link org.apache.lucene.search.TermQuery TermQuery} objects,
- *      and so the boost of a term in the query is accessible by calling the sub-query
- *      {@link org.apache.lucene.search.Query#getBoost() getBoost()}.
- *      <br>&nbsp;<br>
- *    </li>
- *
- *    <li>
- *      <A NAME="formula_norm"></A>
- *      <b><i>norm(t,d)</i></b> encapsulates a few (indexing time) boost and length factors:
- *
- *      <ul>
- *        <li><b>Document boost</b> - set by calling
- *        {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()}
- *        before adding the document to the index.
- *        </li>
- *        <li><b>Field boost</b> - set by calling
- *        {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()}
- *        before adding the field to a document.
- *        </li>
- *        <li><b>lengthNorm</b> - computed
- *        when the document is added to the index in accordance with the number of tokens
- *        of this field in the document, so that shorter fields contribute more to the score.
- *        LengthNorm is computed by the Similarity class in effect at indexing.
- *        </li>
- *      </ul>
- *      The {@link #computeNorm} method is responsible for
- *      combining all of these factors into a single float.
- *
- *      <p>
- *      When a document is added to the index, all the above factors are multiplied.
- *      If the document has multiple fields with the same name, all their boosts are multiplied together:
- *
- *      <br>&nbsp;<br>
- *      <table cellpadding="1" cellspacing="0" border="0"n align="center">
- *        <tr>
- *          <td valign="middle" align="right" rowspan="1">
- *            norm(t,d) &nbsp; = &nbsp;
- *            {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()}
- *            &nbsp;&middot;&nbsp;
- *            lengthNorm
- *            &nbsp;&middot;&nbsp;
- *          </td>
- *          <td valign="bottom" align="center" rowspan="1">
- *            <big><big><big>&prod;</big></big></big>
- *          </td>
- *          <td valign="middle" align="right" rowspan="1">
- *            {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}()
- *          </td>
- *        </tr>
- *        <tr valigh="top">
- *          <td></td>
- *          <td align="center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td>
- *          <td></td>
- *        </tr>
- *      </table>
- *      <br>&nbsp;<br>
- *      However the resulted <i>norm</i> value is {@link #encodeNormValue(float) encoded} as a single byte
- *      before being stored.
- *      At search time, the norm byte value is read from the index
- *      {@link org.apache.lucene.store.Directory directory} and
- *      {@link #decodeNormValue(byte) decoded} back to a float <i>norm</i> value.
- *      This encoding/decoding, while reducing index size, comes with the price of
- *      precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>.
- *      For instance, <i>decode(encode(0.89)) = 0.75</i>.
- *      <br>&nbsp;<br>
- *      Compression of norm values to a single byte saves memory at search time, 
- *      because once a field is referenced at search time, its norms - for 
- *      all documents - are maintained in memory.
- *      <br>&nbsp;<br>
- *      The rationale supporting such lossy compression of norm values is that
- *      given the difficulty (and inaccuracy) of users to express their true information
- *      need by a query, only big differences matter.
- *      <br>&nbsp;<br>
- *      Last, note that search time is too late to modify this <i>norm</i> part of scoring, e.g. by
- *      using a different {@link Similarity} for search.
- *      <br>&nbsp;<br>
- *    </li>
+ *   <li>The {@link #computeStats(IndexSearcher, String, float, TermContext...)} method is called a single time,
+ *       allowing the implementation to compute any statistics (such as IDF, average document length, etc)
+ *       across <i>the entire collection</i>. The {@link TermContext}s passed in are already positioned
+ *       at the terms involved, with their raw statistics available, so a Similarity can freely use any combination
+ *       of term statistics without causing any additional I/O. Lucene makes no assumption about what is 
+ *       stored in the returned {@link Similarity.Stats} object.
+ *   <li>The query normalization process occurs a single time: {@link Similarity.Stats#getValueForNormalization()}
+ *       is called for each query leaf node, {@link SimilarityProvider#queryNorm(float)} is called for the top-level
+ *       query, and finally {@link Similarity.Stats#normalize(float, float)} passes down the normalization value
+ *       and any top-level boosts (e.g. from enclosing {@link BooleanQuery}s).
+ *   <li>For each segment in the index, the Query creates a {@link #exactDocScorer(Stats, String, IndexReader.AtomicReaderContext)}
+ *       (for queries with exact frequencies such as TermQuerys and exact PhraseQueries) or a 
+ *       {@link #sloppyDocScorer(Stats, String, IndexReader.AtomicReaderContext)} (for queries with sloppy frequencies such as
+ *       SpanQuerys and sloppy PhraseQueries). The score() method is called for each matching document
+ *       (a minimal end-to-end sketch follows this list).
  * </ol>
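+ * <p>
+ * The following hypothetical sketch ties these steps together: a minimal
+ * Similarity that scores each match by its raw frequency, needs no collection
+ * statistics, and opts out of query normalization (the class name is illustrative):
+ * <pre>
+ *   public class RawFreqSimilarity extends Similarity {
+ *     public byte computeNorm(FieldInvertState state) {
+ *       return 0; // no length normalization stored
+ *     }
+ *     public float sloppyFreq(int distance) {
+ *       return 1.0f / (distance + 1); // closer matches count more
+ *     }
+ *     public Stats computeStats(IndexSearcher searcher, String fieldName,
+ *         float queryBoost, TermContext... termContexts) {
+ *       return new Stats() { // no collection-level statistics needed
+ *         public float getValueForNormalization() { return 1f; }
+ *         public void normalize(float queryNorm, float topLevelBoost) {}
+ *       };
+ *     }
+ *     public ExactDocScorer exactDocScorer(Stats stats, String fieldName,
+ *         AtomicReaderContext context) {
+ *       return new ExactDocScorer() {
+ *         public float score(int doc, int freq) { return freq; }
+ *       };
+ *     }
+ *     public SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName,
+ *         AtomicReaderContext context) {
+ *       return new SloppyDocScorer() {
+ *         public float score(int doc, float freq) { return freq; }
+ *       };
+ *     }
+ *   }
+ * </pre>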
+ * <p>
+ * <a name="explaintime"/>
+ * When {@link IndexSearcher#explain(Query, int)} is called, queries consult the Similarity's DocScorer for an 
+ * explanation of how it computed its score. The query passes in the document id and an explanation of how the frequency
+ * was computed.
  *
  * @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider)
  * @see IndexSearcher#setSimilarityProvider(SimilarityProvider)
+ * @lucene.experimental
  */
 public abstract class Similarity {
   
   public static final int NO_DOC_ID_PROVIDED = -1;
 
-  /** Cache of decoded bytes. */
-  private static final float[] NORM_TABLE = new float[256];
-
-  static {
-    for (int i = 0; i < 256; i++)
-      NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
-  }
-
-  /** Decodes a normalization factor stored in an index.
-   * @see #encodeNormValue(float)
-   */
-  public float decodeNormValue(byte b) {
-    return NORM_TABLE[b & 0xFF];  // & 0xFF maps negative bytes to positive above 127
-  }
-
   /**
    * Computes the normalization value for a field, given the accumulated
    * state of term processing for this field (see {@link FieldInvertState}).
    * 
-   * <p>Implementations should calculate a float value based on the field
+   * <p>Implementations should calculate a byte value based on the field
    * state and then return that value.
    *
    * <p>Matches in longer fields are less precise, so implementations of this
    * method usually return smaller values when <code>state.getLength()</code> is large,
    * and larger values when <code>state.getLength()</code> is small.
    * 
-   * <p>Note that the return values are computed under 
-   * {@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.document.Document)} 
-   * and then stored using
-   * {@link #encodeNormValue(float)}.  
-   * Thus they have limited precision, and documents
-   * must be re-indexed if this method is altered.
-   *
    * @lucene.experimental
    * 
    * @param state current processing state for this field
-   * @return the calculated float norm
+   * @return the calculated byte norm
    */
-  public abstract float computeNorm(FieldInvertState state);
-  
-  /** Encodes a normalization factor for storage in an index.
-   *
-   * <p>The encoding uses a three-bit mantissa, a five-bit exponent, and
-   * the zero-exponent point at 15, thus
-   * representing values from around 7x10^9 to 2x10^-9 with about one
-   * significant decimal digit of accuracy.  Zero is also represented.
-   * Negative numbers are rounded up to zero.  Values too large to represent
-   * are rounded down to the largest representable value.  Positive values too
-   * small to represent are rounded up to the smallest positive representable
-   * value.
-   * @see org.apache.lucene.document.Field#setBoost(float)
-   * @see org.apache.lucene.util.SmallFloat
-   */
-  public byte encodeNormValue(float f) {
-    return SmallFloat.floatToByte315(f);
-  }
-  
-  /** Computes a score factor based on a term or phrase's frequency in a
-   * document.  This value is multiplied by the {@link #idf(int, int)}
-   * factor for each term in the query and these products are then summed to
-   * form the initial score for a document.
-   *
-   * <p>Terms and phrases repeated in a document indicate the topic of the
-   * document, so implementations of this method usually return larger values
-   * when <code>freq</code> is large, and smaller values when <code>freq</code>
-   * is small.
-   *
-   * <p>The default implementation calls {@link #tf(float)}.
-   *
-   * @param freq the frequency of a term within a document
-   * @return a score factor based on a term's within-document frequency
-   */
-  public float tf(int freq) {
-    return tf((float)freq);
-  }
+  public abstract byte computeNorm(FieldInvertState state);
 
   /** Computes the amount of a sloppy phrase match, based on an edit distance.
    * This value is summed for each sloppy phrase match in a document to form
-   * the frequency that is passed to {@link #tf(float)}.
+   * the frequency to be used in scoring instead of the exact term count.
    *
    * <p>A phrase match with a small edit distance to a document passage more
    * closely matches the document, so implementations of this method usually
@@ -619,124 +136,6 @@
    */
   public abstract float sloppyFreq(int distance);
 
-  /** Computes a score factor based on a term or phrase's frequency in a
-   * document.  This value is multiplied by the {@link #idf(int, int)}
-   * factor for each term in the query and these products are then summed to
-   * form the initial score for a document.
-   *
-   * <p>Terms and phrases repeated in a document indicate the topic of the
-   * document, so implementations of this method usually return larger values
-   * when <code>freq</code> is large, and smaller values when <code>freq</code>
-   * is small.
-   *
-   * @param freq the frequency of a term within a document
-   * @return a score factor based on a term's within-document frequency
-   */
-  public abstract float tf(float freq);
-
-  /**
-   * Computes a score factor for a simple term and returns an explanation
-   * for that score factor.
-   * 
-   * <p>
-   * The default implementation uses:
-   * 
-   * <pre>
-   * idf(docFreq, searcher.maxDoc());
-   * </pre>
-   * 
-   * Note that {@link IndexSearcher#maxDoc()} is used instead of
-   * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also 
-   * {@link IndexSearcher#docFreq(Term)} is used, and when the latter 
-   * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction.
-   * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute
-   *   
-   * @param term the term in question
-   * @param searcher the document collection being searched
-   * @param docFreq externally computed docFreq for this term
-   * @return an IDFExplain object that includes both an idf score factor 
-             and an explanation for the term.
-   * @throws IOException
-   */
-  public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher, int docFreq) throws IOException {
-    final int df = docFreq;
-    final int max = searcher.maxDoc();
-    final float idf = idf(df, max);
-    return new IDFExplanation() {
-        @Override
-        public String explain() {
-          return "idf(docFreq=" + df +
-          ", maxDocs=" + max + ")";
-        }
-        @Override
-        public float getIdf() {
-          return idf;
-        }};
-  }
-
-  /**
-   * This method forwards to {@link
-   * #idfExplain(Term,IndexSearcher,int)} by passing
-   * <code>searcher.docFreq(term)</code> as the docFreq.
-   */
-  public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher) throws IOException {
-    return idfExplain(term, searcher, searcher.docFreq(term));
-  }
-
-  /**
-   * Computes a score factor for a phrase.
-   * 
-   * <p>
-   * The default implementation sums the idf factor for
-   * each term in the phrase.
-   * 
-   * @param terms the terms in the phrase
-   * @param searcher the document collection being searched
-   * @return an IDFExplain object that includes both an idf 
-   *         score factor for the phrase and an explanation 
-   *         for each term.
-   * @throws IOException
-   */
-  public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
-    final int max = searcher.maxDoc();
-    float idf = 0.0f;
-    final StringBuilder exp = new StringBuilder();
-    for (final Term term : terms ) {
-      final int df = searcher.docFreq(term);
-      idf += idf(df, max);
-      exp.append(" ");
-      exp.append(term.text());
-      exp.append("=");
-      exp.append(df);
-    }
-    final float fIdf = idf;
-    return new IDFExplanation() {
-      @Override
-      public float getIdf() {
-        return fIdf;
-      }
-      @Override
-      public String explain() {
-        return exp.toString();
-      }
-    };
-  }
-
-  /** Computes a score factor based on a term's document frequency (the number
-   * of documents which contain the term).  This value is multiplied by the
-   * {@link #tf(int)} factor for each term in the query and these products are
-   * then summed to form the initial score for a document.
-   *
-   * <p>Terms that occur in fewer documents are better indicators of topic, so
-   * implementations of this method usually return larger values for rare terms,
-   * and smaller values for common terms.
-   *
-   * @param docFreq the number of documents which contain the term
-   * @param numDocs the total number of documents in the collection
-   * @return a score factor based on the term's document frequency
-   */
-  public abstract float idf(int docFreq, int numDocs);
-
   /**
    * Calculate a scoring factor based on the data in the payload.  Overriding implementations
    * are responsible for interpreting what is in the payload.  Lucene makes no assumptions about
@@ -758,5 +157,101 @@
   {
     return 1;
   }
-
+  
+  /**
+   * Compute any collection-level stats (e.g. IDF, average document length, etc) needed for scoring a query.
+   */
+  public abstract Stats computeStats(IndexSearcher searcher, String fieldName, float queryBoost, TermContext... termContexts) throws IOException;
+  
+  /**
+   * Returns a new {@link Similarity.ExactDocScorer}.
+   */
+  public abstract ExactDocScorer exactDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException;
+  
+  /**
+   * Returns a new {@link Similarity.SloppyDocScorer}.
+   */
+  public abstract SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException;
+  
+  /**
+   * API for scoring exact queries such as {@link TermQuery} and 
+   * exact {@link PhraseQuery}.
+   * <p>
+   * Term frequencies are integers (the term or phrase's tf).
+   */
+  public abstract class ExactDocScorer {
+    /**
+     * Score a single document
+     * @param doc document id
+     * @param freq term frequency
+     * @return document's score
+     */
+    public abstract float score(int doc, int freq);
+    
+    /**
+     * Explain the score for a single document
+     * @param doc document id
+     * @param freq Explanation of how the term frequency was computed
+     * @return an Explanation of the document's score
+     */
+    public Explanation explain(int doc, Explanation freq) {
+      Explanation result = new Explanation(score(doc, (int)freq.getValue()), 
+          "score(doc=" + doc + ",freq=" + freq.getValue() +"), with freq of:");
+      result.addDetail(freq);
+      return result;
+    }
+  }
+  
+  /**
+   * API for scoring "sloppy" queries such as {@link SpanQuery} and 
+   * sloppy {@link PhraseQuery}.
+   * <p>
+   * Term frequencies are floating point values.
+   */
+  public abstract class SloppyDocScorer {
+    /**
+     * Score a single document
+     * @param doc document id
+     * @param freq sloppy term frequency
+     * @return document's score
+     */
+    public abstract float score(int doc, float freq);
+    
+    /**
+     * Explain the score for a single document
+     * @param doc document id
+     * @param freq Explanation of how the sloppy term frequency was computed
+     * @return an Explanation of the document's score
+     */
+    public Explanation explain(int doc, Explanation freq) {
+      Explanation result = new Explanation(score(doc, freq.getValue()), 
+          "score(doc=" + doc + ",freq=" + freq.getValue() +"), with freq of:");
+      result.addDetail(freq);
+      return result;
+    }
+  }
+  
+  /** Stores the statistics for the indexed collection. This abstract
+   * implementation is empty; descendants of {@code Similarity} should
+   * subclass {@code Stats} and define the statistics they require in the
+   * subclass. Examples include idf, average field length, etc.
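+   * <p>
+   * A hypothetical sketch of a tf-idf style subclass participating in query
+   * normalization (names are illustrative):
+   * <pre>
+   *   class IDFStats extends Stats {
+   *     final float idf;          // computed once in computeStats()
+   *     final float queryBoost;
+   *     float normalizedWeight;   // consumed later by the doc scorers
+   *
+   *     IDFStats(float idf, float queryBoost) {
+   *       this.idf = idf; this.queryBoost = queryBoost;
+   *     }
+   *     public float getValueForNormalization() {
+   *       float weight = idf * queryBoost;
+   *       return weight * weight; // contributes (idf * boost)^2 to the sum
+   *     }
+   *     public void normalize(float queryNorm, float topLevelBoost) {
+   *       normalizedWeight = idf * queryBoost * topLevelBoost * queryNorm;
+   *     }
+   *   }
+   * </pre>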
+   */
+  public static abstract class Stats {
+    
+    /** The value for normalization of contained query clauses (e.g. sum of squared weights).
+     * <p>
+     * NOTE: a Similarity implementation might not use any query normalization at all,
+     * it's not required. However, if it wants to participate in query normalization,
+     * it can return a value here.
+     */
+    public abstract float getValueForNormalization();
+    
+    /** Assigns the query normalization factor and boost from parent queries to these stats.
+     * <p>
+     * NOTE: a Similarity implementation might not use this normalized value at all,
+     * it's not required. However, it's usually a good idea to at least incorporate 
+     * the topLevelBoost (e.g. from an outer BooleanQuery) into its score.
+     */
+    public abstract void normalize(float queryNorm, float topLevelBoost);
+  }
 }
diff --git a/lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index 381518b..5252c55 100644
--- a/lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -25,11 +25,13 @@
     private PhrasePositions repeats[];
     private PhrasePositions tmpPos[]; // for flipping repeating pps.
     private boolean checkedRepeats;
-
+    private final Similarity similarity;
+    
     SloppyPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, Similarity similarity,
-                       int slop, byte[] norms) {
-        super(weight, postings, similarity, norms);
+                       int slop, Similarity.SloppyDocScorer docScorer) throws IOException {
+        super(weight, postings, docScorer);
         this.slop = slop;
+        this.similarity = similarity;
     }
 
     /**
diff --git a/lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java b/lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java
new file mode 100644
index 0000000..abc8e51
--- /dev/null
+++ b/lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java
@@ -0,0 +1,831 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.TermContext;
+import org.apache.lucene.util.SmallFloat;
+
+
+/**
+ * Implementation of {@link Similarity} with the Vector Space Model.
+ * <p>
+ * Expert: Scoring API.
+ * <p>TFIDFSimilarity defines the components of Lucene scoring.
+ * Overriding computation of these components is a convenient
+ * way to alter Lucene scoring.
+ *
+ * <p>Suggested reading:
+ * <a href="http://nlp.stanford.edu/IR-book/html/htmledition/queries-as-vectors-1.html">
+ * Introduction To Information Retrieval, Chapter 6</a>.
+ *
+ * <p>The following describes how Lucene scoring evolves from
+ * underlying information retrieval models to (efficient) implementation.
+ * We first brief on <i>VSM Score</i>, 
+ * then derive from it <i>Lucene's Conceptual Scoring Formula</i>,
+ * from which, finally, evolves <i>Lucene's Practical Scoring Function</i> 
+ * (the latter is connected directly with Lucene classes and methods).    
+ *
+ * <p>Lucene combines
+ * <a href="http://en.wikipedia.org/wiki/Standard_Boolean_model">
+ * Boolean model (BM) of Information Retrieval</a>
+ * with
+ * <a href="http://en.wikipedia.org/wiki/Vector_Space_Model">
+ * Vector Space Model (VSM) of Information Retrieval</a> -
+ * documents "approved" by BM are scored by VSM.
+ *
+ * <p>In VSM, documents and queries are represented as
+ * weighted vectors in a multi-dimensional space,
+ * where each distinct index term is a dimension,
+ * and weights are
+ * <a href="http://en.wikipedia.org/wiki/Tfidf">Tf-idf</a> values.
+ *
+ * <p>VSM does not require weights to be <i>Tf-idf</i> values,
+ * but <i>Tf-idf</i> values are believed to produce search results of high quality,
+ * and so Lucene uses <i>Tf-idf</i>.
+ * <i>Tf</i> and <i>Idf</i> are described in more detail below,
+ * but for now, for completeness, let's just say that
+ * for a given term <i>t</i> and document (or query) <i>x</i>,
+ * <i>Tf(t,x)</i> varies with the number of occurrences of term <i>t</i> in <i>x</i>
+ * (when one increases so does the other) and
+ * <i>idf(t)</i> similarly varies with the inverse of the
+ * number of index documents containing term <i>t</i>.
+ *
+ * <p><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the
+ * <a href="http://en.wikipedia.org/wiki/Cosine_similarity">
+ * Cosine Similarity</a>
+ * of the weighted query vectors <i>V(q)</i> and <i>V(d)</i>:
+ *
+ *  <br>&nbsp;<br>
+ *  <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *    <tr><td>
+ *    <table cellpadding="1" cellspacing="0" border="1" align="center">
+ *      <tr><td>
+ *      <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *        <tr>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            cosine-similarity(q,d) &nbsp; = &nbsp;
+ *          </td>
+ *          <td valign="middle" align="center">
+ *            <table>
+ *               <tr><td align="center"><small>V(q)&nbsp;&middot;&nbsp;V(d)</small></td></tr>
+ *               <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
+ *               <tr><td align="center"><small>|V(q)|&nbsp;|V(d)|</small></td></tr>
+ *            </table>
+ *          </td>
+ *        </tr>
+ *      </table>
+ *      </td></tr>
+ *    </table>
+ *    </td></tr>
+ *    <tr><td>
+ *    <center><font size="-1"><u>VSM Score</u></font></center>
+ *    </td></tr>
+ *  </table>
+ *  <br>&nbsp;<br>
+ *   
+ *
+ * Where <i>V(q)</i> &middot; <i>V(d)</i> is the
+ * <a href="http://en.wikipedia.org/wiki/Dot_product">dot product</a>
+ * of the weighted vectors,
+ * and <i>|V(q)|</i> and <i>|V(d)|</i> are their
+ * <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norms</a>.
+ *
+ * <p>Note: the above equation can be viewed as the dot product of
+ * the normalized weighted vectors, in the sense that dividing
+ * <i>V(q)</i> by its Euclidean norm normalizes it to a unit vector.
+ *
+ * <p>Lucene refines <i>VSM score</i> for both search quality and usability:
+ * <ul>
+ *  <li>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that 
+ *  it removes all document length information. 
+ *  For some documents removing this info is probably ok, 
+ *  e.g. a document made by duplicating a certain paragraph <i>10</i> times,
+ *  especially if that paragraph is made of distinct terms. 
+ *  But for a document which contains no duplicated paragraphs, 
+ *  this might be wrong. 
+ *  To avoid this problem, a different document length normalization 
+ *  factor is used, which normalizes to a vector equal to or larger 
+ *  than the unit vector: <i>doc-len-norm(d)</i>.
+ *  </li>
+ *
+ *  <li>At indexing, users can specify that certain documents are more
+ *  important than others, by assigning a document boost.
+ *  For this, the score of each document is also multiplied by its boost value
+ *  <i>doc-boost(d)</i>.
+ *  </li>
+ *
+ *  <li>Lucene is field based, hence each query term applies to a single
+ *  field, document length normalization is by the length of that field,
+ *  and in addition to document boost there are also document field boosts.
+ *  </li>
+ *
+ *  <li>The same field can be added to a document during indexing several times,
+ *  and so the boost of that field is the multiplication of the boosts of
+ *  the separate additions (or parts) of that field within the document.
+ *  </li>
+ *
+ *  <li>At search time users can specify boosts to each query, sub-query, and
+ *  each query term, hence the contribution of a query term to the score of
+ *  a document is multiplied by the boost of that query term <i>query-boost(q)</i>.
+ *  </li>
+ *
+ *  <li>A document may match a multi term query without containing all
+ *  the terms of that query (this is allowed for some query types),
+ *  and users can further reward documents matching more query terms
+ *  through a coordination factor, which is usually larger when
+ *  more terms are matched: <i>coord-factor(q,d)</i>.
+ *  </li>
+ * </ul>
+ *
+ * <p>Under the simplifying assumption of a single field in the index,
+ * we get <i>Lucene's Conceptual scoring formula</i>:
+ *
+ *  <br>&nbsp;<br>
+ *  <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *    <tr><td>
+ *    <table cellpadding="1" cellspacing="0" border="1" align="center">
+ *      <tr><td>
+ *      <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *        <tr>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            score(q,d) &nbsp; = &nbsp;
+ *            <font color="#FF9933">coord-factor(q,d)</font> &middot; &nbsp;
+ *            <font color="#CCCC00">query-boost(q)</font> &middot; &nbsp;
+ *          </td>
+ *          <td valign="middle" align="center">
+ *            <table>
+ *               <tr><td align="center"><small><font color="#993399">V(q)&nbsp;&middot;&nbsp;V(d)</font></small></td></tr>
+ *               <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
+ *               <tr><td align="center"><small><font color="#FF33CC">|V(q)|</font></small></td></tr>
+ *            </table>
+ *          </td>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            &nbsp; &middot; &nbsp; <font color="#3399FF">doc-len-norm(d)</font>
+ *            &nbsp; &middot; &nbsp; <font color="#3399FF">doc-boost(d)</font>
+ *          </td>
+ *        </tr>
+ *      </table>
+ *      </td></tr>
+ *    </table>
+ *    </td></tr>
+ *    <tr><td>
+ *    <center><font size="-1"><u>Lucene Conceptual Scoring Formula</u></font></center>
+ *    </td></tr>
+ *  </table>
+ *  <br>&nbsp;<br>
+ *
+ * <p>The conceptual formula is a simplification in the sense that (1) terms and documents
+ * are fielded and (2) boosts are usually per query term rather than per query.
+ *
+ * <p>We now describe how Lucene implements this conceptual scoring formula, and
+ * derive from it <i>Lucene's Practical Scoring Function</i>.
+ *  
+ * <p>For efficient score computation some scoring components
+ * are computed and aggregated in advance:
+ *
+ * <ul>
+ *  <li><i>Query-boost</i> for the query (actually for each query term)
+ *  is known when search starts.
+ *  </li>
+ *
+ *  <li>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts,
+ *  as it is independent of the document being scored.
+ *  From a search optimization perspective, it is a valid question
+ *  why bother to normalize the query at all, because all
+ *  scored documents will be multiplied by the same <i>|V(q)|</i>,
+ *  and hence document ranks (their order by score) will not
+ *  be affected by this normalization.
+ *  There are two good reasons to keep this normalization:
+ *  <ul>
+ *   <li>Recall that
+ *   <a href="http://en.wikipedia.org/wiki/Cosine_similarity">
+ *   Cosine Similarity</a> can be used to find how similar
+ *   two documents are. One can use Lucene for e.g.
+ *   clustering, and use a document as a query to compute
+ *   its similarity to other documents.
+ *   In this use case it is important that the score of document <i>d3</i>
+ *   for query <i>d1</i> is comparable to the score of document <i>d3</i>
+ *   for query <i>d2</i>. In other words, scores of a document for two
+ *   distinct queries should be comparable.
+ *   There are other applications that may require this.
+ *   And this is exactly what normalizing the query vector <i>V(q)</i>
+ *   provides: comparability (to a certain extent) of two or more queries.
+ *   </li>
+ *
+ *   <li>Applying query normalization on the scores helps to keep the
+ *   scores around the unit vector, hence preventing loss of score data
+ *   because of floating point precision limitations.
+ *   </li>
+ *  </ul>
+ *  </li>
+ *
+ *  <li>Document length norm <i>doc-len-norm(d)</i> and document
+ *  boost <i>doc-boost(d)</i> are known at indexing time.
+ *  They are computed in advance and their multiplication
+ *  is saved as a single value in the index: <i>norm(d)</i>.
+ *  (In the equations below, <i>norm(t in d)</i> means <i>norm(field(t) in doc d)</i>
+ *  where <i>field(t)</i> is the field associated with term <i>t</i>.)
+ *  </li>
+ * </ul>
+ *
+ * <p><i>Lucene's Practical Scoring Function</i> is derived from the above.
+ * The color codes demonstrate how it relates
+ * to those of the <i>conceptual</i> formula:
+ *
+ * <P>
+ * <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *  <tr><td>
+ *  <table cellpadding="" cellspacing="2" border="2" align="center">
+ *  <tr><td>
+ *   <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *   <tr>
+ *     <td valign="middle" align="right" rowspan="1">
+ *       score(q,d) &nbsp; = &nbsp;
+ *       <A HREF="#formula_coord"><font color="#FF9933">coord(q,d)</font></A> &nbsp;&middot;&nbsp;
+ *       <A HREF="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></A> &nbsp;&middot;&nbsp;
+ *     </td>
+ *     <td valign="bottom" align="center" rowspan="1">
+ *       <big><big><big>&sum;</big></big></big>
+ *     </td>
+ *     <td valign="middle" align="right" rowspan="1">
+ *       <big><big>(</big></big>
+ *       <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> &nbsp;&middot;&nbsp;
+ *       <A HREF="#formula_idf"><font color="#993399">idf(t)</font></A><sup>2</sup> &nbsp;&middot;&nbsp;
+ *       <A HREF="#formula_termBoost"><font color="#CCCC00">t.getBoost()</font></A>&nbsp;&middot;&nbsp;
+ *       <A HREF="#formula_norm"><font color="#3399FF">norm(t,d)</font></A>
+ *       <big><big>)</big></big>
+ *     </td>
+ *   </tr>
+ *   <tr valigh="top">
+ *    <td></td>
+ *    <td align="center"><small>t in q</small></td>
+ *    <td></td>
+ *   </tr>
+ *   </table>
+ *  </td></tr>
+ *  </table>
+ * </td></tr>
+ * <tr><td>
+ *  <center><font size="-1"><u>Lucene Practical Scoring Function</u></font></center>
+ * </td></tr>
+ * </table>
+ *
+ * <p> where
+ * <ol>
+ *    <li>
+ *      <A NAME="formula_tf"></A>
+ *      <b><i>tf(t in d)</i></b>
+ *      correlates to the term's <i>frequency</i>,
+ *      defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>.
+ *      Documents that have more occurrences of a given term receive a higher score.
+ *      Note that <i>tf(t in q)</i> is assumed to be <i>1</i> and therefore it does not appear in this equation.
+ *      However, if a query contains the same term twice, there will be
+ *      two term-queries with that same term and hence the computation would still be correct (although
+ *      not very efficient).
+ *      The default computation for <i>tf(t in d)</i> in
+ *      {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is:
+ *
+ *      <br>&nbsp;<br>
+ *      <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *        <tr>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)} &nbsp; = &nbsp;
+ *          </td>
+ *          <td valign="top" align="center" rowspan="1">
+ *               frequency<sup><big>&frac12;</big></sup>
+ *          </td>
+ *        </tr>
+ *      </table>
+ *      <br>&nbsp;<br>
+ *    </li>
+ *
+ *    <li>
+ *      <A NAME="formula_idf"></A>
+ *      <b><i>idf(t)</i></b> stands for Inverse Document Frequency. This value
+ *      correlates to the inverse of <i>docFreq</i>
+ *      (the number of documents in which the term <i>t</i> appears).
+ *      This means rarer terms contribute more to the total score.
+ *      <i>idf(t)</i> appears for <i>t</i> in both the query and the document,
+ *      hence it is squared in the equation.
+ *      The default computation for <i>idf(t)</i> in
+ *      {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is:
+ *
+ *      <br>&nbsp;<br>
+ *      <table cellpadding="2" cellspacing="2" border="0" align="center">
+ *        <tr>
+ *          <td valign="middle" align="right">
+ *            {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)}&nbsp; = &nbsp;
+ *          </td>
+ *          <td valign="middle" align="center">
+ *            1 + log <big>(</big>
+ *          </td>
+ *          <td valign="middle" align="center">
+ *            <table>
+ *               <tr><td align="center"><small>numDocs</small></td></tr>
+ *               <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr>
+ *               <tr><td align="center"><small>docFreq+1</small></td></tr>
+ *            </table>
+ *          </td>
+ *          <td valign="middle" align="center">
+ *            <big>)</big>
+ *          </td>
+ *        </tr>
+ *      </table>
+ *      <br>&nbsp;<br>
+ *    </li>
+ *
+ *    <li>
+ *      <A NAME="formula_coord"></A>
+ *      <b><i>coord(q,d)</i></b>
+ *      is a score factor based on how many of the query terms are found in the specified document.
+ *      Typically, a document that contains more of the query's terms will receive a higher score
+ *      than another document with fewer query terms.
+ *      This is a search time factor computed in
+ *      {@link SimilarityProvider#coord(int, int) coord(q,d)}
+ *      by the SimilarityProvider in effect at search time.
+ *      <br>&nbsp;<br>
+ *    </li>
+ *
+ *    <li><b>
+ *      <A NAME="formula_queryNorm"></A>
+ *      <i>queryNorm(q)</i>
+ *      </b>
+ *      is a normalizing factor used to make scores between queries comparable.
+ *      This factor does not affect document ranking (since all ranked documents are multiplied by the same factor),
+ *      but rather just attempts to make scores from different queries (or even different indexes) comparable.
+ *      This is a search time factor computed by the SimilarityProvider in effect at search time.
+ *
+ *      The default computation in
+ *      {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) DefaultSimilarityProvider}
+ *      produces a <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norm</a>:
+ *      <br>&nbsp;<br>
+ *      <table cellpadding="1" cellspacing="0" border="0" align="center">
+ *        <tr>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            queryNorm(q)  &nbsp; = &nbsp;
+ *            {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) queryNorm(sumOfSquaredWeights)}
+ *            &nbsp; = &nbsp;
+ *          </td>
+ *          <td valign="middle" align="center" rowspan="1">
+ *            <table>
+ *               <tr><td align="center"><big>1</big></td></tr>
+ *               <tr><td align="center"><big>
+ *                  &ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;
+ *               </big></td></tr>
+ *               <tr><td align="center">sumOfSquaredWeights<sup><big>&frac12;</big></sup></td></tr>
+ *            </table>
+ *          </td>
+ *        </tr>
+ *      </table>
+ *      <br>&nbsp;<br>
+ *
+ *      The sum of squared weights (of the query terms) is
+ *      computed by the query {@link org.apache.lucene.search.Weight} object.
+ *      For example, a {@link org.apache.lucene.search.BooleanQuery}
+ *      computes this value as:
+ *
+ *      <br>&nbsp;<br>
+ *      <table cellpadding="1" cellspacing="0" border="0"n align="center">
+ *        <tr>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            {@link org.apache.lucene.search.Weight#getValueForNormalization() sumOfSquaredWeights} &nbsp; = &nbsp;
+ *            {@link org.apache.lucene.search.Query#getBoost() q.getBoost()} <sup><big>2</big></sup>
+ *            &nbsp;&middot;&nbsp;
+ *          </td>
+ *          <td valign="bottom" align="center" rowspan="1">
+ *            <big><big><big>&sum;</big></big></big>
+ *          </td>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            <big><big>(</big></big>
+ *            <A HREF="#formula_idf">idf(t)</A> &nbsp;&middot;&nbsp;
+ *            <A HREF="#formula_termBoost">t.getBoost()</A>
+ *            <big><big>) <sup>2</sup> </big></big>
+ *          </td>
+ *        </tr>
+ *        <tr valign="top">
+ *          <td></td>
+ *          <td align="center"><small>t in q</small></td>
+ *          <td></td>
+ *        </tr>
+ *      </table>
+ *      <br>&nbsp;<br>
+ *
+ *    </li>
+ *
+ *    <li>
+ *      <A NAME="formula_termBoost"></A>
+ *      <b><i>t.getBoost()</i></b>
+ *      is a search time boost of term <i>t</i> in the query <i>q</i> as
+ *      specified in the query text
+ *      (see <A HREF="../../../../../../queryparsersyntax.html#Boosting a Term">query syntax</A>),
+ *      or as set by application calls to
+ *      {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}.
+ *      Notice that there is really no direct API for accessing the boost of one term in a multi-term query;
+ *      rather, multiple terms are represented in a query as multiple
+ *      {@link org.apache.lucene.search.TermQuery TermQuery} objects,
+ *      and so the boost of a term in the query is accessible by calling the sub-query's
+ *      {@link org.apache.lucene.search.Query#getBoost() getBoost()}.
+ *      <br>&nbsp;<br>
+ *    </li>
+ *
+ *    <li>
+ *      <A NAME="formula_norm"></A>
+ *      <b><i>norm(t,d)</i></b> encapsulates a few (indexing time) boost and length factors:
+ *
+ *      <ul>
+ *        <li><b>Document boost</b> - set by calling
+ *        {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()}
+ *        before adding the document to the index.
+ *        </li>
+ *        <li><b>Field boost</b> - set by calling
+ *        {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()}
+ *        before adding the field to a document.
+ *        </li>
+ *        <li><b>lengthNorm</b> - computed
+ *        when the document is added to the index, based on the number of tokens
+ *        in this field in the document, so that shorter fields contribute more to the score.
+ *        lengthNorm is computed by the Similarity class in effect at indexing time.
+ *        </li>
+ *      </ul>
+ *      The {@link #computeNorm} method is responsible for
+ *      combining all of these factors into a single float.
+ *
+ *      <p>
+ *      When a document is added to the index, all the above factors are multiplied.
+ *      If the document has multiple fields with the same name, all their boosts are multiplied together:
+ *
+ *      <br>&nbsp;<br>
+ *      <table cellpadding="1" cellspacing="0" border="0" align="center">
+ *        <tr>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            norm(t,d) &nbsp; = &nbsp;
+ *            {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()}
+ *            &nbsp;&middot;&nbsp;
+ *            lengthNorm
+ *            &nbsp;&middot;&nbsp;
+ *          </td>
+ *          <td valign="bottom" align="center" rowspan="1">
+ *            <big><big><big>&prod;</big></big></big>
+ *          </td>
+ *          <td valign="middle" align="right" rowspan="1">
+ *            {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}()
+ *          </td>
+ *        </tr>
+ *        <tr valign="top">
+ *          <td></td>
+ *          <td align="center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td>
+ *          <td></td>
+ *        </tr>
+ *      </table>
+ *      <br>&nbsp;<br>
+ *      However, the resulting <i>norm</i> value is {@link #encodeNormValue(float) encoded} as a single byte
+ *      before being stored.
+ *      At search time, the norm byte value is read from the index
+ *      {@link org.apache.lucene.store.Directory directory} and
+ *      {@link #decodeNormValue(byte) decoded} back to a float <i>norm</i> value.
+ *      This encoding/decoding, while reducing index size, comes with the price of
+ *      precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>.
+ *      For instance, <i>decode(encode(0.89)) = 0.75</i>.
+ *      <br>&nbsp;<br>
+ *      Compression of norm values to a single byte saves memory at search time, 
+ *      because once a field is referenced at search time, its norms - for 
+ *      all documents - are maintained in memory.
+ *      <br>&nbsp;<br>
+ *      The rationale for such lossy compression of norm values is that, given
+ *      the difficulty (and inaccuracy) with which users express their true
+ *      information need in a query, only large differences matter.
+ *      <br>&nbsp;<br>
+ *      Last, note that search time is too late to modify this <i>norm</i> part of scoring, e.g. by
+ *      using a different {@link Similarity} for search.
+ *      <br>&nbsp;<br>
+ *    </li>
+ * </ol>
+ *
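+ * <p>As a worked example of the two formulas above (pure arithmetic, nothing
+ * new): for a two-clause query with idf(t<sub>1</sub>)=2.0,
+ * idf(t<sub>2</sub>)=3.0 and all boosts left at 1.0,
+ * sumOfSquaredWeights = 2.0<sup>2</sup>&nbsp;+&nbsp;3.0<sup>2</sup> = 13.0,
+ * and hence queryNorm = 1/13.0<sup>&frac12;</sup> &asymp; 0.277.
+ *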
+ * @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider)
+ * @see IndexSearcher#setSimilarityProvider(SimilarityProvider)
+ */
+public abstract class TFIDFSimilarity extends Similarity {
+  
+  /** Computes a score factor based on a term or phrase's frequency in a
+   * document.  This value is multiplied by the {@link #idf(int, int)}
+   * factor for each term in the query and these products are then summed to
+   * form the initial score for a document.
+   *
+   * <p>Terms and phrases repeated in a document indicate the topic of the
+   * document, so implementations of this method usually return larger values
+   * when <code>freq</code> is large, and smaller values when <code>freq</code>
+   * is small.
+   *
+   * <p>The default implementation calls {@link #tf(float)}.
+   *
+   * @param freq the frequency of a term within a document
+   * @return a score factor based on a term's within-document frequency
+   */
+  public float tf(int freq) {
+    return tf((float)freq);
+  }
+
+  /** Computes a score factor based on a term or phrase's frequency in a
+   * document.  This value is multiplied by the {@link #idf(int, int)}
+   * factor for each term in the query and these products are then summed to
+   * form the initial score for a document.
+   *
+   * <p>Terms and phrases repeated in a document indicate the topic of the
+   * document, so implementations of this method usually return larger values
+   * when <code>freq</code> is large, and smaller values when <code>freq</code>
+   * is small.
+   *
+   * @param freq the frequency of a term within a document
+   * @return a score factor based on a term's within-document frequency
+   */
+  public abstract float tf(float freq);
+
+  /**
+   * Computes a score factor for a simple term and returns an explanation
+   * for that score factor.
+   * 
+   * <p>
+   * The default implementation uses:
+   * 
+   * <pre>
+   * idf(docFreq, searcher.maxDoc());
+   * </pre>
+   * 
+   * Note that {@link IndexSearcher#maxDoc()} is used instead of
+   * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because
+   * {@link IndexSearcher#docFreq(Term)} is also used; when the latter
+   * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction.
+   * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute.
+   *
+   * @param stats statistics of the term in question
+   * @param searcher the document collection being searched
+   * @return an Explanation object that includes both an idf score factor
+   *         and an explanation for the term.
+   * @throws IOException
+   */
+  public Explanation idfExplain(TermContext stats, final IndexSearcher searcher) throws IOException {
+    final int df = stats.docFreq();
+    final int max = searcher.maxDoc();
+    final float idf = idf(df, max);
+    return new Explanation(idf, "idf(docFreq=" + df + ", maxDocs=" + max + ")");
+  }
+
+  /**
+   * Computes a score factor for a phrase.
+   * 
+   * <p>
+   * The default implementation sums the idf factor for
+   * each term in the phrase.
+   * 
+   * @param stats statistics of the terms in the phrase
+   * @param searcher the document collection being searched
+   * @return an Explanation object that includes both an idf
+   *         score factor for the phrase and an explanation
+   *         for each term.
+   * @throws IOException
+   */
+  public Explanation idfExplain(final TermContext stats[], IndexSearcher searcher) throws IOException {
+    final int max = searcher.maxDoc();
+    float idf = 0.0f;
+    final Explanation exp = new Explanation();
+    exp.setDescription("idf(), sum of:");
+    for (final TermContext stat : stats) {
+      final int df = stat.docFreq();
+      final float termIdf = idf(df, max);
+      exp.addDetail(new Explanation(termIdf, "idf(docFreq=" + df + ", maxDocs=" + max + ")"));
+      idf += termIdf;
+    }
+    exp.setValue(idf);
+    return exp;
+  }
+
+  /** Computes a score factor based on a term's document frequency (the number
+   * of documents which contain the term).  This value is multiplied by the
+   * {@link #tf(int)} factor for each term in the query and these products are
+   * then summed to form the initial score for a document.
+   *
+   * <p>Terms that occur in fewer documents are better indicators of topic, so
+   * implementations of this method usually return larger values for rare terms,
+   * and smaller values for common terms.
+   *
+   * @param docFreq the number of documents which contain the term
+   * @param numDocs the total number of documents in the collection
+   * @return a score factor based on the term's document frequency
+   */
+  public abstract float idf(int docFreq, int numDocs);
+
+  /** Cache of decoded bytes. */
+  private static final float[] NORM_TABLE = new float[256];
+
+  static {
+    for (int i = 0; i < 256; i++)
+      NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
+  }
+
+  /** Decodes a normalization factor stored in an index.
+   * @see #encodeNormValue(float)
+   */
+  public float decodeNormValue(byte b) {
+    return NORM_TABLE[b & 0xFF];  // & 0xFF maps negative bytes to positive above 127
+  }
+
+  /** Encodes a normalization factor for storage in an index.
+   *
+   * <p>The encoding uses a three-bit mantissa, a five-bit exponent, and
+   * the zero-exponent point at 15, thus
+   * representing values from around 7x10^9 to 2x10^-9 with about one
+   * significant decimal digit of accuracy.  Zero is also represented.
+   * Negative numbers are rounded up to zero.  Values too large to represent
+   * are rounded down to the largest representable value.  Positive values too
+   * small to represent are rounded up to the smallest positive representable
+   * value.
+   * @see org.apache.lucene.document.Field#setBoost(float)
+   * @see org.apache.lucene.util.SmallFloat
+   */
+  public byte encodeNormValue(float f) {
+    return SmallFloat.floatToByte315(f);
+  }
+ 
+  @Override
+  public final Stats computeStats(IndexSearcher searcher, String fieldName, float queryBoost,
+      TermContext... termContexts) throws IOException {
+    final Explanation idf = termContexts.length == 1
+        ? idfExplain(termContexts[0], searcher)
+        : idfExplain(termContexts, searcher);
+    return new IDFStats(idf, queryBoost);
+  }
+
+  @Override
+  public final ExactDocScorer exactDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
+    return new ExactTFIDFDocScorer((IDFStats)stats, context.reader.norms(fieldName));
+  }
+
+  @Override
+  public final SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
+    return new SloppyTFIDFDocScorer((IDFStats)stats, context.reader.norms(fieldName));
+  }
+  
+  // TODO: we can specialize these for omitNorms up front, but we should test that it doesn't confuse stupid hotspot.
+
+  private final class ExactTFIDFDocScorer extends ExactDocScorer {
+    private final IDFStats stats;
+    private final float weightValue;
+    private final byte[] norms;
+    private static final int SCORE_CACHE_SIZE = 32;
+    private float[] scoreCache = new float[SCORE_CACHE_SIZE];
+    
+    ExactTFIDFDocScorer(IDFStats stats, byte norms[]) {
+      this.stats = stats;
+      this.weightValue = stats.value;
+      this.norms = norms;
+      for (int i = 0; i < SCORE_CACHE_SIZE; i++)
+        scoreCache[i] = tf(i) * weightValue;
+    }
+    
+    @Override
+    public float score(int doc, int freq) {
+      final float raw =                                // compute tf(f)*weight
+        freq < SCORE_CACHE_SIZE                        // check cache
+        ? scoreCache[freq]                             // cache hit
+        : tf(freq) * weightValue;                      // cache miss
+
+      return norms == null ? raw : raw * decodeNormValue(norms[doc]); // normalize for field
+    }
+
+    @Override
+    public Explanation explain(int doc, Explanation freq) {
+      return explainScore(doc, freq, stats, norms);
+    }
+  }
+  
+  private final class SloppyTFIDFDocScorer extends SloppyDocScorer {
+    private final IDFStats stats;
+    private final float weightValue;
+    private final byte[] norms;
+    
+    SloppyTFIDFDocScorer(IDFStats stats, byte norms[]) {
+      this.stats = stats;
+      this.weightValue = stats.value;
+      this.norms = norms;
+    }
+    
+    @Override
+    public float score(int doc, float freq) {
+      final float raw = tf(freq) * weightValue; // compute tf(f)*weight
+      
+      return norms == null ? raw : raw * decodeNormValue(norms[doc]);  // normalize for field
+    }
+    
+    @Override
+    public Explanation explain(int doc, Explanation freq) {
+      return explainScore(doc, freq, stats, norms);
+    }
+  }
+  
+  /** Collection statistics for the TF-IDF model. The only statistic of interest
+   * to this model is idf. */
+  private static class IDFStats extends Stats {
+    /** The idf and its explanation */
+    private final Explanation idf;
+    private float queryNorm;
+    private float queryWeight;
+    private final float queryBoost;
+    private float value;
+    
+    public IDFStats(Explanation idf, float queryBoost) {
+      // TODO: Validate?
+      this.idf = idf;
+      this.queryBoost = queryBoost;
+      this.queryWeight = idf.getValue() * queryBoost; // compute query weight
+    }
+
+    @Override
+    public float getValueForNormalization() {
+      // TODO: (sorta LUCENE-1907) make non-static class and expose this squaring via a nice method to subclasses?
+      return queryWeight * queryWeight;  // sum of squared weights
+    }
+
+    @Override
+    public void normalize(float queryNorm, float topLevelBoost) {
+      this.queryNorm = queryNorm * topLevelBoost;
+      queryWeight *= this.queryNorm;              // normalize query weight
+      value = queryWeight * idf.getValue();         // idf for document
+    }
+  }
+  
+  private Explanation explainScore(int doc, Explanation freq, IDFStats stats, byte[] norms) {
+    Explanation result = new Explanation();
+    result.setDescription("score(doc="+doc+",freq="+freq+"), product of:");
+
+    // explain query weight
+    Explanation queryExpl = new Explanation();
+    queryExpl.setDescription("queryWeight, product of:");
+
+    Explanation boostExpl = new Explanation(stats.queryBoost, "boost");
+    if (stats.queryBoost != 1.0f)
+      queryExpl.addDetail(boostExpl);
+    queryExpl.addDetail(stats.idf);
+
+    Explanation queryNormExpl = new Explanation(stats.queryNorm,"queryNorm");
+    queryExpl.addDetail(queryNormExpl);
+
+    queryExpl.setValue(boostExpl.getValue() *
+                       stats.idf.getValue() *
+                       queryNormExpl.getValue());
+
+    result.addDetail(queryExpl);
+
+    // explain field weight
+    Explanation fieldExpl = new Explanation();
+    fieldExpl.setDescription("fieldWeight in "+doc+
+                             ", product of:");
+
+    Explanation tfExplanation = new Explanation();
+    tfExplanation.setValue(tf(freq.getValue()));
+    tfExplanation.setDescription("tf(freq="+freq.getValue()+"), with freq of:");
+    tfExplanation.addDetail(freq);
+    fieldExpl.addDetail(tfExplanation);
+    fieldExpl.addDetail(stats.idf);
+
+    Explanation fieldNormExpl = new Explanation();
+    float fieldNorm =
+      norms!=null ? decodeNormValue(norms[doc]) : 1.0f;
+    fieldNormExpl.setValue(fieldNorm);
+    fieldNormExpl.setDescription("fieldNorm(doc="+doc+")");
+    fieldExpl.addDetail(fieldNormExpl);
+    
+    fieldExpl.setValue(tfExplanation.getValue() *
+                       stats.idf.getValue() *
+                       fieldNormExpl.getValue());
+
+    result.addDetail(fieldExpl);
+    
+    // combine them
+    result.setValue(queryExpl.getValue() * fieldExpl.getValue());
+
+    if (queryExpl.getValue() == 1.0f)
+      return fieldExpl;
+
+    return result;
+  }
+}
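To make the new extension point concrete, here is a minimal sketch of a
TFIDFSimilarity subclass wiring up only the two abstract hooks declared above,
tf(float) and idf(int, int). The class name and the sqrt/log formulas are
illustrative rather than prescribed by this patch, and any further abstract
methods inherited from Similarity on this branch (such as a computeNorm hook)
are elided because their exact signatures may differ:

    import org.apache.lucene.search.TFIDFSimilarity;

    public class SketchTFIDFSimilarity extends TFIDFSimilarity {
      @Override
      public float tf(float freq) {
        return (float) Math.sqrt(freq);   // classic sqrt(tf) damping
      }

      @Override
      public float idf(int docFreq, int numDocs) {
        // 1 + log(numDocs / (docFreq + 1)), as pictured in the javadoc table
        return (float) (Math.log(numDocs / (double) (docFreq + 1)) + 1.0);
      }

      // computeNorm and any other inherited abstract methods are omitted;
      // this is a fragment under stated assumptions, not a drop-in class.
    }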
diff --git a/lucene/src/java/org/apache/lucene/search/TermCollectingRewrite.java b/lucene/src/java/org/apache/lucene/search/TermCollectingRewrite.java
index 5018317..192dd43 100644
--- a/lucene/src/java/org/apache/lucene/search/TermCollectingRewrite.java
+++ b/lucene/src/java/org/apache/lucene/search/TermCollectingRewrite.java
@@ -29,7 +29,7 @@
 import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ReaderUtil;
 
 abstract class TermCollectingRewrite<Q extends Query> extends MultiTermQuery.RewriteMethod {
@@ -43,7 +43,7 @@
     addClause(topLevel, term, docCount, boost, null);
   }
   
-  protected abstract void addClause(Q topLevel, Term term, int docCount, float boost, PerReaderTermState states) throws IOException;
+  protected abstract void addClause(Q topLevel, Term term, int docCount, float boost, TermContext states) throws IOException;
 
   
   protected final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
diff --git a/lucene/src/java/org/apache/lucene/search/TermQuery.java b/lucene/src/java/org/apache/lucene/search/TermQuery.java
index 078d02f..936b0bf 100644
--- a/lucene/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/TermQuery.java
@@ -27,9 +27,9 @@
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.search.Similarity.ExactDocScorer;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util.ToStringUtils;
 
@@ -39,28 +39,19 @@
 public class TermQuery extends Query {
   private final Term term;
   private int docFreq;
-  private transient PerReaderTermState perReaderTermState;
+  private transient TermContext perReaderTermState;
 
   private class TermWeight extends Weight {
     private final Similarity similarity;
-    private float value;
-    private final float idf;
-    private float queryNorm;
-    private float queryWeight;
-    private final IDFExplanation idfExp;
-    private transient PerReaderTermState termStates;
+    private final Similarity.Stats stats;
+    private transient TermContext termStates;
 
-    public TermWeight(IndexSearcher searcher, PerReaderTermState termStates, int docFreq)
+    public TermWeight(IndexSearcher searcher, TermContext termStates)
       throws IOException {
-      assert termStates != null : "PerReaderTermState must not be null";
+      assert termStates != null : "TermContext must not be null";
       this.termStates = termStates;
       this.similarity = searcher.getSimilarityProvider().get(term.field());
-      if (docFreq != -1) {
-        idfExp = similarity.idfExplain(term, searcher, docFreq);
-      } else {
-        idfExp = similarity.idfExplain(term, searcher);
-      }
-      idf = idfExp.getIdf();
+      this.stats = similarity.computeStats(searcher, term.field(), getBoost(), termStates);
     }
 
     @Override
@@ -70,19 +61,13 @@
     public Query getQuery() { return TermQuery.this; }
 
     @Override
-    public float getValue() { return value; }
-
-    @Override
-    public float sumOfSquaredWeights() {
-      queryWeight = idf * getBoost();             // compute query weight
-      return queryWeight * queryWeight;           // square it
+    public float getValueForNormalization() {
+      return stats.getValueForNormalization();
     }
 
     @Override
-    public void normalize(float queryNorm) {
-      this.queryNorm = queryNorm;
-      queryWeight *= queryNorm;                   // normalize query weight
-      value = queryWeight * idf;                  // idf for document
+    public void normalize(float queryNorm, float topLevelBoost) {
+      stats.normalize(queryNorm, topLevelBoost);
     }
 
     @Override
@@ -97,7 +82,7 @@
       }
       final DocsEnum docs = reader.termDocsEnum(reader.getLiveDocs(), field, term.bytes(), state);
       assert docs != null;
-      return new TermScorer(this, docs, similarity, context.reader.norms(field));
+      return new TermScorer(this, docs, similarity.exactDocScorer(stats, field, context));
     }
     
     private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
@@ -107,79 +92,25 @@
     }
     
     @Override
-    public Explanation explain(AtomicReaderContext context, int doc)
-      throws IOException {
-      final IndexReader reader = context.reader;
-
-      ComplexExplanation result = new ComplexExplanation();
-      result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
-
-      Explanation expl = new Explanation(idf, idfExp.explain());
-
-      // explain query weight
-      Explanation queryExpl = new Explanation();
-      queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");
-
-      Explanation boostExpl = new Explanation(getBoost(), "boost");
-      if (getBoost() != 1.0f)
-        queryExpl.addDetail(boostExpl);
-      queryExpl.addDetail(expl);
-
-      Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
-      queryExpl.addDetail(queryNormExpl);
-
-      queryExpl.setValue(boostExpl.getValue() *
-                         expl.getValue() *
-                         queryNormExpl.getValue());
-
-      result.addDetail(queryExpl);
-
-      // explain field weight
-      String field = term.field();
-      ComplexExplanation fieldExpl = new ComplexExplanation();
-      fieldExpl.setDescription("fieldWeight("+term+" in "+doc+
-                               "), product of:");
-
-      Explanation tfExplanation = new Explanation();
-      int tf = 0;
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      IndexReader reader = context.reader;
       DocsEnum docs = reader.termDocsEnum(context.reader.getLiveDocs(), term.field(), term.bytes());
       if (docs != null) {
-          int newDoc = docs.advance(doc);
-          if (newDoc == doc) {
-            tf = docs.freq();
-          }
-        tfExplanation.setValue(similarity.tf(tf));
-        tfExplanation.setDescription("tf(termFreq("+term+")="+tf+")");
-      } else {
-        tfExplanation.setValue(0.0f);
-        tfExplanation.setDescription("no matching term");
+        int newDoc = docs.advance(doc);
+        if (newDoc == doc) {
+          int freq = docs.freq();
+          ExactDocScorer docScorer = similarity.exactDocScorer(stats, term.field(), context);
+          ComplexExplanation result = new ComplexExplanation();
+          result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
+          result.addDetail(scoreExplanation);
+          result.setValue(scoreExplanation.getValue());
+          result.setMatch(true);
+          return result;
+        }
       }
-      fieldExpl.addDetail(tfExplanation);
-      fieldExpl.addDetail(expl);
-
-      Explanation fieldNormExpl = new Explanation();
-      final byte[] fieldNorms = reader.norms(field);
-      float fieldNorm =
-        fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
-      fieldNormExpl.setValue(fieldNorm);
-      fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
-      fieldExpl.addDetail(fieldNormExpl);
       
-      fieldExpl.setMatch(Boolean.valueOf(tfExplanation.isMatch()));
-      fieldExpl.setValue(tfExplanation.getValue() *
-                         expl.getValue() *
-                         fieldNormExpl.getValue());
-
-      result.addDetail(fieldExpl);
-      result.setMatch(fieldExpl.getMatch());
-      
-      // combine them
-      result.setValue(queryExpl.getValue() * fieldExpl.getValue());
-
-      if (queryExpl.getValue() == 1.0f)
-        return fieldExpl;
-
-      return result;
+      return new ComplexExplanation(false, 0.0f, "no matching term");
     }
   }
 
@@ -200,7 +131,7 @@
   /** Expert: constructs a TermQuery that will use the
    *  provided docFreq instead of looking up the docFreq
    *  against the searcher. */
-  public TermQuery(Term t, PerReaderTermState states) {
+  public TermQuery(Term t, TermContext states) {
     assert states != null;
     term = t;
     docFreq = states.docFreq();
@@ -213,20 +144,20 @@
   @Override
   public Weight createWeight(IndexSearcher searcher) throws IOException {
     final ReaderContext context = searcher.getTopReaderContext();
-    final int weightDocFreq;
-    final PerReaderTermState termState;
+    final TermContext termState;
     if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
       // make TermQuery single-pass if we don't have a PRTS or if the context differs!
-      termState = PerReaderTermState.build(context, term, true); // cache term lookups!
-      // we must not ignore the given docFreq - if set use the given value
-      weightDocFreq = docFreq == -1 ? termState.docFreq() : docFreq;
+      termState = TermContext.build(context, term, true); // cache term lookups!
     } else {
      // PRTS was pre-build for this IS
      termState = this.perReaderTermState;
-     weightDocFreq = docFreq;
     }
+
+    // we must not ignore the given docFreq - if set use the given value (lie)
+    if (docFreq != -1)
+      termState.setDocFreq(docFreq);
     
-    return new TermWeight(searcher, termState, weightDocFreq);
+    return new TermWeight(searcher, termState);
   }
 
   @Override
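For callers that already hold cached per-reader term statistics, the expert
constructor above now accepts a TermContext directly; a short usage sketch
(field and term are illustrative):

    Term t = new Term("body", "lucene");
    TermContext ctx = TermContext.build(searcher.getTopReaderContext(), t, true);
    TermQuery q = new TermQuery(t, ctx);   // reuses the cached term lookups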
diff --git a/lucene/src/java/org/apache/lucene/search/TermScorer.java b/lucene/src/java/org/apache/lucene/search/TermScorer.java
index 9a9ef5e..3534079 100644
--- a/lucene/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/src/java/org/apache/lucene/search/TermScorer.java
@@ -25,20 +25,16 @@
  */
 final class TermScorer extends Scorer {
   private DocsEnum docsEnum;
-  private byte[] norms;
-  private float weightValue;
   private int doc = -1;
   private int freq;
 
   private int pointer;
   private int pointerMax;
 
-  private static final int SCORE_CACHE_SIZE = 32;
-  private float[] scoreCache = new float[SCORE_CACHE_SIZE];
   private int[] docs;
   private int[] freqs;
   private final DocsEnum.BulkReadResult bulkResult;
-  private final Similarity similarity;
+  private final Similarity.ExactDocScorer docScorer;
   
   /**
    * Construct a <code>TermScorer</code>.
@@ -47,22 +43,15 @@
    *          The weight of the <code>Term</code> in the query.
    * @param td
    *          An iterator over the documents matching the <code>Term</code>.
-   * @param similarity
-   *          The </code>Similarity</code> implementation to be used for score
-   *          computations.
-   * @param norms
-   *          The field norms of the document fields for the <code>Term</code>.
+   * @param docScorer
+   *          The <code>Similarity.ExactDocScorer</code> implementation
+   *          to be used for score computations.
    */
-  TermScorer(Weight weight, DocsEnum td, Similarity similarity, byte[] norms) {
+  TermScorer(Weight weight, DocsEnum td, Similarity.ExactDocScorer docScorer) throws IOException {
     super(weight);
-    this.similarity = similarity;
+    this.docScorer = docScorer;
     this.docsEnum = td;
-    this.norms = norms;
-    this.weightValue = weight.getValue();
     bulkResult = td.getBulkResult();
-
-    for (int i = 0; i < SCORE_CACHE_SIZE; i++)
-      scoreCache[i] = similarity.tf(i) * weightValue;
   }
 
   @Override
@@ -134,12 +123,7 @@
   @Override
   public float score() {
     assert doc != NO_MORE_DOCS;
-    float raw =                                   // compute tf(f)*weight
-      freq < SCORE_CACHE_SIZE                        // check cache
-      ? scoreCache[freq]                             // cache hit
-      : similarity.tf(freq)*weightValue;        // cache miss
-
-    return norms == null ? raw : raw * similarity.decodeNormValue(norms[doc]); // normalize for field
+    return docScorer.score(doc, freq);  
   }
 
   /**
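TermScorer no longer sees norms or a precomputed weight value; all
per-document math is delegated to a Similarity.ExactDocScorer. A minimal
sketch of one, assuming ExactDocScorer is a static abstract inner class whose
only required method is the score(int, int) seen above (if its
explain(int, Explanation) is also abstract on this branch, it would need an
override too):

    Similarity.ExactDocScorer rawTf = new Similarity.ExactDocScorer() {
      @Override
      public float score(int doc, int freq) {
        return freq;   // raw within-document frequency; no idf, boost or norms
      }
    };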
diff --git a/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java b/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java
index 5b322a8..4ad6222 100644
--- a/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java
+++ b/lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java
@@ -29,7 +29,7 @@
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 
 /**
  * Base rewrite method for collecting only the top terms
@@ -80,7 +80,7 @@
         this.termComp = termsEnum.getComparator();
         // lazy init the initial ScoreTerm because comparator is not known on ctor:
         if (st == null)
-          st = new ScoreTerm(this.termComp, new PerReaderTermState(topReaderContext));
+          st = new ScoreTerm(this.termComp, new TermContext(topReaderContext));
         boostAtt = termsEnum.attributes().addAttribute(BoostAttribute.class);
       }
     
@@ -101,14 +101,14 @@
         if (t != null) {
           // if the term is already in the PQ, only update docFreq of term in PQ
           assert t.boost == boost : "boost should be equal in all segment TermsEnums";
-          t.termState.register(state, readerContext.ord, termsEnum.docFreq());
+          t.termState.register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
         } else {
           // add new entry in PQ, we must clone the term, else it may get overwritten!
           st.bytes.copy(bytes);
           st.boost = boost;
           visitedTerms.put(st.bytes, st);
           assert st.termState.docFreq() == 0;
-          st.termState.register(state, readerContext.ord, termsEnum.docFreq());
+          st.termState.register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
           stQueue.offer(st);
           // possibly drop entries from queue
           if (stQueue.size() > maxSize) {
@@ -116,7 +116,7 @@
             visitedTerms.remove(st.bytes);
             st.termState.clear(); // reset the termstate! 
           } else {
-            st = new ScoreTerm(termComp, new PerReaderTermState(topReaderContext));
+            st = new ScoreTerm(termComp, new TermContext(topReaderContext));
           }
           assert stQueue.size() <= maxSize : "the PQ size must be limited to maxSize";
           // set maxBoostAtt with values to help FuzzyTermsEnum to optimize
@@ -171,8 +171,8 @@
     public final Comparator<BytesRef> termComp;
     public final BytesRef bytes = new BytesRef();
     public float boost;
-    public final PerReaderTermState termState;
-    public ScoreTerm(Comparator<BytesRef> termComp, PerReaderTermState termState) {
+    public final TermContext termState;
+    public ScoreTerm(Comparator<BytesRef> termComp, TermContext termState) {
       this.termComp = termComp;
       this.termState = termState;
     }
diff --git a/lucene/src/java/org/apache/lucene/search/Weight.java b/lucene/src/java/org/apache/lucene/search/Weight.java
index 3fb8927..e99c5a6 100644
--- a/lucene/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/src/java/org/apache/lucene/search/Weight.java
@@ -41,11 +41,11 @@
  * <ol>
  * <li>A <code>Weight</code> is constructed by a top-level query, given a
  * <code>IndexSearcher</code> ({@link Query#createWeight(IndexSearcher)}).
- * <li>The {@link #sumOfSquaredWeights()} method is called on the
+ * <li>The {@link #getValueForNormalization()} method is called on the
  * <code>Weight</code> to compute the query normalization factor
  * {@link SimilarityProvider#queryNorm(float)} of the query clauses contained in the
  * query.
- * <li>The query normalization factor is passed to {@link #normalize(float)}. At
+ * <li>The query normalization factor is passed to {@link #normalize(float, float)}. At
  * this point the weighting is complete.
  * <li>A <code>Scorer</code> is constructed by
  * {@link #scorer(IndexReader.AtomicReaderContext, ScorerContext)}.
@@ -67,12 +67,12 @@
 
   /** The query that this concerns. */
   public abstract Query getQuery();
+  
+  /** The value for normalization of contained query clauses (e.g. sum of squared weights). */
+  public abstract float getValueForNormalization() throws IOException;
 
-  /** The weight for this query. */
-  public abstract float getValue();
-
-  /** Assigns the query normalization factor to this. */
-  public abstract void normalize(float norm);
+  /** Assigns the query normalization factor and boost from parent queries to this. */
+  public abstract void normalize(float norm, float topLevelBoost);
 
   /**
    * Returns a {@link Scorer} which scores documents in/out-of order according
@@ -93,9 +93,6 @@
    * @throws IOException
    */
   public abstract Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException;
-  
-  /** The sum of squared weights of contained query clauses. */
-  public abstract float sumOfSquaredWeights() throws IOException;
 
   /**
    * Returns true iff this implementation scores docs only out of order. This
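Concretely, the lifecycle described in this javadoc now drives a Weight
roughly as follows (a sketch of the steps, not the literal IndexSearcher
code):

    Weight w = query.createWeight(searcher);                        // step 1
    float sum = w.getValueForNormalization();                       // step 2
    float norm = searcher.getSimilarityProvider().queryNorm(sum);   // step 2
    w.normalize(norm, 1.0f);   // step 3: topLevelBoost is 1.0 at the root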
diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index da91ef5..ac2f500 100644
--- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -18,11 +18,13 @@
  */
 
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Similarity.SloppyDocScorer;
 import org.apache.lucene.search.spans.NearSpansOrdered;
 import org.apache.lucene.search.spans.NearSpansUnordered;
 import org.apache.lucene.search.spans.SpanNearQuery;
@@ -145,7 +147,35 @@
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
       return new PayloadNearSpanScorer(query.getSpans(context), this,
-          similarity, context.reader.norms(query.getField()));
+          similarity, similarity.sloppyDocScorer(stats, query.getField(), context));
+    }
+    
+    @Override
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, ScorerContext.def());
+      if (scorer != null) {
+        int newDoc = scorer.advance(doc);
+        if (newDoc == doc) {
+          float freq = scorer.freq();
+          SloppyDocScorer docScorer = similarity.sloppyDocScorer(stats, query.getField(), context);
+          Explanation expl = new Explanation();
+          expl.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+          expl.addDetail(scoreExplanation);
+          expl.setValue(scoreExplanation.getValue());
+          // now the payloads part
+          Explanation payloadExpl = function.explain(doc, scorer.payloadsSeen, scorer.payloadScore);
+          // combined
+          ComplexExplanation result = new ComplexExplanation();
+          result.addDetail(expl);
+          result.addDetail(payloadExpl);
+          result.setValue(expl.getValue() * payloadExpl.getValue());
+          result.setDescription("PayloadNearQuery, product of:");
+          return result;
+        }
+      }
+      
+      return new ComplexExplanation(false, 0.0f, "no matching term");
     }
   }
 
@@ -155,8 +185,8 @@
     private int payloadsSeen;
 
     protected PayloadNearSpanScorer(Spans spans, Weight weight,
-        Similarity similarity, byte[] norms) throws IOException {
-      super(spans, weight, similarity, norms);
+        Similarity similarity, Similarity.SloppyDocScorer docScorer) throws IOException {
+      super(spans, weight, similarity, docScorer);
       this.spans = spans;
     }
 
@@ -225,20 +255,6 @@
       return super.score()
           * function.docScore(doc, fieldName, payloadsSeen, payloadScore);
     }
-
-    @Override
-    protected Explanation explain(int doc) throws IOException {
-      Explanation result = new Explanation();
-      // Add detail about tf/idf...
-      Explanation nonPayloadExpl = super.explain(doc);
-      result.addDetail(nonPayloadExpl);
-      // Add detail about payload
-      Explanation payloadExpl = function.explain(doc, payloadsSeen, payloadScore);
-      result.addDetail(payloadExpl);
-      result.setValue(nonPayloadExpl.getValue() * payloadExpl.getValue());
-      result.setDescription("PayloadNearQuery, product of:");
-      return result;
-    }
   }
 
 }
diff --git a/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index b3415a7..9c697de 100644
--- a/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -26,6 +26,9 @@
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Similarity.SloppyDocScorer;
+import org.apache.lucene.search.Weight.ScorerContext;
+import org.apache.lucene.search.payloads.PayloadNearQuery.PayloadNearSpanScorer;
 import org.apache.lucene.search.spans.TermSpans;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.SpanWeight;
@@ -76,7 +79,7 @@
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
       return new PayloadTermSpanScorer((TermSpans) query.getSpans(context),
-          this, similarity, context.reader.norms(query.getField()));
+          this, similarity, similarity.sloppyDocScorer(stats, query.getField(), context));
     }
 
     protected class PayloadTermSpanScorer extends SpanScorer {
@@ -86,8 +89,8 @@
       private final TermSpans termSpans;
 
       public PayloadTermSpanScorer(TermSpans spans, Weight weight,
-          Similarity similarity, byte[] norms) throws IOException {
-        super(spans, weight, similarity, norms);
+          Similarity similarity, Similarity.SloppyDocScorer docScorer) throws IOException {
+        super(spans, weight, similarity, docScorer);
         termSpans = spans;
       }
 
@@ -173,29 +176,40 @@
       protected float getPayloadScore() {
         return function.docScore(doc, term.field(), payloadsSeen, payloadScore);
       }
-
-      @Override
-      protected Explanation explain(final int doc) throws IOException {
-        ComplexExplanation result = new ComplexExplanation();
-        Explanation nonPayloadExpl = super.explain(doc);
-        result.addDetail(nonPayloadExpl);
-        // QUESTION: Is there a way to avoid this skipTo call? We need to know
-        // whether to load the payload or not
-        Explanation payloadBoost = new Explanation();
-        result.addDetail(payloadBoost);
-
-        float payloadScore = getPayloadScore();
-        payloadBoost.setValue(payloadScore);
-        // GSI: I suppose we could toString the payload, but I don't think that
-        // would be a good idea
-        payloadBoost.setDescription("scorePayload(...)");
-        result.setValue(nonPayloadExpl.getValue() * payloadScore);
-        result.setDescription("btq, product of:");
-        result.setMatch(nonPayloadExpl.getValue() == 0 ? Boolean.FALSE
-            : Boolean.TRUE); // LUCENE-1303
-        return result;
+    }
+    
+    @Override
+    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+      PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, ScorerContext.def());
+      if (scorer != null) {
+        int newDoc = scorer.advance(doc);
+        if (newDoc == doc) {
+          float freq = scorer.freq();
+          SloppyDocScorer docScorer = similarity.sloppyDocScorer(stats, query.getField(), context);
+          Explanation expl = new Explanation();
+          expl.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+          Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+          expl.addDetail(scoreExplanation);
+          expl.setValue(scoreExplanation.getValue());
+          // now the payloads part
+          // QUESTION: Is there a way to avoid this skipTo call? We need to know
+          // whether to load the payload or not
+          // GSI: I suppose we could toString the payload, but I don't think that
+          // would be a good idea
+          Explanation payloadExpl = new Explanation(scorer.getPayloadScore(), "scorePayload(...)");
+          payloadExpl.setValue(scorer.getPayloadScore());
+          // combined
+          ComplexExplanation result = new ComplexExplanation();
+          result.addDetail(expl);
+          result.addDetail(payloadExpl);
+          result.setValue(expl.getValue() * payloadExpl.getValue());
+          result.setDescription("btq, product of:");
+          result.setMatch(expl.getValue() == 0 ? Boolean.FALSE : Boolean.TRUE); // LUCENE-1303
+          return result;
+        }
       }
-
+      
+      return new ComplexExplanation(false, 0.0f, "no matching term");
     }
   }
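Since the payload explanation now lives on the Weight rather than the Scorer,
it is reachable through the ordinary explain path; a usage sketch (the field,
term and constructor variant are assumptions, and AveragePayloadFunction is
one of the stock PayloadFunction implementations):

    PayloadTermQuery ptq = new PayloadTermQuery(new Term("body", "lucene"),
                                                new AveragePayloadFunction());
    Explanation e = searcher.explain(ptq, docId);   // the "btq, product of:" tree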
 
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java b/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
index 865e2b1..a393b38 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanMultiTermQueryWrapper.java
@@ -27,7 +27,7 @@
 import org.apache.lucene.search.TopTermsRewrite;
 import org.apache.lucene.search.ScoringRewrite;
 import org.apache.lucene.search.BooleanClause.Occur; // javadocs only
-import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.TermContext;
 
 /**
  * Wraps any {@link MultiTermQuery} as a {@link SpanQuery}, 
@@ -155,7 +155,7 @@
       }
     
       @Override
-      protected void addClause(SpanOrQuery topLevel, Term term, int docCount, float boost, PerReaderTermState states) {
+      protected void addClause(SpanOrQuery topLevel, Term term, int docCount, float boost, TermContext states) {
         final SpanTermQuery q = new SpanTermQuery(term);
         q.setBoost(boost);
         topLevel.addClause(q);
@@ -204,7 +204,7 @@
         }
 
         @Override
-        protected void addClause(SpanOrQuery topLevel, Term term, int docFreq, float boost, PerReaderTermState states) {
+        protected void addClause(SpanOrQuery topLevel, Term term, int docFreq, float boost, TermContext states) {
           final SpanTermQuery q = new SpanTermQuery(term);
           q.setBoost(boost);
           topLevel.addClause(q);
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java
index 8b309a3..9cce1f4 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.TFIDFSimilarity;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Similarity;
@@ -29,22 +30,21 @@
  */
 public class SpanScorer extends Scorer {
   protected Spans spans;
-  protected byte[] norms;
-  protected float value;
 
   protected boolean more = true;
 
   protected int doc;
   protected float freq;
   protected final Similarity similarity;
+  protected final Similarity.SloppyDocScorer docScorer;
   
-  protected SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms)
+  protected SpanScorer(Spans spans, Weight weight, Similarity similarity, Similarity.SloppyDocScorer docScorer)
   throws IOException {
     super(weight);
     this.similarity = similarity;
+    this.docScorer = docScorer;
     this.spans = spans;
-    this.norms = norms;
-    this.value = weight.getValue();
+
     if (this.spans.next()) {
       doc = -1;
     } else {
@@ -94,27 +94,11 @@
 
   @Override
   public float score() throws IOException {
-    float raw = similarity.tf(freq) * value; // raw score
-    return norms == null? raw : raw * similarity.decodeNormValue(norms[doc]); // normalize
+    return docScorer.score(doc, freq);
   }
   
   @Override
   public float freq() throws IOException {
     return freq;
   }
-
-  /** This method is no longer an official member of {@link Scorer},
-   * but it is needed by SpanWeight to build an explanation. */
-  protected Explanation explain(final int doc) throws IOException {
-    Explanation tfExplanation = new Explanation();
-
-    int expDoc = advance(doc);
-
-    float phraseFreq = (expDoc == doc) ? freq : 0.0f;
-    tfExplanation.setValue(similarity.tf(phraseFreq));
-    tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
-
-    return tfExplanation;
-  }
-
 }
diff --git a/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 104bacf..cf8bf4e 100644
--- a/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -18,125 +18,76 @@
  */
 
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
-import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.search.Similarity.SloppyDocScorer;
+import org.apache.lucene.util.TermContext;
 
 import java.io.IOException;
-import java.util.HashSet;
 import java.util.Set;
+import java.util.TreeSet;
 
 /**
  * Expert-only.  Public for use by other weight implementations
  */
 public class SpanWeight extends Weight {
   protected Similarity similarity;
-  protected float value;
-  protected float idf;
-  protected float queryNorm;
-  protected float queryWeight;
-
   protected Set<Term> terms;
   protected SpanQuery query;
-  private IDFExplanation idfExp;
+  protected Similarity.Stats stats;
 
   public SpanWeight(SpanQuery query, IndexSearcher searcher)
     throws IOException {
     this.similarity = searcher.getSimilarityProvider().get(query.getField());
     this.query = query;
     
-    terms=new HashSet<Term>();
+    terms=new TreeSet<Term>();
     query.extractTerms(terms);
-    
-    idfExp = similarity.idfExplain(terms, searcher);
-    idf = idfExp.getIdf();
+    final ReaderContext context = searcher.getTopReaderContext();
+    final TermContext states[] = new TermContext[terms.size()];
+    int i = 0;
+    for (Term term : terms)
+      states[i++] = TermContext.build(context, term, true);
+    stats = similarity.computeStats(searcher, query.getField(), query.getBoost(), states);
   }
 
   @Override
   public Query getQuery() { return query; }
 
   @Override
-  public float getValue() { return value; }
-
-  @Override
-  public float sumOfSquaredWeights() throws IOException {
-    queryWeight = idf * query.getBoost();         // compute query weight
-    return queryWeight * queryWeight;             // square it
+  public float getValueForNormalization() throws IOException {
+    return stats.getValueForNormalization();
   }
 
   @Override
-  public void normalize(float queryNorm) {
-    this.queryNorm = queryNorm;
-    queryWeight *= queryNorm;                     // normalize query weight
-    value = queryWeight * idf;                    // idf for document
+  public void normalize(float queryNorm, float topLevelBoost) {
+    stats.normalize(queryNorm, topLevelBoost);
   }
 
   @Override
   public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-    return new SpanScorer(query.getSpans(context), this, similarity, context.reader
-        .norms(query.getField()));
+    return new SpanScorer(query.getSpans(context), this, similarity, similarity.sloppyDocScorer(stats, query.getField(), context));
   }
 
   @Override
-  public Explanation explain(AtomicReaderContext context, int doc)
-    throws IOException {
-
-    ComplexExplanation result = new ComplexExplanation();
-    result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
-    String field = ((SpanQuery)getQuery()).getField();
-
-    Explanation idfExpl =
-      new Explanation(idf, "idf(" + field + ": " + idfExp.explain() + ")");
-
-    // explain query weight
-    Explanation queryExpl = new Explanation();
-    queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");
-
-    Explanation boostExpl = new Explanation(getQuery().getBoost(), "boost");
-    if (getQuery().getBoost() != 1.0f)
-      queryExpl.addDetail(boostExpl);
-    queryExpl.addDetail(idfExpl);
-
-    Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
-    queryExpl.addDetail(queryNormExpl);
-
-    queryExpl.setValue(boostExpl.getValue() *
-                       idfExpl.getValue() *
-                       queryNormExpl.getValue());
-
-    result.addDetail(queryExpl);
-
-    // explain field weight
-    ComplexExplanation fieldExpl = new ComplexExplanation();
-    fieldExpl.setDescription("fieldWeight("+field+":"+query.toString(field)+
-                             " in "+doc+"), product of:");
-
-    Explanation tfExpl = ((SpanScorer)scorer(context, ScorerContext.def())).explain(doc);
-    fieldExpl.addDetail(tfExpl);
-    fieldExpl.addDetail(idfExpl);
-
-    Explanation fieldNormExpl = new Explanation();
-    byte[] fieldNorms = context.reader.norms(field);
-    float fieldNorm =
-      fieldNorms!=null ? similarity.decodeNormValue(fieldNorms[doc]) : 1.0f;
-    fieldNormExpl.setValue(fieldNorm);
-    fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
-    fieldExpl.addDetail(fieldNormExpl);
-
-    fieldExpl.setMatch(Boolean.valueOf(tfExpl.isMatch()));
-    fieldExpl.setValue(tfExpl.getValue() *
-                       idfExpl.getValue() *
-                       fieldNormExpl.getValue());
-
-    result.addDetail(fieldExpl);
-    result.setMatch(fieldExpl.getMatch());
-
-    // combine them
-    result.setValue(queryExpl.getValue() * fieldExpl.getValue());
-
-    if (queryExpl.getValue() == 1.0f)
-      return fieldExpl;
-
-    return result;
+  public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+    Scorer scorer = scorer(context, ScorerContext.def());
+    if (scorer != null) {
+      int newDoc = scorer.advance(doc);
+      if (newDoc == doc) {
+        float freq = scorer.freq();
+        SloppyDocScorer docScorer = similarity.sloppyDocScorer(stats, query.getField(), context);
+        ComplexExplanation result = new ComplexExplanation();
+        result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
+        Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "phraseFreq=" + freq));
+        result.addDetail(scoreExplanation);
+        result.setValue(scoreExplanation.getValue());
+        result.setMatch(true);
+        return result;
+      }
+    }
+    
+    return new ComplexExplanation(false, 0.0f, "no matching term");
   }
 }
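The stats-gathering pattern above generalizes to any custom multi-term
Weight; a sketch using only the calls visible in this hunk (myQuery and field
are placeholders):

    Set<Term> terms = new TreeSet<Term>();   // TreeSet gives deterministic order
    myQuery.extractTerms(terms);
    TermContext[] states = new TermContext[terms.size()];
    int i = 0;
    for (Term t : terms)
      states[i++] = TermContext.build(searcher.getTopReaderContext(), t, true);
    Similarity sim = searcher.getSimilarityProvider().get(field);
    Similarity.Stats stats =
        sim.computeStats(searcher, field, myQuery.getBoost(), states);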
diff --git a/lucene/src/java/org/apache/lucene/store/CompoundFileDirectory.java b/lucene/src/java/org/apache/lucene/store/CompoundFileDirectory.java
index 371425f..92c5256 100644
--- a/lucene/src/java/org/apache/lucene/store/CompoundFileDirectory.java
+++ b/lucene/src/java/org/apache/lucene/store/CompoundFileDirectory.java
@@ -60,7 +60,7 @@
    * NOTE: subclasses must call {@link #initForRead(Map)} before the directory can be used.
    */
   public CompoundFileDirectory(Directory directory, String fileName, IOContext context) throws IOException {
-    assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
+
     this.directory = directory;
     this.fileName = fileName;
     this.readBufferSize = BufferedIndexInput.bufferSize(context);
@@ -75,9 +75,11 @@
   }
   
   protected final void initForWrite() {
+    assert !(directory instanceof CompoundFileDirectory) : "compound file inside of compound file: " + fileName;
     this.entries = SENTINEL;
     this.openForWrite = true;
     this.isOpen = true;
+    writer = new CompoundFileWriter(directory, fileName);
   }
   
   /** Helper method that reads CFS entries from an input stream */
@@ -173,7 +175,11 @@
   
   @Override
   public synchronized void close() throws IOException {
-    ensureOpen();
+    if (!isOpen) {
+      // allow double close - usually to be consistent with other closeables
+      assert entries == null; 
+      return; // already closed
+    }
     entries = null;
     isOpen = false;
     if (writer != null) {
@@ -263,7 +269,6 @@
   @Override
   public IndexOutput createOutput(String name, IOContext context) throws IOException {
     ensureOpen();
-    initWriter();
     return writer.createOutput(name, context);
   }
   
@@ -279,12 +284,13 @@
     throw new UnsupportedOperationException();
   }
   
-  /** Not implemented
-   * @throws UnsupportedOperationException */
   @Override
-  public final CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
-    // NOTE: final to make nested compounding impossible.
-    throw new UnsupportedOperationException();
+  public CompoundFileDirectory openCompoundInput(String name, IOContext context) throws IOException {
+    FileEntry fileEntry = this.entries.get(IndexFileNames.stripSegmentName(name));
+    if (fileEntry == null) {
+      throw new FileNotFoundException("file " + name + " does not exist in this CFS");
+    }
+    return new NestedCompoundFileDirectory(name, context, fileEntry.offset, fileEntry.length);
   }
   
   /** Not implemented
@@ -292,16 +298,36 @@
   @Override
   public CompoundFileDirectory createCompoundOutput(String name, IOContext context)
       throws IOException {
-    // NOTE: final to make nested compounding impossible.
-    throw new UnsupportedOperationException();
+    throw new UnsupportedOperationException("can not create nested CFS, create seperately and use Directory.copy instead");
+  }
+   
+  private class NestedCompoundFileDirectory extends CompoundFileDirectory {
+
+    private final long cfsOffset;
+    private final long cfsLength;
+
+    public NestedCompoundFileDirectory(String fileName, IOContext context, long offset, long length)
+        throws IOException {
+      super(directory, fileName, context);
+      this.cfsOffset = offset;
+      this.cfsLength = length;
+      IndexInput input = null;
+      try {
+        input = CompoundFileDirectory.this.openInput(fileName, IOContext.READONCE);
+        initForRead(CompoundFileDirectory.readEntries(input,
+            CompoundFileDirectory.this, fileName));
+      } finally {
+        IOUtils.closeSafely(false, input);
+      }
+    }
+
+    @Override
+    public IndexInput openInputSlice(String id, long offset, long length,
+        int readBufferSize) throws IOException {
+      assert offset + length <= cfsLength; 
+      return CompoundFileDirectory.this.openInputSlice(id, cfsOffset + offset, length, readBufferSize);
+    }
+    
   }
   
-  private final void initWriter() {
-    assert openForWrite;
-    assert entries == SENTINEL;
-    if (writer == null) {
-      writer = new CompoundFileWriter(directory, fileName);
-    }
-  }
- 
 }
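The CompoundFileDirectory changes above turn nested compound files from a hard error into a supported read path: openCompoundInput() looks the inner file up in the entry table and returns a NestedCompoundFileDirectory whose openInputSlice() shifts every read by the inner file's offset within the outer file (writing nested CFS directly is still rejected). A minimal read-path sketch, reusing the file names from testReadNestedCFP further below; dir is an assumed, already-open Directory:

    // d.cfs contains b.cfs, which in turn contains b.xyz (one level of nesting)
    CompoundFileDirectory outer = dir.openCompoundInput("d.cfs", IOContext.DEFAULT);
    CompoundFileDirectory inner = outer.openCompoundInput("b.cfs", IOContext.DEFAULT);
    IndexInput in = inner.openInput("b.xyz", IOContext.DEFAULT);
    int value = in.readInt(); // bytes come from a slice of d.cfs at cfsOffset + offset
    in.close();
    inner.close();
    outer.close();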
diff --git a/lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java b/lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java
index 7587a98..cd56d8f 100644
--- a/lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java
+++ b/lucene/src/java/org/apache/lucene/store/CompoundFileWriter.java
@@ -17,6 +17,7 @@
  * limitations under the License.
  */
 
+import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collection;
@@ -55,7 +56,7 @@
  * 
  * @lucene.internal
  */
-final class CompoundFileWriter {
+final class CompoundFileWriter implements Closeable {
 
   private static final class FileEntry {
     /** source file */
@@ -89,8 +90,8 @@
   private boolean closed = false;
   private volatile IndexOutput dataOut;
   private final AtomicBoolean outputTaken = new AtomicBoolean(false);
-  private final String entryTableName;
-  private final String dataFileName;
+  final String entryTableName;
+  final String dataFileName;
 
   /**
    * Create the compound stream in the specified file. The file name is the
@@ -128,17 +129,14 @@
    *           if close() had been called before or if no file has been added to
    *           this object
    */
-  void close() throws IOException {
+  public void close() throws IOException {
     if (closed) {
       throw new IllegalStateException("already closed");
     }
     IOException priorException = null;
     IndexOutput entryTableOut = null;
     try {
-      if (entries.isEmpty()) {
-        throw new IllegalStateException("CFS has no entries");
-      }
-      
+      initDataOut(IOContext.DEFAULT);
       if (!pendingEntries.isEmpty() || outputTaken.get()) {
         throw new IllegalStateException("CFS has pending open files");
       }
@@ -147,12 +145,18 @@
       assert dataOut != null;
       long finalLength = dataOut.getFilePointer();
       assert assertFileLength(finalLength, dataOut);
+    } catch (IOException e) {
+      priorException = e;
+    } finally {
+      IOUtils.closeSafely(priorException, dataOut);
+    }
+    try {
       entryTableOut = directory.createOutput(entryTableName, IOContext.DEFAULT);
       writeEntryTable(entries.values(), entryTableOut);
     } catch (IOException e) {
       priorException = e;
     } finally {
-      IOUtils.closeSafely(priorException, dataOut, entryTableOut);
+      IOUtils.closeSafely(priorException, entryTableOut);
     }
   }
 
@@ -321,6 +325,7 @@
         closed = true;
         entry.length = writtenBytes;
         if (isSeparate) {
+          delegate.close();
           // we are a separate file - push into the pending entries
           pendingEntries.add(entry);
         } else {
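CompoundFileWriter.close() above is now split into two guarded phases: flush and close the data output first, then write the entry table in its own try/finally, so a failure in the first phase still closes both streams and the first exception wins. The IOUtils.closeSafely idiom it relies on, condensed into a sketch with the actual writing elided:

    IOException priorException = null;
    IndexOutput dataOut = null;
    try {
      dataOut = directory.createOutput(dataFileName, IOContext.DEFAULT);
      // ... copy the pending entries into dataOut ...
    } catch (IOException e) {
      priorException = e;
    } finally {
      // closes dataOut even on failure and rethrows priorException if non-null
      IOUtils.closeSafely(priorException, dataOut);
    }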
diff --git a/lucene/src/java/org/apache/lucene/util/PerReaderTermState.java b/lucene/src/java/org/apache/lucene/util/TermContext.java
similarity index 72%
rename from lucene/src/java/org/apache/lucene/util/PerReaderTermState.java
rename to lucene/src/java/org/apache/lucene/util/TermContext.java
index a5139b6..746405c 100644
--- a/lucene/src/java/org/apache/lucene/util/PerReaderTermState.java
+++ b/lucene/src/java/org/apache/lucene/util/TermContext.java
@@ -28,25 +28,27 @@
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
 
 /**
  * Maintains a {@link IndexReader} {@link TermState} view over
  * {@link IndexReader} instances containing a single term. The
- * {@link PerReaderTermState} doesn't track if the given {@link TermState}
+ * {@link TermContext} doesn't track if the given {@link TermState}
  * objects are valid, nor whether the {@link TermState} instances refer to the
  * same terms in the associated readers.
  * 
  * @lucene.experimental
  */
-public final class PerReaderTermState {
+public final class TermContext {
   public final ReaderContext topReaderContext; // for asserting!
   private final TermState[] states;
   private int docFreq;
+  private long totalTermFreq;
 
   /**
-   * Creates an empty {@link PerReaderTermState} from a {@link ReaderContext}
+   * Creates an empty {@link TermContext} from a {@link ReaderContext}
    */
-  public PerReaderTermState(ReaderContext context) {
+  public TermContext(ReaderContext context) {
     assert context != null && context.isTopLevel;
     topReaderContext = context;
     docFreq = 0;
@@ -60,28 +62,28 @@
   }
   
   /**
-   * Creates a {@link PerReaderTermState} with an initial {@link TermState},
+   * Creates a {@link TermContext} with an initial {@link TermState},
    * {@link IndexReader} pair.
    */
-  public PerReaderTermState(ReaderContext context, TermState state, int ord, int docFreq) {
+  public TermContext(ReaderContext context, TermState state, int ord, int docFreq, long totalTermFreq) {
     this(context);
-    register(state, ord, docFreq);
+    register(state, ord, docFreq, totalTermFreq);
   }
 
   /**
-   * Creates a {@link PerReaderTermState} from a top-level {@link ReaderContext} and the
+   * Creates a {@link TermContext} from a top-level {@link ReaderContext} and the
    * given {@link Term}. This method will lookup the given term in all context's leaf readers 
-   * and register each of the readers containing the term in the returned {@link PerReaderTermState}
+   * and register each of the readers containing the term in the returned {@link TermContext}
    * using the leaf reader's ordinal.
    * <p>
    * Note: the given context must be a top-level context.
    */
-  public static PerReaderTermState build(ReaderContext context, Term term, boolean cache)
+  public static TermContext build(ReaderContext context, Term term, boolean cache)
       throws IOException {
     assert context != null && context.isTopLevel;
     final String field = term.field();
     final BytesRef bytes = term.bytes();
-    final PerReaderTermState perReaderTermState = new PerReaderTermState(context);
+    final TermContext perReaderTermState = new TermContext(context);
     final AtomicReaderContext[] leaves = ReaderUtil.leaves(context);
     for (int i = 0; i < leaves.length; i++) {
       final Fields fields = leaves[i].reader.fields();
@@ -91,7 +93,7 @@
           final TermsEnum termsEnum = terms.getThreadTermsEnum(); // thread-private don't share!
           if (termsEnum.seekExact(bytes, cache)) { 
             final TermState termState = termsEnum.termState();
-            perReaderTermState.register(termState, leaves[i].ord, termsEnum.docFreq());
+            perReaderTermState.register(termState, leaves[i].ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
           }
         }
       }
@@ -100,7 +102,7 @@
   }
 
   /**
-   * Clears the {@link PerReaderTermState} internal state and removes all
+   * Clears the {@link TermContext} internal state and removes all
    * registered {@link TermState}s
    */
   public void clear() {
@@ -112,12 +114,16 @@
   * Registers and associates a {@link TermState} with a leaf ordinal. The leaf ordinal
    * should be derived from a {@link ReaderContext}'s leaf ord.
    */
-  public void register(TermState state, final int ord, final int docFreq) {
+  public void register(TermState state, final int ord, final int docFreq, final long totalTermFreq) {
     assert state != null : "state must not be null";
     assert ord >= 0 && ord < states.length;
     assert states[ord] == null : "state for ord: " + ord
         + " already registered";
     this.docFreq += docFreq;
+    if (this.totalTermFreq >= 0 && totalTermFreq >= 0)
+      this.totalTermFreq += totalTermFreq;
+    else
+      this.totalTermFreq = -1;
     states[ord] = state;
   }
 
@@ -137,11 +143,27 @@
 
   /**
    *  Returns the accumulated document frequency of all {@link TermState}
-   *         instances passed to {@link #register(TermState, int, int)}.
+   *         instances passed to {@link #register(TermState, int, int, long)}.
    * @return the accumulated document frequency of all {@link TermState}
-   *         instances passed to {@link #register(TermState, int, int)}.
+   *         instances passed to {@link #register(TermState, int, int, long)}.
    */
   public int docFreq() {
     return docFreq;
   }
+  
+  /**
+   *  Returns the accumulated term frequency of all {@link TermState}
+   *         instances passed to {@link #register(TermState, int, int, long)}.
+   * @return the accumulated term frequency of all {@link TermState}
+   *         instances passed to {@link #register(TermState, int, int, long)}.
+   */
+  public long totalTermFreq() {
+    return totalTermFreq;
+  }
+  
+  /** expert: only available for queries that want to lie about docfreq
+   * @lucene.internal */
+  public void setDocFreq(int docFreq) {
+    this.docFreq = docFreq;
+  }
 }
\ No newline at end of file
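TermContext (formerly PerReaderTermState) now also aggregates totalTermFreq across segments, with -1 as an "unknown" sentinel: once any segment reports -1 because its codec does not track the statistic, the aggregate is pinned to -1 rather than left as a misleading partial sum. A short usage sketch, assuming topContext is a top-level ReaderContext:

    Term term = new Term("body", "lucene");
    TermContext ctx = TermContext.build(topContext, term, true /* cache */);
    int df = ctx.docFreq();         // summed over all segments containing the term
    long ttf = ctx.totalTermFreq(); // summed, or -1 if any segment lacks the stat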
diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java
index aa3c799..e665e82 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockFixedIntBlockCodec.java
@@ -32,6 +32,7 @@
 import org.apache.lucene.index.codecs.sep.IntIndexOutput;
 import org.apache.lucene.index.codecs.sep.SepPostingsReaderImpl;
 import org.apache.lucene.index.codecs.sep.SepPostingsWriterImpl;
+import org.apache.lucene.index.codecs.standard.StandardCodec;
 import org.apache.lucene.index.codecs.intblock.FixedIntBlockIndexInput;
 import org.apache.lucene.index.codecs.intblock.FixedIntBlockIndexOutput;
 import org.apache.lucene.index.codecs.DefaultDocValuesProducer;
@@ -46,7 +47,6 @@
 import org.apache.lucene.index.codecs.BlockTermsWriter;
 import org.apache.lucene.index.codecs.TermsIndexReaderBase;
 import org.apache.lucene.index.codecs.TermsIndexWriterBase;
-import org.apache.lucene.index.codecs.standard.StandardCodec;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
@@ -62,8 +62,8 @@
   private final int blockSize;
 
   public MockFixedIntBlockCodec(int blockSize) {
+    super("MockFixedIntBlock");
     this.blockSize = blockSize;
-    name = "MockFixedIntBlock";
   }
 
   @Override
@@ -207,7 +207,7 @@
     SepPostingsReaderImpl.files(segmentInfo, codecId, files);
     BlockTermsReader.files(dir, segmentInfo, codecId, files);
     FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files, getDocValuesUseCFS());
   }
 
   @Override
@@ -215,16 +215,16 @@
     SepPostingsWriterImpl.getExtensions(extensions);
     BlockTermsReader.getExtensions(extensions);
     FixedGapTermsIndexReader.getIndexExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
   
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
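This codec (and every other one touched below) migrates from assigning a protected name field in the constructor body to passing the name up through super(), and now threads getDocValuesUseCFS()/getDocValuesSortComparator() into the doc values consumer and producer. The constructor half of the pattern as a sketch; the class is declared abstract here only so Codec's remaining methods can stay elided:

    public abstract class MyCodec extends Codec {
      protected MyCodec() {
        super("MyCodec"); // the codec name is now fixed at construction time
      }
    }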
diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java
index 3ba849c..6d15b92 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockintblock/MockVariableIntBlockCodec.java
@@ -32,6 +32,7 @@
 import org.apache.lucene.index.codecs.sep.IntIndexOutput;
 import org.apache.lucene.index.codecs.sep.SepPostingsReaderImpl;
 import org.apache.lucene.index.codecs.sep.SepPostingsWriterImpl;
+import org.apache.lucene.index.codecs.standard.StandardCodec;
 import org.apache.lucene.index.codecs.intblock.VariableIntBlockIndexInput;
 import org.apache.lucene.index.codecs.intblock.VariableIntBlockIndexOutput;
 import org.apache.lucene.index.codecs.DefaultDocValuesProducer;
@@ -46,7 +47,6 @@
 import org.apache.lucene.index.codecs.BlockTermsWriter;
 import org.apache.lucene.index.codecs.TermsIndexReaderBase;
 import org.apache.lucene.index.codecs.TermsIndexWriterBase;
-import org.apache.lucene.index.codecs.standard.StandardCodec;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -63,9 +63,9 @@
 
 public class MockVariableIntBlockCodec extends Codec {
   private final int baseBlockSize;
-
+  
   public MockVariableIntBlockCodec(int baseBlockSize) {
-    name = "MockVariableIntBlock";
+    super("MockVariableIntBlock");
     this.baseBlockSize = baseBlockSize;
   }
 
@@ -230,7 +230,7 @@
     SepPostingsReaderImpl.files(segmentInfo, codecId, files);
     BlockTermsReader.files(dir, segmentInfo, codecId, files);
     FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files, getDocValuesUseCFS());
   }
 
   @Override
@@ -238,16 +238,16 @@
     SepPostingsWriterImpl.getExtensions(extensions);
     BlockTermsReader.getExtensions(extensions);
     FixedGapTermsIndexReader.getIndexExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
   
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
index d147694..c15865c 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mockrandom/MockRandomCodec.java
@@ -76,9 +76,9 @@
 
   private final Random seedRandom;
   private final String SEED_EXT = "sd";
-
+  
   public MockRandomCodec(Random random) {
-    name = "MockRandom";
+    super("MockRandom");
     this.seedRandom = new Random(random.nextLong());
   }
 
@@ -355,7 +355,7 @@
     BlockTermsReader.files(dir, segmentInfo, codecId, files);
     FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
     VariableGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files, getDocValuesUseCFS());
     // hackish!
     Iterator<String> it = files.iterator();
     while(it.hasNext()) {
@@ -373,7 +373,7 @@
     BlockTermsReader.getExtensions(extensions);
     FixedGapTermsIndexReader.getIndexExtensions(extensions);
     VariableGapTermsIndexReader.getIndexExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
     extensions.add(SEED_EXT);
     //System.out.println("MockRandom.getExtensions return " + extensions);
   }
@@ -381,11 +381,11 @@
   // can we make this more evil?
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java
index 7454cab..30cd364 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/mocksep/MockSepCodec.java
@@ -54,7 +54,7 @@
 public class MockSepCodec extends Codec {
 
   public MockSepCodec() {
-    name = "MockSep";
+    super("MockSep");
   }
 
   @Override
@@ -139,13 +139,13 @@
     SepPostingsReaderImpl.files(segmentInfo, codecId, files);
     BlockTermsReader.files(dir, segmentInfo, codecId, files);
     FixedGapTermsIndexReader.files(dir, segmentInfo, codecId, files);
-    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files);
+    DefaultDocValuesConsumer.files(dir, segmentInfo, codecId, files, getDocValuesUseCFS());
   }
 
   @Override
   public void getExtensions(Set<String> extensions) {
     getSepExtensions(extensions);
-    DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+    DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
   }
 
   public static void getSepExtensions(Set<String> extensions) {
@@ -156,11 +156,11 @@
   
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+    return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+    return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
   }
 }
diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/PreFlexRWCodec.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/PreFlexRWCodec.java
index d2edcca..f911ef2 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/PreFlexRWCodec.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/PreFlexRWCodec.java
@@ -37,7 +37,6 @@
   public PreFlexRWCodec() {
     // NOTE: we impersonate the PreFlex codec so that it can
     // read the segments we write!
-    super();
   }
   
   @Override
diff --git a/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
index 4154126..3573ffc 100644
--- a/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
+++ b/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
@@ -62,12 +62,7 @@
       }
 
       @Override
-      public float getValue() {
-        return w.getValue();
-      }
-
-      @Override
-      public void normalize(float norm) {
+      public void normalize(float norm, float topLevelBoost) {
         throw new IllegalStateException("Weight already normalized.");
       }
 
@@ -77,7 +72,7 @@
       }
 
       @Override
-      public float sumOfSquaredWeights() throws IOException {
+      public float getValueForNormalization() throws IOException {
         throw new IllegalStateException("Weight already normalized.");
       }
 
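The asserting wrapper above enforces the reworked normalization handshake: by the time a Weight reaches a Scorer it must already be normalized, so normalize(norm, topLevelBoost) and getValueForNormalization() both throw. The handshake itself, sketched with assumed setup (the queryNorm call sits on SimilarityProvider, as in TestOmitTf below):

    Weight weight = query.createWeight(searcher);
    float sum = weight.getValueForNormalization();
    float norm = searcher.getSimilarityProvider().queryNorm(sum);
    weight.normalize(norm, 1.0f); // 1.0f: top-level boost of an un-boosted query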
diff --git a/lucene/src/test-framework/org/apache/lucene/search/CheckHits.java b/lucene/src/test-framework/org/apache/lucene/search/CheckHits.java
index 6f1d333..3636255 100644
--- a/lucene/src/test-framework/org/apache/lucene/search/CheckHits.java
+++ b/lucene/src/test-framework/org/apache/lucene/search/CheckHits.java
@@ -329,9 +329,10 @@
     Explanation detail[] = expl.getDetails();
     if (detail!=null) {
       if (detail.length==1) {
-        // simple containment, no matter what the description says, 
+        // simple containment, unless it's a "freq of:" detail (which lets a query explain how the freq is calculated),
         // just verify contained expl has same score
-        verifyExplanation(q,doc,score,deep,detail[0]);
+        if (!expl.getDescription().endsWith("with freq of:"))
+          verifyExplanation(q,doc,score,deep,detail[0]);
       } else {
         // explanation must either:
         // - end with one of: "product of:", "sum of:", "max of:", or
@@ -357,6 +358,7 @@
             }
           }
         }
+        // TODO: this is a TERRIBLE assertion!!!!
         Assert.assertTrue(
             q+": multi valued explanation description=\""+descr
             +"\" must be 'max of plus x times others' or end with 'product of'"
diff --git a/lucene/src/test-framework/org/apache/lucene/store/MockCompoundFileDirectoryWrapper.java b/lucene/src/test-framework/org/apache/lucene/store/MockCompoundFileDirectoryWrapper.java
index c7a592e..cc46535 100644
--- a/lucene/src/test-framework/org/apache/lucene/store/MockCompoundFileDirectoryWrapper.java
+++ b/lucene/src/test-framework/org/apache/lucene/store/MockCompoundFileDirectoryWrapper.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.Collection;
-import java.util.Collections;
 
 public class MockCompoundFileDirectoryWrapper extends CompoundFileDirectory {
   private final MockDirectoryWrapper parent;
@@ -31,11 +30,7 @@
     this.name = name;
     this.parent = parent;
     this.delegate = delegate;
-    if (forWrite) {
-      super.initForWrite();
-    } else {
-      super.initForRead(Collections.<String,FileEntry>emptyMap());
-    }
+    // don't initialize here since we delegate everything - if not initialized, a direct call will cause an assert to fail!
     parent.addFileHandle(this, name, !forWrite);
   }
   
@@ -51,12 +46,8 @@
 
   @Override
   public synchronized void close() throws IOException {
-    try {
-      delegate.close();
-      parent.removeOpenFile(this, name);
-    } finally {
-      super.close();
-    }
+    delegate.close();
+    parent.removeOpenFile(this, name);
   }
 
   @Override
@@ -148,4 +139,11 @@
   public CompoundFileDirectory createCompoundOutput(String name, IOContext context) throws IOException {
     return delegate.createCompoundOutput(name, context);
   }
+
+  @Override
+  public CompoundFileDirectory openCompoundInput(String name, IOContext context)
+      throws IOException {
+    return delegate.openCompoundInput(name, context);
+  }
+
 }
diff --git a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
index cee7d54..c8a016f 100644
--- a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
@@ -242,7 +242,7 @@
     if (prior != null) {
       cp.unregister(prior);
     }
-    cp.register(c);
+    cp.register(randomizeCodec(random, c));
   }
 
   // returns current default codec
@@ -280,7 +280,7 @@
     }
 
     swapCodec(new MockSepCodec(), cp);
-    swapCodec(new PulsingCodec(codecHasParam && "Pulsing".equals(codec) ? codecParam : _TestUtil.nextInt(random, 1, 20)), cp);
+    swapCodec(new PulsingCodec(codecHasParam && "Pulsing".equals(codec) ? codecParam : 1 + random.nextInt(20)), cp);
     swapCodec(new MockFixedIntBlockCodec(codecHasParam && "MockFixedIntBlock".equals(codec) ? codecParam : _TestUtil.nextInt(random, 1, 2000)), cp);
     // baseBlockSize cannot be over 127:
     swapCodec(new MockVariableIntBlockCodec(codecHasParam && "MockVariableIntBlock".equals(codec) ? codecParam : _TestUtil.nextInt(random, 1, 127)), cp);
@@ -288,6 +288,11 @@
 
     return cp.lookup(codec);
   }
+  
+  public static Codec randomizeCodec(Random random, Codec codec) {
+    codec.setDocValuesUseCFS(random.nextBoolean());
+    return codec;
+  }
 
   // returns current PreFlex codec
   static void removeTestCodecs(Codec codec, CodecProvider cp) {
@@ -1493,11 +1498,11 @@
 
     RandomCodecProvider(Random random) {
       this.perFieldSeed = random.nextInt();
-      register(new StandardCodec());
-      register(new PreFlexCodec());
-      register(new PulsingCodec(1));
-      register(new SimpleTextCodec());
-      register(new MemoryCodec());
+      register(randomizeCodec(random, new StandardCodec()));
+      register(randomizeCodec(random, new PreFlexCodec()));
+      register(randomizeCodec(random, new PulsingCodec(1 + random.nextInt(20))));
+      register(randomizeCodec(random, new SimpleTextCodec()));
+      register(randomizeCodec(random, new MemoryCodec()));
       Collections.shuffle(knownCodecs, random);
     }
 
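randomizeCodec above gives each registered codec a randomized doc values layout so both the compound-file and loose-file paths get exercised; spelled out, the call amounts to:

    Codec codec = new StandardCodec();
    codec.setDocValuesUseCFS(random.nextBoolean()); // doc values inside a CFS, or not
    cp.register(codec);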
diff --git a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
index c3625ab..50485853 100644
--- a/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
+++ b/lucene/src/test/org/apache/lucene/TestExternalCodecs.java
@@ -24,8 +24,6 @@
 import org.apache.lucene.search.*;
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.index.codecs.*;
-import org.apache.lucene.index.codecs.standard.*;
-import org.apache.lucene.index.codecs.pulsing.*;
 import org.apache.lucene.store.*;
 import java.util.*;
 import java.io.*;
@@ -75,7 +73,7 @@
   public static class RAMOnlyCodec extends Codec {
     
     public RAMOnlyCodec() {
-      name = "RamOnly";
+      super("RamOnly");
     }
     // Postings state:
     static class RAMPostings extends FieldsProducer {
diff --git a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
index 7dc161e..a2c4ec4 100755
--- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -1161,7 +1161,7 @@
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
           new MockAnalyzer(random));
       CodecProvider provider = new CodecProvider();
-      provider.register(new PulsingCodec(1 + random.nextInt(10)));
+      provider.register(new PulsingCodec(1 + random.nextInt(20)));
       conf.setCodecProvider(provider);
       IndexWriter w = new IndexWriter(dir, conf);
       try {
@@ -1182,7 +1182,7 @@
       IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
           new MockAnalyzer(random));
       CodecProvider provider = new CodecProvider();
-      provider.register(new PulsingCodec(1 + random.nextInt(10)));
+      provider.register(new PulsingCodec(1 + random.nextInt(20)));
       conf.setCodecProvider(provider);
       IndexWriter w = new IndexWriter(dir, conf);
       IndexReader indexReader = IndexReader.open(toAdd);
diff --git a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 44bff09..5a629da 100644
--- a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -38,7 +38,6 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
@@ -375,7 +374,8 @@
     Term searchTerm = new Term("id", "6");
     int delCount = reader.deleteDocuments(searchTerm);
     assertEquals("wrong delete count", 1, delCount);
-    reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", searcher.getSimilarityProvider().get("content").encodeNormValue(2.0f));
+    DefaultSimilarity sim = new DefaultSimilarity();
+    reader.setNorm(searcher.search(new TermQuery(new Term("id", "22")), 10).scoreDocs[0].doc, "content", sim.encodeNormValue(2.0f));
     reader.close();
     searcher.close();
 
@@ -421,7 +421,8 @@
     Term searchTerm = new Term("id", "6");
     int delCount = reader.deleteDocuments(searchTerm);
     assertEquals("wrong delete count", 1, delCount);
-    reader.setNorm(22, "content", searcher.getSimilarityProvider().get("content").encodeNormValue(2.0f));
+    DefaultSimilarity sim = new DefaultSimilarity();
+    reader.setNorm(22, "content", sim.encodeNormValue(2.0f));
     reader.close();
 
     // make sure they "took":
@@ -483,7 +484,8 @@
       assertEquals("didn't delete the right number of documents", 1, delCount);
 
       // Set one norm so we get a .s0 file:
-      reader.setNorm(21, "content", conf.getSimilarityProvider().get("content").encodeNormValue(1.5f));
+      DefaultSimilarity sim = new DefaultSimilarity();
+      reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
       reader.close();
     }
     
@@ -526,7 +528,7 @@
       assertEquals("didn't delete the right number of documents", 1, delCount);
 
       // Set one norm so we get a .s0 file:
-      Similarity sim = new DefaultSimilarity();
+      DefaultSimilarity sim = new DefaultSimilarity();
       reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
       reader.close();
 
diff --git a/lucene/src/test/org/apache/lucene/index/TestCompoundFile.java b/lucene/src/test/org/apache/lucene/index/TestCompoundFile.java
index 262a1a3..f6acb3b 100644
--- a/lucene/src/test/org/apache/lucene/index/TestCompoundFile.java
+++ b/lucene/src/test/org/apache/lucene/index/TestCompoundFile.java
@@ -21,10 +21,9 @@
 import java.io.File;
 
 import org.apache.lucene.util.LuceneTestCase;
-import junit.framework.TestSuite;
-import junit.textui.TestRunner;
 
 import org.apache.lucene.store.CompoundFileDirectory;
+import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
@@ -35,27 +34,8 @@
 
 public class TestCompoundFile extends LuceneTestCase
 {
-    /** Main for running test case by itself. */
-    public static void main(String args[]) {
-        TestRunner.run (new TestSuite(TestCompoundFile.class));
-//        TestRunner.run (new TestCompoundFile("testSingleFile"));
-//        TestRunner.run (new TestCompoundFile("testTwoFiles"));
-//        TestRunner.run (new TestCompoundFile("testRandomFiles"));
-//        TestRunner.run (new TestCompoundFile("testClonedStreamsClosing"));
-//        TestRunner.run (new TestCompoundFile("testReadAfterClose"));
-//        TestRunner.run (new TestCompoundFile("testRandomAccess"));
-//        TestRunner.run (new TestCompoundFile("testRandomAccessClones"));
-//        TestRunner.run (new TestCompoundFile("testFileNotFound"));
-//        TestRunner.run (new TestCompoundFile("testReadPastEOF"));
-
-//        TestRunner.run (new TestCompoundFile("testIWCreate"));
-
-    }
-
-
     private Directory dir;
 
-
     @Override
     public void setUp() throws Exception {
        super.setUp();
@@ -323,13 +303,13 @@
     throws IOException
     {
         // Setup the test file - we need more than 1024 bytes
-        IndexOutput os = fsdir.createOutput(file, newIOContext(random));
+        IndexOutput os = fsdir.createOutput(file, IOContext.DEFAULT);
         for(int i=0; i<2000; i++) {
             os.writeByte((byte) i);
         }
         os.close();
 
-        IndexInput in = fsdir.openInput(file, newIOContext(random));
+        IndexInput in = fsdir.openInput(file, IOContext.DEFAULT);
 
         // This read primes the buffer in IndexInput
         in.readByte();
@@ -717,5 +697,74 @@
     cfr.close();
     newDir.close();
   }
+  
+  public void testEmptyCFS() throws IOException {
+    Directory newDir = newDirectory();
+    CompoundFileDirectory csw = newDir.createCompoundOutput("d.cfs", newIOContext(random));
+    csw.close();
 
+    CompoundFileDirectory csr = newDir.openCompoundInput("d.cfs", newIOContext(random));
+    assertEquals(0, csr.listAll().length);
+    csr.close();
+
+    newDir.close();
+  }
+  
+  public void testReadNestedCFP() throws IOException {
+    Directory newDir = newDirectory();
+    CompoundFileDirectory csw = newDir.createCompoundOutput("d.cfs", newIOContext(random));
+    CompoundFileDirectory nested = newDir.createCompoundOutput("b.cfs", newIOContext(random));
+    IndexOutput out = nested.createOutput("b.xyz", newIOContext(random));
+    IndexOutput out1 = nested.createOutput("b_1.xyz", newIOContext(random));
+    out.writeInt(0);
+    out1.writeInt(1);
+    out.close();
+    out1.close();
+    nested.close();
+    newDir.copy(csw, "b.cfs", "b.cfs", newIOContext(random));
+    newDir.copy(csw, "b.cfe", "b.cfe", newIOContext(random));
+    newDir.deleteFile("b.cfs");
+    newDir.deleteFile("b.cfe");
+    csw.close();
+    
+    assertEquals(2, newDir.listAll().length);
+    csw = newDir.openCompoundInput("d.cfs", newIOContext(random));
+    
+    assertEquals(2, csw.listAll().length);
+    nested = csw.openCompoundInput("b.cfs", newIOContext(random));
+    
+    assertEquals(2, nested.listAll().length);
+    IndexInput openInput = nested.openInput("b.xyz", newIOContext(random));
+    assertEquals(0, openInput.readInt());
+    openInput.close();
+    openInput = nested.openInput("b_1.xyz", newIOContext(random));
+    assertEquals(1, openInput.readInt());
+    openInput.close();
+    nested.close();
+    csw.close();
+    newDir.close();
+  }
+  
+  public void testDoubleClose() throws IOException {
+    Directory newDir = newDirectory();
+    CompoundFileDirectory csw = newDir.createCompoundOutput("d.cfs", newIOContext(random));
+    IndexOutput out = csw.createOutput("d.xyz", newIOContext(random));
+    out.writeInt(0);
+    out.close();
+    
+    csw.close();
+    // close a second time - must have no effect according to Closeable
+    csw.close();
+    
+    csw = newDir.openCompoundInput("d.cfs", newIOContext(random));
+    IndexInput openInput = csw.openInput("d.xyz", newIOContext(random));
+    assertEquals(0, openInput.readInt());
+    openInput.close();
+    csw.close();
+    // close a second time - must have no effect according to Closeable
+    csw.close();
+    
+    newDir.close();
+    
+  }
 }
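testDoubleClose above pins down the java.io.Closeable contract that the reworked CompoundFileDirectory.close() honors: a second close() is a silent no-op instead of an ensureOpen() failure. In outline:

    CompoundFileDirectory cfs = dir.createCompoundOutput("x.cfs", IOContext.DEFAULT);
    // ... create outputs, write, close them ...
    cfs.close();
    cfs.close(); // no-op: close() returns early once isOpen is false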
diff --git a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
index f46fd29..9c4994e 100644
--- a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
+++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
@@ -27,6 +27,7 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -655,7 +656,8 @@
         writer.close();
         IndexReader reader = IndexReader.open(dir, policy, false);
         reader.deleteDocument(3*i+1);
-        reader.setNorm(4*i+1, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F));
+        DefaultSimilarity sim = new DefaultSimilarity();
+        reader.setNorm(4*i+1, "content", sim.encodeNormValue(2.0F));
         IndexSearcher searcher = newSearcher(reader);
         ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
         assertEquals(16*(1+i), hits.length);
@@ -781,7 +783,8 @@
         writer.close();
         IndexReader reader = IndexReader.open(dir, policy, false);
         reader.deleteDocument(3);
-        reader.setNorm(5, "content", conf.getSimilarityProvider().get("content").encodeNormValue(2.0F));
+        DefaultSimilarity sim = new DefaultSimilarity();
+        reader.setNorm(5, "content", sim.encodeNormValue(2.0F));
         IndexSearcher searcher = newSearcher(reader);
         ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
         assertEquals(16, hits.length);
diff --git a/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java b/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
index d1affdc..b568279 100644
--- a/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
+++ b/lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
@@ -105,8 +105,9 @@
   }
 
   private static class StandardCodecWithOrds extends Codec {
+    
     public StandardCodecWithOrds() {
-      name = "StandardOrds";
+      super("StandardOrds");
     }
 
     @Override
@@ -200,13 +201,13 @@
       StandardPostingsReader.files(dir, segmentInfo, id, files);
       BlockTermsReader.files(dir, segmentInfo, id, files);
       FixedGapTermsIndexReader.files(dir, segmentInfo, id, files);
-      DefaultDocValuesConsumer.files(dir, segmentInfo, id, files);
+      DefaultDocValuesConsumer.files(dir, segmentInfo, id, files, getDocValuesUseCFS());
     }
 
     @Override
     public void getExtensions(Set<String> extensions) {
       getStandardExtensions(extensions);
-      DefaultDocValuesConsumer.getDocValuesExtensions(extensions);
+      DefaultDocValuesConsumer.getDocValuesExtensions(extensions, getDocValuesUseCFS());
     }
 
     public static void getStandardExtensions(Set<String> extensions) {
@@ -218,12 +219,12 @@
     
     @Override
     public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-      return new DefaultDocValuesConsumer(state, BytesRef.getUTF8SortedAsUnicodeComparator());
+      return new DefaultDocValuesConsumer(state, getDocValuesSortComparator(), getDocValuesUseCFS());
     }
 
     @Override
     public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-      return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, state.context);
+      return new DefaultDocValuesProducer(state.segmentInfo, state.dir, state.fieldInfos, state.codecId, getDocValuesUseCFS(), getDocValuesSortComparator(), state.context);
     }
   }
 
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java b/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
index 9561b51..a87bcb9 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
@@ -71,7 +71,7 @@
     Term searchTerm = new Term("id", "7");
     int delCount = reader.deleteDocuments(searchTerm);
     assertEquals("didn't delete the right number of documents", 1, delCount);
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     // Set one norm so we get a .s0 file:
     reader.setNorm(21, "content", sim.encodeNormValue(1.5f));
     reader.close();
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
index 05b2f3c..7965406 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexReader.java
@@ -421,7 +421,7 @@
           // expected
         }
 
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         try {
           reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
           fail("setNorm after close failed to throw IOException");
@@ -462,7 +462,7 @@
           // expected
         }
 
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         try {
           reader.setNorm(5, "aaa", sim.encodeNormValue(2.0f));
           fail("setNorm should have hit LockObtainFailedException");
@@ -494,7 +494,7 @@
 
         //  now open reader & set norm for doc 0
         IndexReader reader = IndexReader.open(dir, false);
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         reader.setNorm(0, "content", sim.encodeNormValue(2.0f));
 
         // we should be holding the write lock now:
@@ -539,7 +539,7 @@
         addDoc(writer, searchTerm.text());
         writer.close();
 
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         //  now open reader & set norm for doc 0 (writes to
         //  _0_1.s0)
         reader = IndexReader.open(dir, false);
@@ -738,7 +738,7 @@
       }
 
       reader = IndexReader.open(dir, false);
-      Similarity sim = new DefaultSimilarity();
+      DefaultSimilarity sim = new DefaultSimilarity();
       try {
         reader.setNorm(1, "content", sim.encodeNormValue(2.0f));
         fail("did not hit exception when calling setNorm on an invalid doc number");
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
index bcbf857..9b6c4d2 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
@@ -273,7 +273,7 @@
    * @throws Exception
    */
   private void performDefaultTests(IndexReader r1) throws Exception {
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     float norm1 = sim.decodeNormValue(MultiNorms.norms(r1, "field1")[4]);
 
     IndexReader pr1Clone = (IndexReader) r1.clone();
@@ -329,7 +329,7 @@
     TestIndexReaderReopen.createIndex(random, dir1, false);
     SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
     origSegmentReader.deleteDocument(1);
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     origSegmentReader.setNorm(4, "field1", sim.encodeNormValue(0.5f));
 
     SegmentReader clonedSegmentReader = (SegmentReader) origSegmentReader
@@ -429,7 +429,7 @@
     final Directory dir1 = newDirectory();
     TestIndexReaderReopen.createIndex(random, dir1, false);
     IndexReader orig = IndexReader.open(dir1, false);
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     orig.setNorm(1, "field1", sim.encodeNormValue(17.0f));
     final byte encoded = sim.encodeNormValue(17.0f);
     assertEquals(encoded, MultiNorms.norms(orig, "field1")[1]);
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
index 32cef3e..228d033 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
@@ -47,9 +47,9 @@
     public Similarity get(String field) {
       return new DefaultSimilarity() {
         @Override
-        public float computeNorm(FieldInvertState state) {
+        public byte computeNorm(FieldInvertState state) {
           // disable length norm
-          return state.getBoost();
+          return encodeNormValue(state.getBoost());
         }
       };
     } 
@@ -217,7 +217,7 @@
     IndexReader reader4C = (IndexReader) reader3C.clone();
     SegmentReader segmentReader4C = getOnlySegmentReader(reader4C);
     assertEquals(4, reader3CCNorm.bytesRef().get());
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     reader4C.setNorm(5, "field1", sim.encodeNormValue(0.33f));
     
     // generate a cannot update exception in reader1
@@ -278,7 +278,7 @@
       // System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
       modifiedNorms.set(i, Float.valueOf(newNorm));
       modifiedNorms.set(k, Float.valueOf(origNorm));
-      Similarity sim = new DefaultSimilarity();
+      DefaultSimilarity sim = new DefaultSimilarity();
       ir.setNorm(i, "f" + 1, sim.encodeNormValue(newNorm));
       ir.setNorm(k, "f" + 1, sim.encodeNormValue(origNorm));
       // System.out.println("setNorm i: "+i);
@@ -300,7 +300,7 @@
       assertEquals("number of norms mismatches", numDocNorms, b.length);
       ArrayList<Float> storedNorms = (i == 1 ? modifiedNorms : norms);
       for (int j = 0; j < b.length; j++) {
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         float norm = sim.decodeNormValue(b[j]);
         float norm1 =  storedNorms.get(j).floatValue();
         assertEquals("stored norm value of " + field + " for doc " + j + " is "
@@ -340,7 +340,7 @@
   // return unique norm values that are unchanged by encoding/decoding
   private float nextNorm(String fname) {
     float norm = lastNorm + normDelta;
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     do {
       float norm1 = sim.decodeNormValue(
     		  sim.encodeNormValue(norm));
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
index 86ea0f7..058939e 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
@@ -131,7 +131,7 @@
 
         dir.setMaxSizeInBytes(thisDiskFree);
         dir.setRandomIOExceptionRate(rate);
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         try {
           if (0 == x) {
             int docId = 12;
diff --git a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
index f9277ea..3200204 100644
--- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
+++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
@@ -606,7 +606,7 @@
     
     IndexReader reader2 = reader1.reopen();
     modifier = IndexReader.open(dir1, false);
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     modifier.setNorm(1, "field1", sim.encodeNormValue(50f));
     modifier.setNorm(1, "field2", sim.encodeNormValue(50f));
     modifier.close();
@@ -702,7 +702,7 @@
       protected void modifyIndex(int i) throws IOException {
         if (i % 3 == 0) {
           IndexReader modifier = IndexReader.open(dir, false);
-          Similarity sim = new DefaultSimilarity();
+          DefaultSimilarity sim = new DefaultSimilarity();
           modifier.setNorm(i, "field1", sim.encodeNormValue(50f));
           modifier.close();
         } else if (i % 3 == 1) {
@@ -983,7 +983,7 @@
       }
       case 1: {
         IndexReader reader = IndexReader.open(dir, false);
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         reader.setNorm(4, "field1", sim.encodeNormValue(123f));
         reader.setNorm(44, "field2", sim.encodeNormValue(222f));
         reader.setNorm(44, "field4", sim.encodeNormValue(22f));
@@ -1007,7 +1007,7 @@
       }
       case 4: {
         IndexReader reader = IndexReader.open(dir, false);
-        Similarity sim = new DefaultSimilarity();
+        DefaultSimilarity sim = new DefaultSimilarity();
         reader.setNorm(5, "field1", sim.encodeNormValue(123f));
         reader.setNorm(55, "field2", sim.encodeNormValue(222f));
         reader.close();
diff --git a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
index d81d3a4..9744008 100644
--- a/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
+++ b/lucene/src/test/org/apache/lucene/index/TestMaxTermFrequency.java
@@ -116,8 +116,8 @@
     }
 
     @Override
-    public float computeNorm(FieldInvertState state) {
-      return (float) state.getMaxTermFrequency();
+    public byte computeNorm(FieldInvertState state) {
+      return encodeNormValue((float) state.getMaxTermFrequency());
     }
   }
 }
diff --git a/lucene/src/test/org/apache/lucene/index/TestNorms.java b/lucene/src/test/org/apache/lucene/index/TestNorms.java
index 3a8b295..372ae2e 100755
--- a/lucene/src/test/org/apache/lucene/index/TestNorms.java
+++ b/lucene/src/test/org/apache/lucene/index/TestNorms.java
@@ -46,9 +46,9 @@
     public Similarity get(String field) {
       return new DefaultSimilarity() {
         @Override
-        public float computeNorm(FieldInvertState state) {
+        public byte computeNorm(FieldInvertState state) {
           // disable length norm
-          return state.getBoost();
+          return encodeNormValue(state.getBoost());
         }
       };
     } 
@@ -177,7 +177,7 @@
       //System.out.println("      and: for "+k+" from "+newNorm+" to "+origNorm);
       modifiedNorms.set(i, Float.valueOf(newNorm));
       modifiedNorms.set(k, Float.valueOf(origNorm));
-      Similarity sim = new DefaultSimilarity();
+      DefaultSimilarity sim = new DefaultSimilarity();
       ir.setNorm(i, "f"+1, sim.encodeNormValue(newNorm)); 
       ir.setNorm(k, "f"+1, sim.encodeNormValue(origNorm)); 
     }
@@ -192,8 +192,9 @@
       byte b[] = MultiNorms.norms(ir, field);
       assertEquals("number of norms mismatches",numDocNorms,b.length);
       ArrayList<Float> storedNorms = (i==1 ? modifiedNorms : norms);
+      DefaultSimilarity sim = (DefaultSimilarity) similarityProviderOne.get(field);
       for (int j = 0; j < b.length; j++) {
-        float norm = similarityProviderOne.get(field).decodeNormValue(b[j]);
+        float norm = sim.decodeNormValue(b[j]);
         float norm1 = storedNorms.get(j).floatValue();
         assertEquals("stored norm value of "+field+" for doc "+j+" is "+norm+" - a mismatch!", norm, norm1, 0.000001);
       }
@@ -229,7 +230,7 @@
   // return unique norm values that are unchanged by encoding/decoding
   private float nextNorm(String fname) {
     float norm = lastNorm + normDelta;
-    Similarity similarity = similarityProviderOne.get(fname);
+    DefaultSimilarity similarity = (DefaultSimilarity) similarityProviderOne.get(fname);
     do {
 			float norm1 = similarity.decodeNormValue(similarity.encodeNormValue(norm));
       if (norm1 > lastNorm) {
@@ -259,8 +260,8 @@
     }
 
     @Override
-    public float computeNorm(FieldInvertState state) {
-      return (float) state.getLength();
+    public byte computeNorm(FieldInvertState state) {
+      return encodeNormValue((float) state.getLength());
     }
   }
   
diff --git a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
index cf7ecbd..efef48a 100644
--- a/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
+++ b/lucene/src/test/org/apache/lucene/index/TestOmitTf.java
@@ -18,9 +18,9 @@
  */
 
 import java.io.IOException;
-import java.util.Collection;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -30,7 +30,6 @@
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.search.Explanation.IDFExplanation;
 
 
 public class TestOmitTf extends LuceneTestCase {
@@ -39,23 +38,14 @@
     public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
     public float coord(int overlap, int maxOverlap) { return 1.0f; }
     public Similarity get(String field) {
-      return new Similarity() {
+      return new TFIDFSimilarity() {
 
-        @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); }
+        @Override public byte computeNorm(FieldInvertState state) { return encodeNormValue(state.getBoost()); }
         @Override public float tf(float freq) { return freq; }
         @Override public float sloppyFreq(int distance) { return 2.0f; }
         @Override public float idf(int docFreq, int numDocs) { return 1.0f; }
-        @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
-          return new IDFExplanation() {
-            @Override
-            public float getIdf() {
-              return 1.0f;
-            }
-            @Override
-            public String explain() {
-              return "Inexplicable";
-            }
-          };
+        @Override public Explanation idfExplain(TermContext[] terms, IndexSearcher searcher) throws IOException {
+          return new Explanation(1.0f, "Inexplicable");
         }
       };
     }
diff --git a/lucene/src/test/org/apache/lucene/index/TestParallelReader.java b/lucene/src/test/org/apache/lucene/index/TestParallelReader.java
index 6b5dc4e..b2d0b3c 100644
--- a/lucene/src/test/org/apache/lucene/index/TestParallelReader.java
+++ b/lucene/src/test/org/apache/lucene/index/TestParallelReader.java
@@ -149,7 +149,7 @@
     
     assertTrue(pr.isCurrent());
     IndexReader modifier = IndexReader.open(dir1, false);
-    Similarity sim = new DefaultSimilarity();
+    DefaultSimilarity sim = new DefaultSimilarity();
     modifier.setNorm(0, "f1", sim.encodeNormValue(100f));
     modifier.close();
     
diff --git a/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java b/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java
index 2108b68..a8e7d79 100644
--- a/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java
+++ b/lucene/src/test/org/apache/lucene/index/TestPerFieldCodecSupport.java
@@ -279,7 +279,7 @@
       CodecProvider provider = new CodecProvider();
       Codec[] codecs = new Codec[] { new StandardCodec(),
           new SimpleTextCodec(), new MockSepCodec(),
-          new PulsingCodec(1 + random.nextInt(10)),
+          new PulsingCodec(1 + random.nextInt(20)),
           new MockVariableIntBlockCodec(1 + random.nextInt(10)),
           new MockFixedIntBlockCodec(1 + random.nextInt(10)) };
       for (Codec codec : codecs) {
diff --git a/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java b/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java
index d4981aa..2fc02a2 100644
--- a/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java
+++ b/lucene/src/test/org/apache/lucene/index/values/TestDocValues.java
@@ -81,7 +81,7 @@
     w.finish(maxDoc);
     assertEquals(0, trackBytes.get());
 
-    IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, newIOContext(random));
+    IndexDocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, comp, newIOContext(random));
     for (int iter = 0; iter < 2; iter++) {
       ValuesEnum bytesEnum = getEnum(r);
       assertNotNull("enum is null", bytesEnum);
@@ -105,7 +105,8 @@
       Source s;
       IndexDocValues.SortedSource ss;
       if (mode == Bytes.Mode.SORTED) {
-        s = ss = getSortedSource(r, comp);
+        // default is unicode so we can simply pass null here
+        s = ss = getSortedSource(r, random.nextBoolean() ? comp : null);  
       } else {
         s = getSource(r);
         ss = null;
diff --git a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
index 13e7f81..167d10e 100644
--- a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
@@ -20,7 +20,11 @@
 import java.io.IOException;
 
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.search.Similarity.ExactDocScorer;
+import org.apache.lucene.search.Similarity.SloppyDocScorer;
+import org.apache.lucene.search.Similarity.Stats;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.util.PriorityQueue;
 
@@ -187,8 +191,8 @@
   static final class JustCompilePhraseScorer extends PhraseScorer {
 
     JustCompilePhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
-        Similarity similarity, byte[] norms) {
-      super(weight, postings, similarity, norms);
+        Similarity.SloppyDocScorer docScorer) throws IOException {
+      super(weight, postings, docScorer);
     }
 
     @Override
@@ -243,12 +247,22 @@
   static final class JustCompileSimilarity extends Similarity {
 
     @Override
-    public float idf(int docFreq, int numDocs) {
+    public Stats computeStats(IndexSearcher searcher, String fieldName, float queryBoost, TermContext... termContexts) throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
     @Override
-    public float computeNorm(FieldInvertState state) {
+    public ExactDocScorer exactDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
+      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
+    }
+
+    @Override
+    public byte computeNorm(FieldInvertState state) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
@@ -256,11 +270,6 @@
     public float sloppyFreq(int distance) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
-
-    @Override
-    public float tf(float freq) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }  
   }
   
   static final class JustCompileSimilarityProvider implements SimilarityProvider {
@@ -348,17 +357,12 @@
     }
 
     @Override
-    public float getValue() {
+    public void normalize(float norm, float topLevelBoost) {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
     @Override
-    public void normalize(float norm) {
-      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
-    }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       throw new UnsupportedOperationException(UNSUPPORTED_MSG);
     }
 
diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index e8a6b69..71b96a4 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -62,9 +62,9 @@
     }
     
     @Override
-    public float computeNorm(FieldInvertState state) {
+    public byte computeNorm(FieldInvertState state) {
       // Disable length norm
-      return state.getBoost();
+      return encodeNormValue(state.getBoost());
     }
     
     @Override
diff --git a/lucene/src/test/org/apache/lucene/search/TestDocValuesScoring.java b/lucene/src/test/org/apache/lucene/search/TestDocValuesScoring.java
new file mode 100644
index 0000000..2281000
--- /dev/null
+++ b/lucene/src/test/org/apache/lucene/search/TestDocValuesScoring.java
@@ -0,0 +1,203 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IndexDocValuesField;
+import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.index.values.IndexDocValues.Source;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TermContext;
+
+/**
+ * Tests the use of IndexDocValues in scoring.
+ * 
+ * In the example, a docvalues field is used as a per-document boost (separate from the norm).
+ * @lucene.experimental
+ */
+public class TestDocValuesScoring extends LuceneTestCase {
+  private static final float SCORE_EPSILON = 0.001f; /* for comparing floats */
+
+  public void testSimple() throws Exception {
+    assumeFalse("PreFlex codec cannot work with IndexDocValues!", 
+        "PreFlex".equals(CodecProvider.getDefault().getDefaultFieldCodec()));
+    
+    Directory dir = newDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(random, dir);
+    Document doc = new Document();
+    Field field = newField("foo", "", Field.Store.NO, Field.Index.ANALYZED);
+    doc.add(field);
+    IndexDocValuesField dvField = new IndexDocValuesField("foo_boost");
+    doc.add(dvField);
+    Field field2 = newField("bar", "", Field.Store.NO, Field.Index.ANALYZED);
+    doc.add(field2);
+    
+    field.setValue("quick brown fox");
+    field2.setValue("quick brown fox");
+    dvField.setFloat(2f); // boost x2
+    iw.addDocument(doc);
+    field.setValue("jumps over lazy brown dog");
+    field2.setValue("jumps over lazy brown dog");
+    dvField.setFloat(4f); // boost x4
+    iw.addDocument(doc);
+    IndexReader ir = iw.getReader();
+    iw.close();
+    
+    // no boosting
+    IndexSearcher searcher1 = newSearcher(ir);
+    // boosting
+    IndexSearcher searcher2 = newSearcher(ir);
+    searcher2.setSimilarityProvider(new DefaultSimilarityProvider() {
+      final Similarity fooSim = new BoostingSimilarity(super.get("foo"), "foo_boost");
+
+      public Similarity get(String field) {
+        return "foo".equals(field) ? fooSim : super.get(field);
+      }
+    });
+    
+    // in this case, we searched on field "foo"; the first document should have 2x the score.
+    TermQuery tq = new TermQuery(new Term("foo", "quick"));
+    QueryUtils.check(random, tq, searcher1);
+    QueryUtils.check(random, tq, searcher2);
+    
+    TopDocs noboost = searcher1.search(tq, 10);
+    TopDocs boost = searcher2.search(tq, 10);
+    assertEquals(1, noboost.totalHits);
+    assertEquals(1, boost.totalHits);
+    
+    //System.out.println(searcher2.explain(tq, boost.scoreDocs[0].doc));
+    assertEquals(boost.scoreDocs[0].score, noboost.scoreDocs[0].score*2f, SCORE_EPSILON);
+    
+    // this query matches only the second document, which should have 4x the score.
+    tq = new TermQuery(new Term("foo", "jumps"));
+    QueryUtils.check(random, tq, searcher1);
+    QueryUtils.check(random, tq, searcher2);
+    
+    noboost = searcher1.search(tq, 10);
+    boost = searcher2.search(tq, 10);
+    assertEquals(1, noboost.totalHits);
+    assertEquals(1, boost.totalHits);
+    
+    assertEquals(boost.scoreDocs[0].score, noboost.scoreDocs[0].score*4f, SCORE_EPSILON);
+    
+    // search on field bar just for kicks; nothing should happen, since we set up
+    // our sim provider to use foo_boost only for field foo.
+    tq = new TermQuery(new Term("bar", "quick"));
+    QueryUtils.check(random, tq, searcher1);
+    QueryUtils.check(random, tq, searcher2);
+    
+    noboost = searcher1.search(tq, 10);
+    boost = searcher2.search(tq, 10);
+    assertEquals(1, noboost.totalHits);
+    assertEquals(1, boost.totalHits);
+    
+    assertEquals(boost.scoreDocs[0].score, noboost.scoreDocs[0].score, SCORE_EPSILON);
+
+    
+    searcher1.close();
+    searcher2.close();
+    ir.close();
+    dir.close();
+  }
+  
+  /**
+   * Similarity that wraps another similarity and boosts the final score
+   * according to what's in a docvalues field.
+   * 
+   * @lucene.experimental
+   */
+  static class BoostingSimilarity extends Similarity {
+    private final Similarity sim;
+    private final String boostField;
+    
+    public BoostingSimilarity(Similarity sim, String boostField) {
+      this.sim = sim;
+      this.boostField = boostField;
+    }
+    
+    @Override
+    public byte computeNorm(FieldInvertState state) {
+      return sim.computeNorm(state);
+    }
+
+    @Override
+    public float sloppyFreq(int distance) {
+      return sim.sloppyFreq(distance);
+    }
+
+    @Override
+    public Stats computeStats(IndexSearcher searcher, String fieldName, float queryBoost, TermContext... termContexts) throws IOException {
+      return sim.computeStats(searcher, fieldName, queryBoost, termContexts);
+    }
+
+    @Override
+    public ExactDocScorer exactDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
+      final ExactDocScorer sub = sim.exactDocScorer(stats, fieldName, context);
+      final Source values = context.reader.docValues(boostField).getSource();
+
+      return new ExactDocScorer() {
+        @Override
+        public float score(int doc, int freq) {
+          return (float) values.getFloat(doc) * sub.score(doc, freq);
+        }
+
+        @Override
+        public Explanation explain(int doc, Explanation freq) {
+          Explanation boostExplanation = new Explanation((float) values.getFloat(doc), "indexDocValue(" + boostField + ")");
+          Explanation simExplanation = sub.explain(doc, freq);
+          Explanation expl = new Explanation(boostExplanation.getValue() * simExplanation.getValue(), "product of:");
+          expl.addDetail(boostExplanation);
+          expl.addDetail(simExplanation);
+          return expl;
+        }
+      };
+    }
+
+    @Override
+    public SloppyDocScorer sloppyDocScorer(Stats stats, String fieldName, AtomicReaderContext context) throws IOException {
+      final SloppyDocScorer sub = sim.sloppyDocScorer(stats, fieldName, context);
+      final Source values = context.reader.docValues(boostField).getSource();
+      
+      return new SloppyDocScorer() {
+        @Override
+        public float score(int doc, float freq) {
+          return (float) values.getFloat(doc) * sub.score(doc, freq);
+        }
+        
+        @Override
+        public Explanation explain(int doc, Explanation freq) {
+          Explanation boostExplanation = new Explanation((float) values.getFloat(doc), "indexDocValue(" + boostField + ")");
+          Explanation simExplanation = sub.explain(doc, freq);
+          Explanation expl = new Explanation(boostExplanation.getValue() * simExplanation.getValue(), "product of:");
+          expl.addDetail(boostExplanation);
+          expl.addDetail(simExplanation);
+          return expl;
+        }
+      };
+    }
+  }
+}
diff --git a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
index 6344358..c60a8be 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
@@ -49,34 +49,12 @@
     IndexSearcher is = newSearcher(ir);
     ScoreDoc[] hits;
 
-    // assert with norms scoring turned off
-
     hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
     assertEquals(3, hits.length);
     assertEquals("one", is.doc(hits[0].doc).get("key"));
     assertEquals("two", is.doc(hits[1].doc).get("key"));
     assertEquals("three four", is.doc(hits[2].doc).get("key"));
 
-    // assert with norms scoring turned on
-
-    MatchAllDocsQuery normsQuery = new MatchAllDocsQuery("key");
-    hits = is.search(normsQuery, null, 1000).scoreDocs;
-    assertEquals(3, hits.length);
-
-    assertEquals("three four", is.doc(hits[0].doc).get("key"));    
-    assertEquals("two", is.doc(hits[1].doc).get("key"));
-    assertEquals("one", is.doc(hits[2].doc).get("key"));
-
-    // change norm & retest
-    is.getIndexReader().setNorm(0, "key", is.getSimilarityProvider().get("key").encodeNormValue(400f));
-    normsQuery = new MatchAllDocsQuery("key");
-    hits = is.search(normsQuery, null, 1000).scoreDocs;
-    assertEquals(3, hits.length);
-
-    assertEquals("one", is.doc(hits[0].doc).get("key"));
-    assertEquals("three four", is.doc(hits[1].doc).get("key"));    
-    assertEquals("two", is.doc(hits[2].doc).get("key"));
-    
     // some artificial queries to trigger the use of skipTo():
     
     BooleanQuery bq = new BooleanQuery();
diff --git a/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
index 02b876e..c434b1d 100644
--- a/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
@@ -24,9 +24,9 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.search.Explanation.IDFExplanation;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TermContext;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
@@ -312,21 +312,9 @@
         return new DefaultSimilarity() {
           
           @Override
-          public IDFExplanation idfExplain(Collection<Term> terms,
+          public Explanation idfExplain(TermContext stats[],
               IndexSearcher searcher) throws IOException {
-            return new IDFExplanation() {
-
-              @Override
-              public float getIdf() {
-                return 10f;
-              }
-
-              @Override
-              public String explain() {
-                return "just a test";
-              }
-              
-            };
+            return new Explanation(10f, "just a test");
           } 
         };
       }
@@ -336,7 +324,7 @@
     query.add(new Term[] { new Term("body", "this"), new Term("body", "that") });
     query.add(new Term("body", "is"));
     Weight weight = query.createWeight(searcher);
-    assertEquals(10f * 10f, weight.sumOfSquaredWeights(), 0.001f);
+    assertEquals(10f * 10f, weight.getValueForNormalization(), 0.001f);
 
     writer.close();
     searcher.close();
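
The hunks above replace the old two-method IDFExplanation holder with a plain Explanation returned directly from idfExplain, which now receives the per-term TermContext statistics. A minimal sketch of such an override, condensed from the anonymous class above (the named class is illustrative):

    import java.io.IOException;
    import org.apache.lucene.search.DefaultSimilarity;
    import org.apache.lucene.search.Explanation;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.util.TermContext;

    public class ConstantIdfSimilarity extends DefaultSimilarity {
      @Override
      public Explanation idfExplain(TermContext[] stats, IndexSearcher searcher)
          throws IOException {
        // One Explanation now carries both the value (old getIdf())
        // and the description (old explain()).
        return new Explanation(10f, "just a test");
      }
    }
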
diff --git a/lucene/src/test/org/apache/lucene/search/TestSetNorm.java b/lucene/src/test/org/apache/lucene/search/TestSetNorm.java
index 906aeb0..72245e1 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSetNorm.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSetNorm.java
@@ -50,7 +50,7 @@
 
     // reset the boost of each instance of this document
     IndexReader reader = IndexReader.open(store, false);
-    Similarity similarity = new DefaultSimilarity();
+    DefaultSimilarity similarity = new DefaultSimilarity();
     reader.setNorm(0, "field", similarity.encodeNormValue(1.0f));
     reader.setNorm(1, "field", similarity.encodeNormValue(2.0f));
     reader.setNorm(2, "field", similarity.encodeNormValue(4.0f));
diff --git a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
index 3afeb25..55c6224 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSimilarity.java
@@ -18,8 +18,9 @@
  */
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TermContext;
+
 import java.io.IOException;
-import java.util.Collection;
 
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.IndexReader;
@@ -30,7 +31,6 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.search.Explanation.IDFExplanation;
 
 /** Similarity unit test.
  *
@@ -42,22 +42,13 @@
     public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
     public float coord(int overlap, int maxOverlap) { return 1.0f; }
     public Similarity get(String field) {
-      return new Similarity() {
-        @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); }
+      return new DefaultSimilarity() {
+        @Override public byte computeNorm(FieldInvertState state) { return encodeNormValue(state.getBoost()); }
         @Override public float tf(float freq) { return freq; }
         @Override public float sloppyFreq(int distance) { return 2.0f; }
         @Override public float idf(int docFreq, int numDocs) { return 1.0f; }
-        @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
-          return new IDFExplanation() {
-            @Override
-            public float getIdf() {
-              return 1.0f;
-            }
-            @Override
-            public String explain() {
-              return "Inexplicable";
-            }
-          };
+        @Override public Explanation idfExplain(TermContext[] stats, IndexSearcher searcher) throws IOException {
+          return new Explanation(1.0f, "Inexplicable"); 
         }
       };
     }
diff --git a/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java b/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java
index 7a9d641..1bf30e3 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java
@@ -105,10 +105,10 @@
     }
   }
   
-  private class Sim1 extends Similarity {
+  private class Sim1 extends TFIDFSimilarity {
     @Override
-    public float computeNorm(FieldInvertState state) {
-      return 1f;
+    public byte computeNorm(FieldInvertState state) {
+      return encodeNormValue(1f);
     }
 
     @Override
@@ -127,10 +127,10 @@
     }
   }
   
-  private class Sim2 extends Similarity {
+  private class Sim2 extends TFIDFSimilarity {
     @Override
-    public float computeNorm(FieldInvertState state) {
-      return 10f;
+    public byte computeNorm(FieldInvertState state) {
+      return encodeNormValue(10f);
     }
 
     @Override
diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
index 5c115d5..962eab0 100644
--- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
@@ -17,7 +17,6 @@
  */
 import java.io.IOException;
 import java.io.Reader;
-import java.util.Collection;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockTokenizer;
@@ -45,7 +44,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.util.TermContext;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
@@ -325,8 +324,8 @@
         //Make everything else 1 so we see the effect of the payload
         //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
         @Override 
-        public float computeNorm(FieldInvertState state) {
-          return state.getBoost();
+        public byte computeNorm(FieldInvertState state) {
+          return encodeNormValue(state.getBoost());
         }
 
         @Override 
@@ -341,18 +340,8 @@
     
         // idf used for phrase queries
         @Override 
-        public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
-          return new IDFExplanation() {
-            @Override
-            public float getIdf() {
-              return 1.0f;
-            }
-        
-            @Override
-            public String explain() {
-              return "Inexplicable";
-            }
-          };
+        public Explanation idfExplain(TermContext states[], IndexSearcher searcher) throws IOException {
+          return new Explanation(1.0f, "Inexplicable");
         }
       };
     }
diff --git a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
index 9ed0db3..ea35f60 100644
--- a/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
@@ -318,8 +318,8 @@
         //Make everything else 1 so we see the effect of the payload
         //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
         @Override
-        public float computeNorm(FieldInvertState state) {
-          return state.getBoost();
+        public byte computeNorm(FieldInvertState state) {
+          return encodeNormValue(state.getBoost());
         }
 
         @Override
diff --git a/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java b/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
index ac0f45d..4adceca 100644
--- a/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
+++ b/lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
@@ -135,8 +135,8 @@
   static final class JustCompileSpanScorer extends SpanScorer {
 
     protected JustCompileSpanScorer(Spans spans, Weight weight,
-        Similarity similarity, byte[] norms) throws IOException {
-      super(spans, weight, similarity, norms);
+        Similarity similarity, Similarity.SloppyDocScorer docScorer) throws IOException {
+      super(spans, weight, similarity, docScorer);
     }
 
     @Override
diff --git a/modules/join/src/java/org/apache/lucene/search/join/BlockJoinQuery.java b/modules/join/src/java/org/apache/lucene/search/join/BlockJoinQuery.java
index a066d1e..edc1516 100644
--- a/modules/join/src/java/org/apache/lucene/search/join/BlockJoinQuery.java
+++ b/modules/join/src/java/org/apache/lucene/search/join/BlockJoinQuery.java
@@ -133,18 +133,13 @@
     }
 
     @Override
-    public float getValue() {
-      return childWeight.getValue();
+    public float getValueForNormalization() throws IOException {
+      return childWeight.getValueForNormalization();
     }
 
     @Override
-    public float sumOfSquaredWeights() throws IOException {
-      return childWeight.sumOfSquaredWeights();
-    }
-
-    @Override
-    public void normalize(float norm) {
-      childWeight.normalize(norm);
+    public void normalize(float norm, float topLevelBoost) {
+      childWeight.normalize(norm, topLevelBoost);
     }
 
     @Override
diff --git a/modules/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java b/modules/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
index 7842a90..0d24612 100755
--- a/modules/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
@@ -195,21 +195,14 @@
       return CustomScoreQuery.this;
     }
 
-    /*(non-Javadoc) @see org.apache.lucene.search.Weight#getValue() */
     @Override
-    public float getValue() {
-      return getBoost();
-    }
-
-    /*(non-Javadoc) @see org.apache.lucene.search.Weight#sumOfSquaredWeights() */
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
-      float sum = subQueryWeight.sumOfSquaredWeights();
+    public float getValueForNormalization() throws IOException {
+      float sum = subQueryWeight.getValueForNormalization();
       for(int i = 0; i < valSrcWeights.length; i++) {
         if (qStrict) {
-          valSrcWeights[i].sumOfSquaredWeights(); // do not include ValueSource part in the query normalization
+          valSrcWeights[i].getValueForNormalization(); // do not include ValueSource part in the query normalization
         } else {
-          sum += valSrcWeights[i].sumOfSquaredWeights();
+          sum += valSrcWeights[i].getValueForNormalization();
         }
       }
       sum *= getBoost() * getBoost(); // boost each sub-weight
@@ -218,14 +211,14 @@
 
     /*(non-Javadoc) @see org.apache.lucene.search.Weight#normalize(float) */
     @Override
-    public void normalize(float norm) {
-      norm *= getBoost(); // incorporate boost
-      subQueryWeight.normalize(norm);
+    public void normalize(float norm, float topLevelBoost) {
+      topLevelBoost *= getBoost(); // incorporate boost
+      subQueryWeight.normalize(norm, topLevelBoost);
       for(int i = 0; i < valSrcWeights.length; i++) {
         if (qStrict) {
-          valSrcWeights[i].normalize(1); // do not normalize the ValueSource part
+          valSrcWeights[i].normalize(1, 1); // do not normalize the ValueSource part
         } else {
-          valSrcWeights[i].normalize(norm);
+          valSrcWeights[i].normalize(norm, topLevelBoost);
         }
       }
     }
@@ -245,7 +238,7 @@
       for(int i = 0; i < valSrcScorers.length; i++) {
          valSrcScorers[i] = valSrcWeights[i].scorer(context, scorerContext.scoreDocsInOrder(true));
       }
-      return new CustomScorer(CustomScoreQuery.this.getCustomScoreProvider(context), this, subQueryScorer, valSrcScorers);
+      return new CustomScorer(CustomScoreQuery.this.getCustomScoreProvider(context), this, getBoost(), subQueryScorer, valSrcScorers);
     }
 
     @Override
@@ -265,11 +258,11 @@
         valSrcExpls[i] = valSrcWeights[i].explain(info, doc);
       }
       Explanation customExp = CustomScoreQuery.this.getCustomScoreProvider(info).customExplain(doc,subQueryExpl,valSrcExpls);
-      float sc = getValue() * customExp.getValue();
+      float sc = getBoost() * customExp.getValue();
       Explanation res = new ComplexExplanation(
         true, sc, CustomScoreQuery.this.toString() + ", product of:");
       res.addDetail(customExp);
-      res.addDetail(new Explanation(getValue(), "queryBoost")); // actually using the q boost as q weight (== weight value)
+      res.addDetail(new Explanation(getBoost(), "queryBoost")); // actually using the q boost as q weight (== weight value)
       return res;
     }
 
@@ -294,10 +287,10 @@
     private float vScores[]; // reused in score() to avoid allocating this array for each doc 
 
     // constructor
-    private CustomScorer(CustomScoreProvider provider, CustomWeight w,
+    private CustomScorer(CustomScoreProvider provider, CustomWeight w, float qWeight,
         Scorer subQueryScorer, Scorer[] valSrcScorers) throws IOException {
       super(w);
-      this.qWeight = w.getValue();
+      this.qWeight = qWeight;
       this.subQueryScorer = subQueryScorer;
       this.valSrcScorers = valSrcScorers;
       this.vScores = new float[valSrcScorers.length];
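
With Weight.getValue() removed, the hunk above (and the BoostedQuery, FunctionQuery, LatLonType, JoinQParserPlugin and SolrConstantScoreQuery changes below) all follow the same pattern: the query boost is passed into the scorer constructor once, instead of being read back from the Weight at scoring time. A plain-Java sketch of that constructor-injection pattern (names illustrative):

    public class QWeightSketch {
      static final class SketchScorer {
        private final float qWeight;
        SketchScorer(float qWeight) { this.qWeight = qWeight; } // was: w.getValue()
        float score(float raw) { return qWeight * raw; }
      }

      public static void main(String[] args) {
        SketchScorer s = new SketchScorer(2.0f);
        System.out.println(s.score(1.5f)); // 3.0
      }
    }
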
diff --git a/modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java b/modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
index 1fafb07..3e04f55 100755
--- a/modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
@@ -78,21 +78,16 @@
     }
 
     @Override
-    public float getValue() {
-      return getBoost();
-    }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
-      float sum = qWeight.sumOfSquaredWeights();
+    public float getValueForNormalization() throws IOException {
+      float sum = qWeight.getValueForNormalization();
       sum *= getBoost() * getBoost();
       return sum ;
     }
 
     @Override
-    public void normalize(float norm) {
-      norm *= getBoost();
-      qWeight.normalize(norm);
+    public void normalize(float norm, float topLevelBoost) {
+      topLevelBoost *= getBoost();
+      qWeight.normalize(norm, topLevelBoost);
     }
 
     @Override
@@ -101,7 +96,7 @@
       if(subQueryScorer == null) {
         return null;
       }
-      return new BoostedQuery.CustomScorer(context, this, subQueryScorer, boostVal);
+      return new BoostedQuery.CustomScorer(context, this, getBoost(), subQueryScorer, boostVal);
     }
 
     @Override
@@ -128,11 +123,11 @@
     private final DocValues vals;
     private final AtomicReaderContext readerContext;
 
-    private CustomScorer(AtomicReaderContext readerContext, BoostedQuery.BoostedWeight w,
+    private CustomScorer(AtomicReaderContext readerContext, BoostedQuery.BoostedWeight w, float qWeight,
         Scorer scorer, ValueSource vs) throws IOException {
       super(w);
       this.weight = w;
-      this.qWeight = w.getValue();
+      this.qWeight = qWeight;
       this.scorer = scorer;
       this.readerContext = readerContext;
       this.vals = vs.getValues(weight.fcontext, readerContext);
diff --git a/modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java b/modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
index ffeba13..6538375 100644
--- a/modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
@@ -77,25 +77,20 @@
     }
 
     @Override
-    public float getValue() {
-      return queryWeight;
-    }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       queryWeight = getBoost();
       return queryWeight * queryWeight;
     }
 
     @Override
-    public void normalize(float norm) {
-      this.queryNorm = norm;
+    public void normalize(float norm, float topLevelBoost) {
+      this.queryNorm = norm * topLevelBoost;
       queryWeight *= this.queryNorm;
     }
 
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-      return new AllScorer(context, this);
+      return new AllScorer(context, this, queryWeight);
     }
 
     @Override
@@ -114,10 +109,10 @@
     final boolean hasDeletions;
     final Bits liveDocs;
 
-    public AllScorer(AtomicReaderContext context, FunctionWeight w) throws IOException {
+    public AllScorer(AtomicReaderContext context, FunctionWeight w, float qWeight) throws IOException {
       super(w);
       this.weight = w;
-      this.qWeight = w.getValue();
+      this.qWeight = qWeight;
       this.reader = context.reader;
       this.maxDoc = reader.maxDoc();
       this.hasDeletions = reader.hasDeletions();
diff --git a/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IDFValueSource.java b/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IDFValueSource.java
index 23ccd22..b6a5341 100755
--- a/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IDFValueSource.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/IDFValueSource.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.queries.function.DocValues;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
@@ -42,9 +43,11 @@
   public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     IndexSearcher searcher = (IndexSearcher)context.get("searcher");
     Similarity sim = searcher.getSimilarityProvider().get(field);
-    // todo: we need docFreq that takes a BytesRef
-    int docfreq = searcher.docFreq(new Term(indexedField, indexedBytes.utf8ToString()));
-    float idf = sim.idf(docfreq, searcher.maxDoc());
+    if (!(sim instanceof TFIDFSimilarity)) {
+      throw new UnsupportedOperationException("requires a TFIDFSimilarity (such as DefaultSimilarity)");
+    }
+    int docfreq = searcher.docFreq(new Term(indexedField, indexedBytes));
+    float idf = ((TFIDFSimilarity)sim).idf(docfreq, searcher.maxDoc());
     return new ConstDoubleDocValues(idf, this);
   }
 }
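
Since idf() is no longer part of the base Similarity contract, the value source first checks for a TFIDFSimilarity and fails fast otherwise; NormValueSource and TFValueSource below apply the same guard. A minimal fragment showing the guard in isolation (searcher, field and docfreq are assumed to be in scope):

    Similarity sim = searcher.getSimilarityProvider().get(field);
    if (!(sim instanceof TFIDFSimilarity)) {
      throw new UnsupportedOperationException(
          "requires a TFIDFSimilarity (such as DefaultSimilarity)");
    }
    // Safe to downcast: idf() lives on TFIDFSimilarity now.
    float idf = ((TFIDFSimilarity) sim).idf(docfreq, searcher.maxDoc());
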
diff --git a/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java b/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java
index 5a515ad..f2b5436 100755
--- a/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/NormValueSource.java
@@ -23,6 +23,8 @@
 import org.apache.lucene.queries.function.docvalues.FloatDocValues;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
+
 import java.io.IOException;
 import java.util.Map;
 
@@ -49,7 +51,11 @@
   @Override
   public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     IndexSearcher searcher = (IndexSearcher)context.get("searcher");
-    final Similarity similarity = searcher.getSimilarityProvider().get(field);
+    Similarity sim = searcher.getSimilarityProvider().get(field);
+    if (!(sim instanceof TFIDFSimilarity)) {
+      throw new UnsupportedOperationException("requires a TFIDFSimilarity (such as DefaultSimilarity)");
+    }
+    final TFIDFSimilarity similarity = (TFIDFSimilarity) sim;
     final byte[] norms = readerContext.reader.norms(field);
     if (norms == null) {
       return new ConstDoubleDocValues(0.0, this);
diff --git a/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java b/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
index d868456..90b605b 100755
--- a/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
+++ b/modules/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
@@ -24,6 +24,7 @@
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
 import org.apache.lucene.util.BytesRef;
 
 import java.io.IOException;
@@ -43,7 +44,11 @@
   public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
     Fields fields = readerContext.reader.fields();
     final Terms terms = fields.terms(field);
-    final Similarity similarity = ((IndexSearcher)context.get("searcher")).getSimilarityProvider().get(field);
+    final Similarity sim = ((IndexSearcher)context.get("searcher")).getSimilarityProvider().get(field);
+    if (!(sim instanceof TFIDFSimilarity)) {
+      throw new UnsupportedOperationException("requires a TFIDFSimilarity (such as DefaultSimilarity)");
+    }
+    final TFIDFSimilarity similarity = (TFIDFSimilarity) sim;
 
     return new FloatDocValues(this) {
       DocsEnum docs ;
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 5893720..e0da92c 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -302,6 +302,14 @@
 
 * SOLR-2636: Fix explain functionality for negative queries. (Tom Hill via yonik)
 
+* SOLR-2538: Range Faceting on long/double fields could overflow if values 
+  bigger than the max int/float were used.
+  (Erbi Hanka, hossman)
+
+* SOLR-2230: CommonsHttpSolrServer.addFile could not be used to send 
+  multiple files in a single request.
+  (Stephan Günther, hossman)
+
 Other Changes
 ----------------------
 
diff --git a/solr/common-build.xml b/solr/common-build.xml
index 168a700..ae3ce3b 100644
--- a/solr/common-build.xml
+++ b/solr/common-build.xml
@@ -162,8 +162,9 @@
        into your local repository).  If you wish to deploy to a remote
        repository, set this property to the URL of that repository.  In
        addition, if the repository requires authentication, you can set
-       properties "m2.repository.username" and "m2.repository.private.key"
-       to define your credentials.
+       properties "m2.repository.username" and either
+       "m2.repository.private.key" or "m2.repository.password" to define
+       your credentials.
   -->
   <property name="m2.repository.url" value="file://${maven.dist.dir}"/>
   <property name="m2.repository.private.key" value="${user.home}/.ssh/id_dsa"/>
@@ -423,7 +424,7 @@
       <artifact:deploy file="@{jar.file}">
         <artifact-attachments/>
         <remoteRepository url="${m2.repository.url}">
-          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}"/>
+          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}" password="${m2.repository.password}"/>
         </remoteRepository>
         <pom refid="maven.project"/>
       </artifact:deploy>
@@ -443,7 +444,7 @@
       <artifact:pom id="maven.project" file="${maven.build.dir}/@{pom.xml}" />
       <artifact:deploy file="@{jar.file}">
         <remoteRepository url="${m2.repository.url}">
-          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}"/>
+          <authentication username="${m2.repository.username}" privateKey="${m2.repository.private.key}" password="${m2.repository.password}"/>
         </remoteRepository>
         <pom refid="maven.project"/>
       </artifact:deploy>
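
As the build-file comment above describes, remote Maven deployment can now authenticate with a password instead of a private key. For example, the properties can be supplied on the ant command line as -Dm2.repository.username=... together with either -Dm2.repository.private.key=... or -Dm2.repository.password=... (values elided; the deploy target name is build-specific and not taken from this patch).
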
diff --git a/solr/site/features.pdf b/solr/site/features.pdf
index 009dacf..ccc35bb 100755
--- a/solr/site/features.pdf
+++ b/solr/site/features.pdf
Binary files differ
diff --git a/solr/site/index.html b/solr/site/index.html
index 9cd2d7d..5330ae5 100755
--- a/solr/site/index.html
+++ b/solr/site/index.html
@@ -232,6 +232,9 @@
 <a href="#news">News</a>
 <ul class="minitoc">
 <li>
+<a href="#July+2011+-+Solr+3.3+Released">July 2011 - Solr 3.3 Released</a>
+</li>
+<li>
 <a href="#May+2011+-+Solr+3.2+Released">May 2011 - Solr 3.2 Released</a>
 </li>
 <li>
@@ -352,7 +355,37 @@
 <a name="N1005A"></a><a name="news"></a>
 <h2 class="boxed">News</h2>
 <div class="section">
-<a name="N10060"></a><a name="May+2011+-+Solr+3.2+Released"></a>
+<a name="N10060"></a><a name="July+2011+-+Solr+3.3+Released"></a>
+<h3 class="boxed">July 2011 - Solr 3.3 Released</h3>
+<p>The Lucene PMC is pleased to announce the release of <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr">Apache Solr 3.3</a>!
+  </p>
+<p>
+    Solr's version number was synced with Lucene following the Lucene/Solr merge, so Solr 3.3 contains Lucene 3.3.
+   </p>
+<p>
+   Solr 3.3 release highlights include
+ </p>
+<ul>
+    
+<li>Grouping / Field Collapsing</li>
+    
+<li>A new, automaton-based suggest/autocomplete implementation offering an
+        order-of-magnitude reduction in RAM consumption.</li>
+    
+<li>KStemFilterFactory, an optimized implementation of a less aggressive
+        stemmer for English.</li>
+    
+<li>Solr defaults to a new, more efficient merge policy (TieredMergePolicy).
+        See <a href="http://s.apache.org/merging">http://s.apache.org/merging</a> for more information.</li>
+    
+<li>Important bugfixes, including a fix for extremely high RAM usage in spellchecking.</li>
+    
+<li>Bugfixes and improvements from Apache Lucene 3.3</li>
+  
+</ul>
+<p>See the <a href="http://svn.apache.org/repos/asf/lucene/dev/tags/lucene_solr_3_3/solr/CHANGES.txt">release notes</a> for a more complete list of all the new features, improvements, and bugfixes.
+ </p>
+<a name="N10094"></a><a name="May+2011+-+Solr+3.2+Released"></a>
 <h3 class="boxed">May 2011 - Solr 3.2 Released</h3>
 <p>The Lucene PMC is pleased to announce the release of <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr">Apache Solr 3.2</a>!
   </p>
@@ -378,7 +411,7 @@
 </ul>
 <p>See the <a href="http://svn.apache.org/repos/asf/lucene/dev/tags/lucene_solr_3_2/solr/CHANGES.txt">release notes</a> for a more complete list of all the new features, improvements, and bugfixes.
  </p>
-<a name="N1008D"></a><a name="March+2011+-+Solr+3.1+Released"></a>
+<a name="N100C1"></a><a name="March+2011+-+Solr+3.1+Released"></a>
 <h3 class="boxed">March 2011 - Solr 3.1 Released</h3>
 <p>The Lucene PMC is pleased to announce the release of <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr">Apache Solr 3.1</a>!
   </p>
@@ -426,7 +459,7 @@
 </ul>
 <p>See the <a href="http://svn.apache.org/repos/asf/lucene/dev/tags/lucene_solr_3_1/solr/CHANGES.txt">release notes</a> for a more complete list of all the new features, improvements, and bugfixes.
  </p>
-<a name="N100F8"></a><a name="25+June+2010+-+Solr+1.4.1+Released"></a>
+<a name="N1012C"></a><a name="25+June+2010+-+Solr+1.4.1+Released"></a>
 <h3 class="boxed">25 June 2010 - Solr 1.4.1 Released</h3>
 <p>
            Solr 1.4.1 has been released and is now available for public
@@ -439,7 +472,7 @@
           See the <a href="http://svn.apache.org/repos/asf/lucene/solr/tags/release-1.4.1/CHANGES.txt">release notes</a>
           for more details.
           </p>
-<a name="N1010D"></a><a name="7+May+2010+-+Apache+Lucene+Eurocon+2010+Coming+to+Prague+May+18-21"></a>
+<a name="N10141"></a><a name="7+May+2010+-+Apache+Lucene+Eurocon+2010+Coming+to+Prague+May+18-21"></a>
 <h3 class="boxed">7 May 2010 - Apache Lucene Eurocon 2010 Coming to Prague May 18-21</h3>
 <p>
           On May 18th to the 21st Prague will play host to the first
@@ -511,7 +544,7 @@
 </li>
         
 </ul>
-<a name="N10159"></a><a name="10+November+2009+-+Solr+1.4+Released"></a>
+<a name="N1018D"></a><a name="10+November+2009+-+Solr+1.4+Released"></a>
 <h3 class="boxed">10 November 2009 - Solr 1.4 Released</h3>
 <p>
      Solr 1.4 has been released and is now available for public <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr/">download</a>!
@@ -543,7 +576,7 @@
 </ul>
 <p>See the <a href="http://svn.apache.org/repos/asf/lucene/solr/tags/release-1.4.0/CHANGES.txt">release notes</a> for more details.
  </p>
-<a name="N10189"></a><a name="20+August+2009+-+Solr%27s+first+book+is+published%21"></a>
+<a name="N101BD"></a><a name="20+August+2009+-+Solr%27s+first+book+is+published%21"></a>
 <h3 class="boxed">20 August 2009 - Solr's first book is published!</h3>
 <p>
    
@@ -558,7 +591,7 @@
    </p>
 <p>Finally, this book covers various deployment considerations to include indexing strategies and performance-oriented configuration that will enable you to scale Solr to meet the needs of a high-volume site.
    </p>
-<a name="N101A9"></a><a name="18+August+2009+-+Lucene+at+US+ApacheCon"></a>
+<a name="N101DD"></a><a name="18+August+2009+-+Lucene+at+US+ApacheCon"></a>
 <h3 class="boxed">18 August 2009 - Lucene at US ApacheCon</h3>
 <p>
  
@@ -634,7 +667,7 @@
     Search</a> - Jason Rutherglen @ 15:00</li>
 
 </ul>
-<a name="N1021D"></a><a name="09+February+2009+-+Lucene+at+ApacheCon+Europe+2009+in+Amsterdam"></a>
+<a name="N10251"></a><a name="09+February+2009+-+Lucene+at+ApacheCon+Europe+2009+in+Amsterdam"></a>
 <h3 class="boxed">09 February 2009 - Lucene at ApacheCon Europe 2009 in Amsterdam</h3>
 <p>
                
@@ -672,23 +705,23 @@
 
 							              
 </ul>
-<a name="N10266"></a><a name="19+December+2008+-+Solr+Logo+Contest+Results"></a>
+<a name="N1029A"></a><a name="19+December+2008+-+Solr+Logo+Contest+Results"></a>
 <h3 class="boxed">19 December 2008 - Solr Logo Contest Results</h3>
 <p>Many great logos were submitted, but only one could be chosen.  Congratulations Michiel,
 	the creator of the winning logo that is proudly displayed at the top of this page.  
 	</p>
-<a name="N1026F"></a><a name="03+October+2008+-+Solr+Logo+Contest"></a>
+<a name="N102A3"></a><a name="03+October+2008+-+Solr+Logo+Contest"></a>
 <h3 class="boxed">03 October 2008 - Solr Logo Contest</h3>
 <p>By popular demand, Solr is holding a contest to pick a new Solr logo.  Details about how to submit an entry can be found <a href="http://wiki.apache.org/solr/LogoContest">on the wiki</a>.  The Deadline for submissions is November 20th, 2008 @ 11:59PM GMT.
 	</p>
-<a name="N1027C"></a><a name="15+September+2008+-+Solr+1.3.0+Available"></a>
+<a name="N102B0"></a><a name="15+September+2008+-+Solr+1.3.0+Available"></a>
 <h3 class="boxed">15 September 2008 - Solr 1.3.0 Available</h3>
 <p>Solr 1.3.0 is available for public download.  This version contains many enhancements and bug fixes, including distributed search capabilities,
 			Lucene 2.3.x performance improvements and many others.
 		</p>
 <p>See the <a href="http://svn.apache.org/repos/asf/lucene/solr/tags/release-1.3.0/CHANGES.txt">release notes</a> for more details.  Download is
     available from a <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr/">Apache Mirror</a>.</p>
-<a name="N10290"></a><a name="28+August+2008+-+Lucene%2FSolr+at+ApacheCon+New+Orleans"></a>
+<a name="N102C4"></a><a name="28+August+2008+-+Lucene%2FSolr+at+ApacheCon+New+Orleans"></a>
 <h3 class="boxed">28 August 2008 - Lucene/Solr at ApacheCon New Orleans</h3>
 <p>
 	         
@@ -710,7 +743,7 @@
 <li>An <a href="http://us.apachecon.com/c/acus2008/schedule/2008/11/05">entire day of Lucene sessions</a> on November 5th</li>
 	        
 </ul>
-<a name="N102BE"></a><a name="03+September+2007+-+Lucene+at+ApacheCon+Atlanta"></a>
+<a name="N102F2"></a><a name="03+September+2007+-+Lucene+at+ApacheCon+Atlanta"></a>
 <h3 class="boxed">03 September 2007 - Lucene at ApacheCon Atlanta</h3>
 <p>
 <a href="http://www.us.apachecon.com"><img alt="ApacheCon US logo" class="float-right" src="http://www.apache.org/ads/ApacheCon/2007-usa-125x125.png"></a>
@@ -730,7 +763,7 @@
 <li>November 16, 4:00 pm: <a href="http://us.apachecon.com/us2007/program/talk/2017"> Advanced Indexing Techniques with Apache Lucene</a> by Michael Busch. Information on payloads and advanced indexing techniques.</li>
               
 </ul>
-<a name="N102F7"></a><a name="06+June+2007%3A+Release+1.2+available"></a>
+<a name="N1032B"></a><a name="06+June+2007%3A+Release+1.2+available"></a>
 <h3 class="boxed">06 June 2007: Release 1.2 available</h3>
 <p>
         This is the first release since Solr graduated from the Incubator,
@@ -740,40 +773,40 @@
         and more flexible plugins.
       </p>
 <p>See the <a href="http://svn.apache.org/repos/asf/lucene/solr/tags/release-1.2.0/CHANGES.txt">release notes</a> for more details.</p>
-<a name="N10308"></a><a name="17+January+2007%3A+Solr+graduates+from+Incubator"></a>
+<a name="N1033C"></a><a name="17+January+2007%3A+Solr+graduates+from+Incubator"></a>
 <h3 class="boxed">17 January 2007: Solr graduates from Incubator</h3>
 <p>
         Solr has graduated from the Apache Incubator, and is now a sub-project of Lucene.
       </p>
-<a name="N10312"></a><a name="22+December+2006%3A+Release+1.1.0+available"></a>
+<a name="N10346"></a><a name="22+December+2006%3A+Release+1.1.0+available"></a>
 <h3 class="boxed">22 December 2006: Release 1.1.0 available</h3>
 <p>
         This is the first release since Solr joined the Incubator, and brings
         many new features and performance optimizations including highlighting,
         faceted search, and JSON/Python/Ruby response formats.
       </p>
-<a name="N1031C"></a><a name="15+August+2006%3A+Solr+at+ApacheCon+US"></a>
+<a name="N10350"></a><a name="15+August+2006%3A+Solr+at+ApacheCon+US"></a>
 <h3 class="boxed">15 August 2006: Solr at ApacheCon US</h3>
 <p>Chris Hostetter will be presenting
         <strong><a href="http://www.apachecon.com/2006/US/html/sessions.html#FR26">"Faceted Searching With Apache Solr"</a></strong>  
         at ApacheCon US 2006, on October 13th at 4:30pm.
         See the <a href="http://www.us.apachecon.com/">ApacheCon</a> website for more details.
       </p>
-<a name="N1032F"></a><a name="21+April+2006%3A+Solr+at+ApacheCon"></a>
+<a name="N10363"></a><a name="21+April+2006%3A+Solr+at+ApacheCon"></a>
 <h3 class="boxed">21 April 2006: Solr at ApacheCon</h3>
 <p>Yonik Seeley will be presenting
         <strong>"Apache Solr, a Full-Text Search Server based on Lucene"</strong>  
         at ApacheCon Europe 2006, on June 29th at 5:30pm.
         See the <a href="http://www.eu.apachecon.com/">ApacheCon</a> website for more details.
       </p>
-<a name="N10340"></a><a name="21+February+2006%3A+nightly+builds"></a>
+<a name="N10374"></a><a name="21+February+2006%3A+nightly+builds"></a>
 <h3 class="boxed">21 February 2006: nightly builds</h3>
 <p>Solr now has nightly builds.  This automatically creates a
       <a href="http://people.apache.org/builds/lucene/solr/nightly/">downloadable version of Solr every
       night</a>.  All unit tests must pass, or a message is sent to
       the developers mailing list and no new version is created.  This
       also updates the <a href="api/index.html">javadoc</a>.</p>
-<a name="N10352"></a><a name="17+January+2006%3A+Solr+Joins+Apache+Incubator"></a>
+<a name="N10386"></a><a name="17+January+2006%3A+Solr+Joins+Apache+Incubator"></a>
 <h3 class="boxed">17 January 2006: Solr Joins Apache Incubator</h3>
 <p>Solr, a search server based on Lucene, has been accepted into the Apache Incubator.
             Solr was originally developed by CNET Networks, and is widely used within CNET
diff --git a/solr/site/index.pdf b/solr/site/index.pdf
index 4cd19fc..84ed289 100755
--- a/solr/site/index.pdf
+++ b/solr/site/index.pdf
Binary files differ
diff --git a/solr/site/issue_tracking.pdf b/solr/site/issue_tracking.pdf
index 6e494ad..110fefe 100755
--- a/solr/site/issue_tracking.pdf
+++ b/solr/site/issue_tracking.pdf
Binary files differ
diff --git a/solr/site/linkmap.pdf b/solr/site/linkmap.pdf
index 883ba55..2f6a45a 100755
--- a/solr/site/linkmap.pdf
+++ b/solr/site/linkmap.pdf
Binary files differ
diff --git a/solr/site/mailing_lists.pdf b/solr/site/mailing_lists.pdf
index 1a7cd3b..d1f85b8 100755
--- a/solr/site/mailing_lists.pdf
+++ b/solr/site/mailing_lists.pdf
Binary files differ
diff --git a/solr/site/tutorial.pdf b/solr/site/tutorial.pdf
index 275b55e..b5dbf5d 100755
--- a/solr/site/tutorial.pdf
+++ b/solr/site/tutorial.pdf
Binary files differ
diff --git a/solr/site/version_control.pdf b/solr/site/version_control.pdf
index 4ea4657..da680ab 100755
--- a/solr/site/version_control.pdf
+++ b/solr/site/version_control.pdf
Binary files differ
diff --git a/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java b/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
index 5def900..04b26e1 100644
--- a/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
+++ b/solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
@@ -89,7 +89,6 @@
       TokenStream tokenStream = null;
       try {
         tokenStream = analyzer.reusableTokenStream(context.getFieldName(), new StringReader(value));
-        tokenStream.reset();
       } catch (IOException e) {
         throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
       }
diff --git a/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java b/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
index 7d2b37d..d2c5b10 100644
--- a/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
+++ b/solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
@@ -454,8 +454,6 @@
       
       Highlighter highlighter;
       if (Boolean.valueOf(req.getParams().get(HighlightParams.USE_PHRASE_HIGHLIGHTER, "true"))) {
-        // TODO: this is not always necessary - eventually we would like to avoid this wrap
-        //       when it is not needed.
         if (maxCharsToAnalyze < 0) {
           tstream = new CachingTokenFilter(tstream);
         } else {
diff --git a/solr/src/java/org/apache/solr/request/SimpleFacets.java b/solr/src/java/org/apache/solr/request/SimpleFacets.java
index 7eb7037..1593d92 100644
--- a/solr/src/java/org/apache/solr/request/SimpleFacets.java
+++ b/solr/src/java/org/apache/solr/request/SimpleFacets.java
@@ -1317,7 +1317,7 @@
     }
     @Override
     public Double parseAndAddGap(Double value, String gap) {
-      return new Double(value.floatValue() + Double.valueOf(gap).floatValue());
+      return new Double(value.doubleValue() + Double.valueOf(gap).doubleValue());
     }
   }
   private static class IntegerRangeEndpointCalculator 
@@ -1343,7 +1343,7 @@
     }
     @Override
     public Long parseAndAddGap(Long value, String gap) {
-      return new Long(value.intValue() + Long.valueOf(gap).intValue());
+      return new Long(value.longValue() + Long.valueOf(gap).longValue());
     }
   }
   private static class DateRangeEndpointCalculator 
diff --git a/solr/src/java/org/apache/solr/schema/LatLonType.java b/solr/src/java/org/apache/solr/schema/LatLonType.java
index dbcd9588..b395617 100644
--- a/solr/src/java/org/apache/solr/schema/LatLonType.java
+++ b/solr/src/java/org/apache/solr/schema/LatLonType.java
@@ -354,25 +354,20 @@
     }
 
     @Override
-    public float getValue() {
-      return queryWeight;
-    }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       queryWeight = getBoost();
       return queryWeight * queryWeight;
     }
 
     @Override
-    public void normalize(float norm) {
-      this.queryNorm = norm;
+    public void normalize(float norm, float topLevelBoost) {
+      this.queryNorm = norm * topLevelBoost;
       queryWeight *= this.queryNorm;
     }
 
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-      return new SpatialScorer(context, this);
+      return new SpatialScorer(context, this, queryWeight);
     }
 
     @Override
@@ -405,10 +400,10 @@
     int lastDistDoc;
     double lastDist;
 
-    public SpatialScorer(AtomicReaderContext readerContext, SpatialWeight w) throws IOException {
+    public SpatialScorer(AtomicReaderContext readerContext, SpatialWeight w, float qWeight) throws IOException {
       super(w);
       this.weight = w;
-      this.qWeight = w.getValue();
+      this.qWeight = qWeight;
       this.reader = readerContext.reader;
       this.maxDoc = reader.maxDoc();
       this.liveDocs = reader.getLiveDocs();
diff --git a/solr/src/java/org/apache/solr/search/Grouping.java b/solr/src/java/org/apache/solr/search/Grouping.java
index af4fa2e..4ee4c80 100755
--- a/solr/src/java/org/apache/solr/search/Grouping.java
+++ b/solr/src/java/org/apache/solr/search/Grouping.java
@@ -261,7 +261,7 @@
 
   public void execute() throws IOException {
     if (commands.isEmpty()) {
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify at least on field, function or query to group by.");
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify at least one field, function or query to group by.");
     }
 
     DocListAndSet out = new DocListAndSet();
diff --git a/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java b/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java
index 4f188ea..61da1b1 100644
--- a/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java
+++ b/solr/src/java/org/apache/solr/search/JoinQParserPlugin.java
@@ -168,19 +168,15 @@
       return JoinQuery.this;
     }
 
-    public float getValue() {
-      return getBoost();
-    }
-
     @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       queryWeight = getBoost();
       return queryWeight * queryWeight;
     }
 
     @Override
-    public void normalize(float norm) {
-      this.queryNorm = norm;
+    public void normalize(float norm, float topLevelBoost) {
+      this.queryNorm = norm * topLevelBoost;
       queryWeight *= this.queryNorm;
     }
 
@@ -223,7 +219,7 @@
 
       DocIdSet readerSet = filter.getDocIdSet(context);
       if (readerSet == null) readerSet=DocIdSet.EMPTY_DOCIDSET;
-      return new JoinScorer(this, readerSet.iterator());
+      return new JoinScorer(this, readerSet.iterator(), getBoost());
     }
 
 
@@ -514,9 +510,9 @@
     final float score;
    int doc = -1;

 
-    public JoinScorer(Weight w, DocIdSetIterator iter) throws IOException {
+    public JoinScorer(Weight w, DocIdSetIterator iter, float score) throws IOException {
       super(w);
-      score = w.getValue();
+      this.score = score;
       this.iter = iter==null ? DocIdSet.EMPTY_DOCIDSET.iterator() : iter;
     }
 
diff --git a/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java b/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
index fd41c32..2880302 100755
--- a/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
+++ b/solr/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@@ -106,31 +106,26 @@
     }
 
     @Override
-    public float getValue() {
-      return queryWeight;
-    }
-
-    @Override
-    public float sumOfSquaredWeights() throws IOException {
+    public float getValueForNormalization() throws IOException {
       queryWeight = getBoost();
       return queryWeight * queryWeight;
     }
 
     @Override
-    public void normalize(float norm) {
-      this.queryNorm = norm;
+    public void normalize(float norm, float topLevelBoost) {
+      this.queryNorm = norm * topLevelBoost;
       queryWeight *= this.queryNorm;
     }
 
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-      return new ConstantScorer(context, this);
+      return new ConstantScorer(context, this, queryWeight);
     }
 
     @Override
     public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
 
-      ConstantScorer cs = new ConstantScorer(context, this);
+      ConstantScorer cs = new ConstantScorer(context, this, queryWeight);
       boolean exists = cs.docIdSetIterator.advance(doc) == doc;
 
       ComplexExplanation result = new ComplexExplanation();
@@ -157,9 +152,9 @@
     final float theScore;
     int doc = -1;
 
-    public ConstantScorer(AtomicReaderContext context, ConstantWeight w) throws IOException {
+    public ConstantScorer(AtomicReaderContext context, ConstantWeight w, float theScore) throws IOException {
       super(w);
-      theScore = w.getValue();
+      this.theScore = theScore;
       DocIdSet docIdSet = filter instanceof SolrFilter ? ((SolrFilter)filter).getDocIdSet(w.context, context) : filter.getDocIdSet(context);
       if (docIdSet == null) {
         docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();
diff --git a/solr/src/site/src/documentation/content/xdocs/index.xml b/solr/src/site/src/documentation/content/xdocs/index.xml
index b9808bd..00e232d 100755
--- a/solr/src/site/src/documentation/content/xdocs/index.xml
+++ b/solr/src/site/src/documentation/content/xdocs/index.xml
@@ -67,6 +67,32 @@
     <section id="news">
       <title>News</title>
 <section>
+   <title>July 2011 - Solr 3.3 Released</title>
+  <p>The Lucene PMC is pleased to announce the release of <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr">Apache Solr 3.3</a>!
+  </p> 
+   <p>
+    Solr's version number was synced with Lucene following the Lucene/Solr merge, so Solr 3.3 contains Lucene 3.3.
+   </p>
+  <p>
+   Solr 3.3 release highlights include:
+ </p>
+  <ul>
+    <li>Grouping / Field Collapsing</li>
+    <li>A new, automaton-based suggest/autocomplete implementation that
+        consumes an order of magnitude less RAM.</li>
+    <li>KStemFilterFactory, an optimized implementation of a less aggressive
+        stemmer for English.</li>
+    <li>Solr defaults to a new, more efficient merge policy (TieredMergePolicy).
+        See <a href="http://s.apache.org/merging">http://s.apache.org/merging</a> for more information.</li>
+    <li>Important bugfixes, including a fix for extremely high RAM usage in
+        spellchecking.</li>
+    <li>Bugfixes and improvements from Apache Lucene 3.3.</li>
+  </ul>
+
+  <p>See the <a href="http://svn.apache.org/repos/asf/lucene/dev/tags/lucene_solr_3_3/solr/CHANGES.txt">release notes</a> for a more complete list of all the new features, improvements, and bugfixes.
+ </p>
+
+</section>
+<section>
    <title>May 2011 - Solr 3.2 Released</title>
   <p>The Lucene PMC is pleased to announce the release of <a href="http://www.apache.org/dyn/closer.cgi/lucene/solr">Apache Solr 3.2</a>!
   </p> 
diff --git a/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java b/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java
index 2f1c19a..5a92885 100644
--- a/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java
+++ b/solr/src/solrj/org/apache/solr/client/solrj/impl/CommonsHttpSolrServer.java
@@ -32,11 +32,11 @@
 import org.apache.commons.httpclient.methods.PostMethod;
 import org.apache.commons.httpclient.methods.RequestEntity;
 import org.apache.commons.httpclient.methods.multipart.MultipartRequestEntity;
+import org.apache.commons.httpclient.methods.multipart.FilePart;
 import org.apache.commons.httpclient.methods.multipart.Part;
-import org.apache.commons.httpclient.methods.multipart.PartBase;
+import org.apache.commons.httpclient.methods.multipart.PartSource;
 import org.apache.commons.httpclient.methods.multipart.StringPart;
 import org.apache.commons.httpclient.params.HttpMethodParams;
-import org.apache.commons.io.IOUtils;
 import org.apache.solr.client.solrj.ResponseParser;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServer;
@@ -324,25 +324,24 @@
                   final ContentStream c = content;
 
                   String charSet = null;
-                  String transferEncoding = null;
-                  parts.add(new PartBase(c.getName(), c.getContentType(),
-                      charSet, transferEncoding) {
+                  PartSource source = new PartSource() {
                     @Override
-                    protected long lengthOfData() throws IOException {
+                    public long getLength() {
                       return c.getSize();
                     }
+
+                    @Override
+                    public String getFileName() {
+                      return c.getName();
+                    }
 
                     @Override
-                    protected void sendData(OutputStream out)
-                        throws IOException {
-                      InputStream in = c.getStream();
-                      try {
-                        IOUtils.copy(in, out);
-                      } finally {
-                        in.close();
-                      }
+                    public InputStream createInputStream() throws IOException {
+                      return c.getStream();
                     }
-                  });
+                  };
+                
+                  parts.add(new FilePart(c.getName(), source, 
+                                         c.getContentType(), charSet));
                 }
               }
               if (parts.size() > 0) {
diff --git a/solr/src/test-files/docs1.xml b/solr/src/test-files/docs1.xml
new file mode 100644
index 0000000..3c5448d
--- /dev/null
+++ b/solr/src/test-files/docs1.xml
@@ -0,0 +1,56 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<add>
+<doc>
+  <field name="id">SP2514N</field>
+  <field name="name">Samsung SpinPoint P120 SP2514N - hard drive - 250 GB - ATA-133</field>
+  <field name="manu">Samsung Electronics Co. Ltd.</field>
+  <!-- Join -->
+  <field name="manu_id_s">samsung</field>
+  <field name="cat">electronics</field>
+  <field name="cat">hard drive</field>
+  <field name="features">7200RPM, 8MB cache, IDE Ultra ATA-133</field>
+  <field name="features">NoiseGuard, SilentSeek technology, Fluid Dynamic Bearing (FDB) motor</field>
+  <field name="price">92</field>
+  <field name="popularity">6</field>
+  <field name="inStock">true</field>
+  <field name="manufacturedate_dt">2006-02-13T15:26:37Z</field>
+  <!-- Near Oklahoma city -->
+  <field name="store">35.0752,-97.032</field>
+</doc>
+
+<doc>
+  <field name="id">6H500F0</field>
+  <field name="name">Maxtor DiamondMax 11 - hard drive - 500 GB - SATA-300</field>
+  <field name="manu">Maxtor Corp.</field>
+  <!-- Join -->
+  <field name="manu_id_s">maxtor</field>
+  <field name="cat">electronics</field>
+  <field name="cat">hard drive</field>
+  <field name="features">SATA 3.0Gb/s, NCQ</field>
+  <field name="features">8.5ms seek</field>
+  <field name="features">16MB cache</field>
+  <field name="price">350</field>
+  <field name="popularity">6</field>
+  <field name="inStock">true</field>
+  <!-- Buffalo store -->
+  <field name="store">45.17614,-93.87341</field>
+  <field name="manufacturedate_dt">2006-02-13T15:26:37Z</field>
+</doc>
+</add>
+
diff --git a/solr/src/test-files/docs2.xml b/solr/src/test-files/docs2.xml
new file mode 100644
index 0000000..0b89d67
--- /dev/null
+++ b/solr/src/test-files/docs2.xml
@@ -0,0 +1,77 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<add>
+<doc>
+  <field name="id">TWINX2048-3200PRO</field>
+  <field name="name">CORSAIR  XMS 2GB (2 x 1GB) 184-Pin DDR SDRAM Unbuffered DDR 400 (PC 3200) Dual Channel Kit System Memory - Retail</field>
+  <field name="manu">Corsair Microsystems Inc.</field>
+  <!-- Join -->
+  <field name="manu_id_s">corsair</field>
+  <field name="cat">electronics</field>
+  <field name="cat">memory</field>
+  <field name="features">CAS latency 2,	2-3-3-6 timing, 2.75v, unbuffered, heat-spreader</field>
+  <field name="price">185</field>
+  <field name="popularity">5</field>
+  <field name="inStock">true</field>
+  <!-- San Francisco store -->
+  <field name="store">37.7752,-122.4232</field>
+  <field name="manufacturedate_dt">2006-02-13T15:26:37Z</field>
+
+  <!-- a field for testing payload tagged text via DelimitedPayloadTokenFilter -->
+  <field name="payloads">electronics|6.0 memory|3.0</field>
+</doc>
+
+<doc>
+  <field name="id">VS1GB400C3</field>
+  <field name="name">CORSAIR ValueSelect 1GB 184-Pin DDR SDRAM Unbuffered DDR 400 (PC 3200) System Memory - Retail</field>
+  <field name="manu">Corsair Microsystems Inc.</field>
+  <!-- Join -->
+  <field name="manu_id_s">corsair</field>
+  <field name="cat">electronics</field>
+  <field name="cat">memory</field>
+  <field name="price">74.99</field>
+  <field name="popularity">7</field>
+  <field name="inStock">true</field>
+  <!-- Dodge City store -->
+  <field name="store">37.7752,-100.0232</field>
+  <field name="manufacturedate_dt">2006-02-13T15:26:37Z</field>
+
+  <field name="payloads">electronics|4.0 memory|2.0</field>
+</doc>
+
+<doc>
+  <field name="id">VDBDB1A16</field>
+  <field name="name">A-DATA V-Series 1GB 184-Pin DDR SDRAM Unbuffered DDR 400 (PC 3200) System Memory - OEM</field>
+  <field name="manu">A-DATA Technology Inc.</field>
+  <!-- Join -->
+  <field name="manu_id_s">corsair</field>
+  <field name="cat">electronics</field>
+  <field name="cat">memory</field>
+  <field name="features">CAS latency 3,	 2.7v</field>
+  <!-- note: price & popularity is missing on this one -->
+  <field name="popularity">0</field>
+  <field name="inStock">true</field>
+  <!-- Buffalo store -->
+  <field name="store">45.18414,-93.88141</field>
+  <field name="manufacturedate_dt">2006-02-13T15:26:37Z</field>
+
+  <field name="payloads">electronics|0.9 memory|0.1</field>
+</doc>
+
+</add>
+
diff --git a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
index e19992a..1f28fa1 100644
--- a/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
+++ b/solr/src/test/org/apache/solr/client/solrj/SolrExampleTests.java
@@ -446,6 +446,24 @@
     assertNotNull("Couldn't upload books.csv", result);
     rsp = server.query( new SolrQuery( "*:*") );
     Assert.assertEquals( 10, rsp.getResults().getNumFound() );
+  }
+
+  @Test
+  public void testMultiContentStreamRequest() throws Exception {
+    SolrServer server = getSolrServer();
+    server.deleteByQuery( "*:*" );// delete everything!
+    server.commit();
+    QueryResponse rsp = server.query( new SolrQuery( "*:*") );
+    Assert.assertEquals( 0, rsp.getResults().getNumFound() );
+
+    ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update");
+    up.addFile(getFile("docs1.xml")); // 2
+    up.addFile(getFile("docs2.xml")); // 3
+    up.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
+    NamedList<Object> result = server.request(up);
+    assertNotNull("Couldn't upload xml files", result);
+    rsp = server.query( new SolrQuery( "*:*") );
+    Assert.assertEquals( 5, rsp.getResults().getNumFound() );
   }
 
 
diff --git a/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java b/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
index 975aa20..bf3c168 100644
--- a/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
+++ b/solr/src/test/org/apache/solr/client/solrj/embedded/TestSolrProperties.java
@@ -202,8 +202,15 @@
     assertTrue("should have more recent time: " + after + "," + before, after > before);
 
     mcr = CoreAdminRequest.persist("solr-persist.xml", coreadmin);
-    
-    System.out.println(IOUtils.toString(new FileInputStream(new File(solrXml.getParent(), "solr-persist.xml"))));
+
+    if (VERBOSE) {
+      FileInputStream fis = new FileInputStream(new File(solrXml.getParent(), "solr-persist.xml"));
+      try {
+        System.out.println(IOUtils.toString(fis));
+      } finally {
+        fis.close();
+      }
+    }
     DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
     FileInputStream fis = new FileInputStream(new File(solrXml.getParent(), "solr-persist.xml"));
     try {
diff --git a/solr/src/test/org/apache/solr/core/MockCodecProviderFactory.java b/solr/src/test/org/apache/solr/core/MockCodecProviderFactory.java
index 08f29dc..ace2e09 100644
--- a/solr/src/test/org/apache/solr/core/MockCodecProviderFactory.java
+++ b/solr/src/test/org/apache/solr/core/MockCodecProviderFactory.java
@@ -42,7 +42,7 @@
   public CodecProvider create() {
     CodecProvider cp = new CodecProvider();
     cp.register(new StandardCodec());
-    cp.register(new PulsingCodec(1));
+    cp.register(new PulsingCodec());
     if (codecs != null) {
       for (Object codec : codecs.getAll("name")) {
         if (!cp.isCodecRegistered((String)codec)) {
diff --git a/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java b/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java
index 9302daa..47f31f6 100644
--- a/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java
+++ b/solr/src/test/org/apache/solr/request/SimpleFacetsTest.java
@@ -892,7 +892,48 @@
   public void testNumericRangeFacetsSortableDouble() {
     helpTestFractionalNumberRangeFacets("range_facet_sd");
   }
-  private void helpTestFractionalNumberRangeFacets(final String fieldName) {
+
+  @Test
+  public void testNumericRangeFacetsOverflowTrieDouble() {
+    helpTestNumericRangeFacetsDoubleOverflow("range_facet_d");
+  }
+  @Test
+  public void testNumericRangeFacetsOverflowSortableDouble() {
+    helpTestNumericRangeFacetsDoubleOverflow("range_facet_sd");
+  }
+
+  private void helpTestNumericRangeFacetsDoubleOverflow(final String fieldName) {
+    final String f = fieldName;
+    final String pre = "//lst[@name='facet_ranges']/lst[@name='"+f+"']/lst[@name='counts']";
+    final String meta = pre + "/../";
+
+    String start = "0.0";
+    String gap = (new Double( (double)Float.MAX_VALUE )).toString();
+    String end = (new Double( ((double)Float.MAX_VALUE) * 3D )).toString();
+    String mid = (new Double( ((double)Float.MAX_VALUE) * 2D )).toString();
+
+    assertQ(f+": checking counts for lower",
+            req( "q", "id:[30 TO 60]"
+                ,"rows", "0"
+                ,"facet", "true"
+                ,"facet.range", f
+                ,"facet.range.start", start
+                ,"facet.range.end",   end
+                ,"facet.range.gap",   gap
+                ,"facet.range.other", "all"
+                ,"facet.range.include", "lower"
+                )
+            ,"*[count("+pre+"/int)=3]"
+            ,pre+"/int[@name='"+start+"'][.='6'  ]"
+            ,pre+"/int[@name='"+mid+"'][.='0'  ]"
+            //
+            ,meta+"/double[@name='end' ][.='"+end+"']"
+            ,meta+"/int[@name='before' ][.='0']"
+            ,meta+"/int[@name='after'  ][.='0']"
+            ,meta+"/int[@name='between'][.='6']"
+            );
+  }
+  private void helpTestFractionalNumberRangeFacets(final String fieldName) {
 
     final String f = fieldName;
     final String pre = "//lst[@name='facet_ranges']/lst[@name='"+f+"']/lst[@name='counts']";
@@ -1112,6 +1153,47 @@
     helpTestWholeNumberRangeFacets("range_facet_sl");
   }
 
+
+  @Test
+  public void testNumericRangeFacetsOverflowTrieLong() {
+    helpTestNumericRangeFacetsLongOverflow("range_facet_l");
+  }
+  @Test
+  public void testNumericRangeFacetsOverflowSortableLong() {
+    helpTestNumericRangeFacetsLongOverflow("range_facet_sl");
+  }
+
+  private void helpTestNumericRangeFacetsLongOverflow(final String fieldName) {
+    final String f = fieldName;
+    final String pre = "//lst[@name='facet_ranges']/lst[@name='"+f+"']/lst[@name='counts']";
+    final String meta = pre + "/../";
+
+    String start = "0";
+    String gap = (new Long( (long)Integer.MAX_VALUE )).toString();
+    String end = (new Long( ((long)Integer.MAX_VALUE) * 3L )).toString();
+    String mid = (new Long( ((long)Integer.MAX_VALUE) * 2L )).toString();
+
+    assertQ(f+": checking counts for lower",
+            req( "q", "id:[30 TO 60]"
+                ,"rows", "0"
+                ,"facet", "true"
+                ,"facet.range", f
+                ,"facet.range.start", start
+                ,"facet.range.end",   end
+                ,"facet.range.gap",   gap
+                ,"facet.range.other", "all"
+                ,"facet.range.include", "lower"
+                )
+            ,"*[count("+pre+"/int)=3]"
+            ,pre+"/int[@name='"+start+"'][.='6'  ]"
+            ,pre+"/int[@name='"+mid+"'][.='0'  ]"
+            //
+            ,meta+"/long[@name='end'   ][.='"+end+"']"
+            ,meta+"/int[@name='before' ][.='0']"
+            ,meta+"/int[@name='after'  ][.='0']"
+            ,meta+"/int[@name='between'][.='6']"
+            );
+  }
   private void helpTestWholeNumberRangeFacets(final String fieldName) {
 
     // the float test covers a lot of the weird edge cases
diff --git a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
index ba86864..38ab99e 100755
--- a/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
+++ b/solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
@@ -21,7 +21,7 @@
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.DefaultSimilarity;
 import org.apache.lucene.search.FieldCache;
-import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -305,7 +305,7 @@
     assertQ(req("fl","*,score","q", "{!func}docfreq($field,$value)", "fq","id:6", "field","a_t", "value","cow"), "//float[@name='score']='3.0'");
     assertQ(req("fl","*,score","q", "{!func}termfreq(a_t,cow)", "fq","id:6"), "//float[@name='score']='5.0'");
 
-    Similarity similarity = new DefaultSimilarity();
+    TFIDFSimilarity similarity = new DefaultSimilarity();
 
     // make sure it doesn't get a NPE if no terms are present in a field.
     assertQ(req("fl","*,score","q", "{!func}termfreq(nofield_t,cow)", "fq","id:6"), "//float[@name='score']='0.0'");
@@ -323,7 +323,7 @@
     state.setBoost(1.0f);
     state.setLength(4);
     assertQ(req("fl","*,score","q", "{!func}norm(a_t)", "fq","id:2"),
-        "//float[@name='score']='" + similarity.computeNorm(state)  + "'");  // sqrt(4)==2 and is exactly representable when quantized to a byte
+        "//float[@name='score']='" + similarity.decodeNormValue(similarity.computeNorm(state))  + "'");  // sqrt(4)==2 and is exactly representable when quantized to a byte
 
     // test that ord and rord are working on a global index basis, not just
     // at the segment level (since Lucene 2.9 has switched to per-segment searching)