| Index: solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java
|
| ===================================================================
|
| --- solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java (revision 1063004)
|
| +++ solr/src/test/org/apache/solr/search/function/TestFunctionQuery.java (working copy)
|
| @@ -21,6 +21,7 @@
|
| import org.apache.lucene.search.DefaultSimilarity; |
| import org.apache.lucene.search.FieldCache; |
| import org.apache.lucene.search.Similarity; |
| +import org.apache.lucene.search.TFIDFSimilarity; |
| import org.apache.solr.SolrTestCaseJ4; |
| import org.junit.BeforeClass; |
| import org.junit.Test; |
| @@ -290,7 +291,7 @@
|
| assertQ(req("fl","*,score","q", "{!func}docfreq('a_t','cow')", "fq","id:6"), "//float[@name='score']='3.0'"); |
| assertQ(req("fl","*,score","q", "{!func}docfreq($field,$value)", "fq","id:6", "field","a_t", "value","cow"), "//float[@name='score']='3.0'"); |
| assertQ(req("fl","*,score","q", "{!func}termfreq(a_t,cow)", "fq","id:6"), "//float[@name='score']='5.0'"); |
| - Similarity similarity = new DefaultSimilarity(); |
| + TFIDFSimilarity similarity = new DefaultSimilarity(); |
| assertQ(req("fl","*,score","q", "{!func}idf(a_t,cow)", "fq","id:6"), |
| "//float[@name='score']='" + similarity.idf(3,6) + "'"); |
| assertQ(req("fl","*,score","q", "{!func}tf(a_t,cow)", "fq","id:6"), |
| Index: solr/src/java/org/apache/solr/search/function/IDFValueSource.java
|
| ===================================================================
|
| --- solr/src/java/org/apache/solr/search/function/IDFValueSource.java (revision 1063004)
|
| +++ solr/src/java/org/apache/solr/search/function/IDFValueSource.java (working copy)
|
| @@ -21,6 +21,7 @@
|
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.Similarity; |
| +import org.apache.lucene.search.TFIDFSimilarity; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.solr.util.ByteUtils; |
| |
| @@ -42,10 +43,16 @@
|
| public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { |
| IndexSearcher searcher = (IndexSearcher)context.get("searcher"); |
| Similarity sim = searcher.getSimilarityProvider().get(field); |
| + // nocommit: |
| + // what to do? it's an idf valuesource... we could generalize to sim.computeWeight though |
| + // (which is idf for TF/IDF and something like it elsewhere) |
| + if (!(sim instanceof TFIDFSimilarity)) { |
| + throw new UnsupportedOperationException("only works with TF/IDF Similarity"); |
| + } |
| // todo: we need docFreq that takes a BytesRef |
| String strVal = ByteUtils.UTF8toUTF16(indexedBytes); |
| int docfreq = searcher.docFreq(new Term(indexedField, strVal)); |
| - float idf = sim.idf(docfreq, searcher.maxDoc()); |
| + float idf = ((TFIDFSimilarity)sim).idf(docfreq, searcher.maxDoc()); |
| return new ConstDoubleDocValues(idf, this); |
| } |
| } |
| Index: solr/src/java/org/apache/solr/search/function/TFValueSource.java
|
| ===================================================================
|
| --- solr/src/java/org/apache/solr/search/function/TFValueSource.java (revision 1063004)
|
| +++ solr/src/java/org/apache/solr/search/function/TFValueSource.java (working copy)
|
| @@ -5,6 +5,7 @@
|
| import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.Similarity; |
| +import org.apache.lucene.search.TFIDFSimilarity; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.solr.common.SolrException; |
| |
| @@ -25,8 +26,14 @@
|
| public DocValues getValues(Map context, AtomicReaderContext readerContext) throws IOException { |
| Fields fields = readerContext.reader.fields(); |
| final Terms terms = fields.terms(field); |
| - final Similarity similarity = ((IndexSearcher)context.get("searcher")).getSimilarityProvider().get(field); |
| - |
| + // nocommit: |
| + // what to do? it's a TF valuesource... |
| + final Similarity sim = ((IndexSearcher)context.get("searcher")).getSimilarityProvider().get(field); |
| + if (!(sim instanceof TFIDFSimilarity)) { |
| + throw new UnsupportedOperationException("only works with TF/IDF Similarity"); |
| + } |
| + final TFIDFSimilarity similarity = (TFIDFSimilarity) sim; |
| + |
| return new FloatDocValues(this) { |
| DocsEnum docs ; |
| int atDoc; |
| Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy)
|
| @@ -17,7 +17,6 @@
|
| */ |
| import java.io.IOException; |
| import java.io.Reader; |
| -import java.util.Collection; |
| |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockTokenizer; |
| @@ -43,6 +42,7 @@
|
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.English; |
| import org.apache.lucene.util.LuceneTestCase; |
| +import org.apache.lucene.util.PerReaderTermState; |
| import org.apache.lucene.search.Explanation.IDFExplanation; |
| |
| |
| @@ -325,7 +325,7 @@
|
| return 1.0f; |
| } |
| // idf used for phrase queries |
| - @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException { |
| + @Override public IDFExplanation idfExplain(PerReaderTermState states[], IndexSearcher searcher) throws IOException { |
| return new IDFExplanation() { |
| @Override |
| public float getIdf() { |
| Index: lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java (working copy)
|
| @@ -135,8 +135,8 @@
|
| static final class JustCompileSpanScorer extends SpanScorer { |
| |
| protected JustCompileSpanScorer(Spans spans, Weight weight, |
| - Similarity similarity, byte[] norms) throws IOException { |
| - super(spans, weight, similarity, norms); |
| + Similarity similarity, String field, AtomicReaderContext context) throws IOException { |
| + super(spans, weight, similarity, field, context); |
| } |
| |
| @Override |
| Index: lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/search/JustCompileSearch.java (working copy)
|
| @@ -187,8 +187,8 @@
|
| static final class JustCompilePhraseScorer extends PhraseScorer { |
| |
| JustCompilePhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, |
| - Similarity similarity, byte[] norms) { |
| - super(weight, postings, similarity, norms); |
| + Similarity similarity, String field, AtomicReaderContext context) throws IOException { |
| + super(weight, postings, similarity, field, context); |
| } |
| |
| @Override |
| @@ -240,7 +240,8 @@
|
| } |
| } |
| |
| - static final class JustCompileSimilarity extends Similarity { |
| + // nocommit: extend the Base Similarity here? |
| + static final class JustCompileSimilarity extends TFIDFSimilarity { |
| |
| @Override |
| public float idf(int docFreq, int numDocs) { |
| Index: lucene/src/test/org/apache/lucene/search/TestSimilarity.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/TestSimilarity.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/search/TestSimilarity.java (working copy)
|
| @@ -18,8 +18,9 @@
|
| */ |
| |
| import org.apache.lucene.util.LuceneTestCase; |
| +import org.apache.lucene.util.PerReaderTermState; |
| + |
| import java.io.IOException; |
| -import java.util.Collection; |
| |
| import org.apache.lucene.index.FieldInvertState; |
| import org.apache.lucene.index.IndexReader; |
| @@ -39,12 +40,12 @@
|
| */ |
| public class TestSimilarity extends LuceneTestCase { |
| |
| - public static class SimpleSimilarity extends Similarity implements SimilarityProvider { |
| + public static class SimpleSimilarity extends TFIDFSimilarity implements SimilarityProvider { |
| @Override public float computeNorm(String field, FieldInvertState state) { return state.getBoost(); } |
| @Override public float tf(float freq) { return freq; } |
| @Override public float sloppyFreq(int distance) { return 2.0f; } |
| @Override public float idf(int docFreq, int numDocs) { return 1.0f; } |
| - @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException { |
| + @Override public IDFExplanation idfExplain(PerReaderTermState[] stats, IndexSearcher searcher) throws IOException { |
| return new IDFExplanation() { |
| @Override |
| public float getIdf() { |
| Index: lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java (working copy)
|
| @@ -105,7 +105,7 @@
|
| } |
| } |
| |
| - private class Sim1 extends Similarity { |
| + private class Sim1 extends TFIDFSimilarity { |
| @Override |
| public float computeNorm(String field, FieldInvertState state) { |
| return 1f; |
| @@ -127,7 +127,7 @@
|
| } |
| } |
| |
| - private class Sim2 extends Similarity { |
| + private class Sim2 extends TFIDFSimilarity { |
| @Override |
| public float computeNorm(String field, FieldInvertState state) { |
| return 10f; |
| Index: lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy)
|
| @@ -25,6 +25,7 @@
|
| import org.apache.lucene.search.Explanation.IDFExplanation; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| +import org.apache.lucene.util.PerReaderTermState; |
| import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.Field; |
| |
| @@ -298,7 +299,7 @@
|
| searcher.setSimilarityProvider(new DefaultSimilarity() { |
| |
| @Override |
| - public IDFExplanation idfExplain(Collection<Term> terms, |
| + public IDFExplanation idfExplain(PerReaderTermState stats[], |
| IndexSearcher searcher) throws IOException { |
| return new IDFExplanation() { |
| |
| Index: lucene/src/test/org/apache/lucene/index/TestOmitTf.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestOmitTf.java (revision 1063004)
|
| +++ lucene/src/test/org/apache/lucene/index/TestOmitTf.java (working copy)
|
| @@ -18,9 +18,9 @@
|
| */ |
| |
| import java.io.IOException; |
| -import java.util.Collection; |
| |
| import org.apache.lucene.util.LuceneTestCase; |
| +import org.apache.lucene.util.PerReaderTermState; |
| import org.apache.lucene.util._TestUtil; |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| @@ -35,12 +35,12 @@
|
| |
| public class TestOmitTf extends LuceneTestCase { |
| |
| - public static class SimpleSimilarity extends Similarity implements SimilarityProvider { |
| + public static class SimpleSimilarity extends TFIDFSimilarity implements SimilarityProvider { |
| @Override public float computeNorm(String field, FieldInvertState state) { return state.getBoost(); } |
| @Override public float tf(float freq) { return freq; } |
| @Override public float sloppyFreq(int distance) { return 2.0f; } |
| @Override public float idf(int docFreq, int numDocs) { return 1.0f; } |
| - @Override public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException { |
| + @Override public IDFExplanation idfExplain(PerReaderTermState[] terms, IndexSearcher searcher) throws IOException { |
| return new IDFExplanation() { |
| @Override |
| public float getIdf() { |
| Index: lucene/src/java/org/apache/lucene/search/Similarity.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/Similarity.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/Similarity.java (working copy)
|
| @@ -20,11 +20,11 @@
|
| |
| import java.io.IOException; |
| import java.io.Serializable; |
| -import java.util.Collection; |
| |
| import org.apache.lucene.index.FieldInvertState; |
| -import org.apache.lucene.index.Term; |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.search.Explanation.IDFExplanation; |
| +import org.apache.lucene.util.PerReaderTermState; |
| import org.apache.lucene.util.SmallFloat; |
| |
| |
| @@ -35,493 +35,6 @@
|
| * Overriding computation of these components is a convenient |
| * way to alter Lucene scoring. |
| * |
| - * <p>Suggested reading: |
| - * <a href="http://nlp.stanford.edu/IR-book/html/htmledition/queries-as-vectors-1.html"> |
| - * Introduction To Information Retrieval, Chapter 6</a>. |
| - * |
| - * <p>The following describes how Lucene scoring evolves from |
| - * underlying information retrieval models to (efficient) implementation. |
| - * We first brief on <i>VSM Score</i>, |
| - * then derive from it <i>Lucene's Conceptual Scoring Formula</i>, |
| - * from which, finally, evolves <i>Lucene's Practical Scoring Function</i> |
| - * (the latter is connected directly with Lucene classes and methods). |
| - * |
| - * <p>Lucene combines |
| - * <a href="http://en.wikipedia.org/wiki/Standard_Boolean_model"> |
| - * Boolean model (BM) of Information Retrieval</a> |
| - * with |
| - * <a href="http://en.wikipedia.org/wiki/Vector_Space_Model"> |
| - * Vector Space Model (VSM) of Information Retrieval</a> - |
| - * documents "approved" by BM are scored by VSM. |
| - * |
| - * <p>In VSM, documents and queries are represented as |
| - * weighted vectors in a multi-dimensional space, |
| - * where each distinct index term is a dimension, |
| - * and weights are |
| - * <a href="http://en.wikipedia.org/wiki/Tfidf">Tf-idf</a> values. |
| - * |
| - * <p>VSM does not require weights to be <i>Tf-idf</i> values, |
| - * but <i>Tf-idf</i> values are believed to produce search results of high quality, |
| - * and so Lucene is using <i>Tf-idf</i>. |
| - * <i>Tf</i> and <i>Idf</i> are described in more detail below, |
| - * but for now, for completion, let's just say that |
| - * for given term <i>t</i> and document (or query) <i>x</i>, |
| - * <i>Tf(t,x)</i> varies with the number of occurrences of term <i>t</i> in <i>x</i> |
| - * (when one increases so does the other) and |
| - * <i>idf(t)</i> similarly varies with the inverse of the |
| - * number of index documents containing term <i>t</i>. |
| - * |
| - * <p><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the |
| - * <a href="http://en.wikipedia.org/wiki/Cosine_similarity"> |
| - * Cosine Similarity</a> |
| - * of the weighted query vectors <i>V(q)</i> and <i>V(d)</i>: |
| - * |
| - * <br> <br> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr><td> |
| - * <table cellpadding="1" cellspacing="0" border="1" align="center"> |
| - * <tr><td> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * cosine-similarity(q,d) = |
| - * </td> |
| - * <td valign="middle" align="center"> |
| - * <table> |
| - * <tr><td align="center"><small>V(q) · V(d)</small></td></tr> |
| - * <tr><td align="center">–––––––––</td></tr> |
| - * <tr><td align="center"><small>|V(q)| |V(d)|</small></td></tr> |
| - * </table> |
| - * </td> |
| - * </tr> |
| - * </table> |
| - * </td></tr> |
| - * </table> |
| - * </td></tr> |
| - * <tr><td> |
| - * <center><font=-1><u>VSM Score</u></font></center> |
| - * </td></tr> |
| - * </table> |
| - * <br> <br> |
| - * |
| - * |
| - * Where <i>V(q)</i> · <i>V(d)</i> is the |
| - * <a href="http://en.wikipedia.org/wiki/Dot_product">dot product</a> |
| - * of the weighted vectors, |
| - * and <i>|V(q)|</i> and <i>|V(d)|</i> are their |
| - * <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norms</a>. |
| - * |
| - * <p>Note: the above equation can be viewed as the dot product of |
| - * the normalized weighted vectors, in the sense that dividing |
| - * <i>V(q)</i> by its euclidean norm is normalizing it to a unit vector. |
| - * |
| - * <p>Lucene refines <i>VSM score</i> for both search quality and usability: |
| - * <ul> |
| - * <li>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that |
| - * it removes all document length information. |
| - * For some documents removing this info is probably ok, |
| - * e.g. a document made by duplicating a certain paragraph <i>10</i> times, |
| - * especially if that paragraph is made of distinct terms. |
| - * But for a document which contains no duplicated paragraphs, |
| - * this might be wrong. |
| - * To avoid this problem, a different document length normalization |
| - * factor is used, which normalizes to a vector equal to or larger |
| - * than the unit vector: <i>doc-len-norm(d)</i>. |
| - * </li> |
| - * |
| - * <li>At indexing, users can specify that certain documents are more |
| - * important than others, by assigning a document boost. |
| - * For this, the score of each document is also multiplied by its boost value |
| - * <i>doc-boost(d)</i>. |
| - * </li> |
| - * |
| - * <li>Lucene is field based, hence each query term applies to a single |
| - * field, document length normalization is by the length of the certain field, |
| - * and in addition to document boost there are also document fields boosts. |
| - * </li> |
| - * |
| - * <li>The same field can be added to a document during indexing several times, |
| - * and so the boost of that field is the multiplication of the boosts of |
| - * the separate additions (or parts) of that field within the document. |
| - * </li> |
| - * |
| - * <li>At search time users can specify boosts to each query, sub-query, and |
| - * each query term, hence the contribution of a query term to the score of |
| - * a document is multiplied by the boost of that query term <i>query-boost(q)</i>. |
| - * </li> |
| - * |
| - * <li>A document may match a multi term query without containing all |
| - * the terms of that query (this is correct for some of the queries), |
| - * and users can further reward documents matching more query terms |
| - * through a coordination factor, which is usually larger when |
| - * more terms are matched: <i>coord-factor(q,d)</i>. |
| - * </li> |
| - * </ul> |
| - * |
| - * <p>Under the simplifying assumption of a single field in the index, |
| - * we get <i>Lucene's Conceptual scoring formula</i>: |
| - * |
| - * <br> <br> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr><td> |
| - * <table cellpadding="1" cellspacing="0" border="1" align="center"> |
| - * <tr><td> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * score(q,d) = |
| - * <font color="#FF9933">coord-factor(q,d)</font> · |
| - * <font color="#CCCC00">query-boost(q)</font> · |
| - * </td> |
| - * <td valign="middle" align="center"> |
| - * <table> |
| - * <tr><td align="center"><small><font color="#993399">V(q) · V(d)</font></small></td></tr> |
| - * <tr><td align="center">–––––––––</td></tr> |
| - * <tr><td align="center"><small><font color="#FF33CC">|V(q)|</font></small></td></tr> |
| - * </table> |
| - * </td> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * · <font color="#3399FF">doc-len-norm(d)</font> |
| - * · <font color="#3399FF">doc-boost(d)</font> |
| - * </td> |
| - * </tr> |
| - * </table> |
| - * </td></tr> |
| - * </table> |
| - * </td></tr> |
| - * <tr><td> |
| - * <center><font=-1><u>Lucene Conceptual Scoring Formula</u></font></center> |
| - * </td></tr> |
| - * </table> |
| - * <br> <br> |
| - * |
| - * <p>The conceptual formula is a simplification in the sense that (1) terms and documents |
| - * are fielded and (2) boosts are usually per query term rather than per query. |
| - * |
| - * <p>We now describe how Lucene implements this conceptual scoring formula, and |
| - * derive from it <i>Lucene's Practical Scoring Function</i>. |
| - * |
| - * <p>For efficient score computation some scoring components |
| - * are computed and aggregated in advance: |
| - * |
| - * <ul> |
| - * <li><i>Query-boost</i> for the query (actually for each query term) |
| - * is known when search starts. |
| - * </li> |
| - * |
| - * <li>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts, |
| - * as it is independent of the document being scored. |
| - * From search optimization perspective, it is a valid question |
| - * why bother to normalize the query at all, because all |
| - * scored documents will be multiplied by the same <i>|V(q)|</i>, |
| - * and hence documents ranks (their order by score) will not |
| - * be affected by this normalization. |
| - * There are two good reasons to keep this normalization: |
| - * <ul> |
| - * <li>Recall that |
| - * <a href="http://en.wikipedia.org/wiki/Cosine_similarity"> |
| - * Cosine Similarity</a> can be used find how similar |
| - * two documents are. One can use Lucene for e.g. |
| - * clustering, and use a document as a query to compute |
| - * its similarity to other documents. |
| - * In this use case it is important that the score of document <i>d3</i> |
| - * for query <i>d1</i> is comparable to the score of document <i>d3</i> |
| - * for query <i>d2</i>. In other words, scores of a document for two |
| - * distinct queries should be comparable. |
| - * There are other applications that may require this. |
| - * And this is exactly what normalizing the query vector <i>V(q)</i> |
| - * provides: comparability (to a certain extent) of two or more queries. |
| - * </li> |
| - * |
| - * <li>Applying query normalization on the scores helps to keep the |
| - * scores around the unit vector, hence preventing loss of score data |
| - * because of floating point precision limitations. |
| - * </li> |
| - * </ul> |
| - * </li> |
| - * |
| - * <li>Document length norm <i>doc-len-norm(d)</i> and document |
| - * boost <i>doc-boost(d)</i> are known at indexing time. |
| - * They are computed in advance and their multiplication |
| - * is saved as a single value in the index: <i>norm(d)</i>. |
| - * (In the equations below, <i>norm(t in d)</i> means <i>norm(field(t) in doc d)</i> |
| - * where <i>field(t)</i> is the field associated with term <i>t</i>.) |
| - * </li> |
| - * </ul> |
| - * |
| - * <p><i>Lucene's Practical Scoring Function</i> is derived from the above. |
| - * The color codes demonstrate how it relates |
| - * to those of the <i>conceptual</i> formula: |
| - * |
| - * <P> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr><td> |
| - * <table cellpadding="" cellspacing="2" border="2" align="center"> |
| - * <tr><td> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * score(q,d) = |
| - * <A HREF="#formula_coord"><font color="#FF9933">coord(q,d)</font></A> · |
| - * <A HREF="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></A> · |
| - * </td> |
| - * <td valign="bottom" align="center" rowspan="1"> |
| - * <big><big><big>∑</big></big></big> |
| - * </td> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * <big><big>(</big></big> |
| - * <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> · |
| - * <A HREF="#formula_idf"><font color="#993399">idf(t)</font></A><sup>2</sup> · |
| - * <A HREF="#formula_termBoost"><font color="#CCCC00">t.getBoost()</font></A> · |
| - * <A HREF="#formula_norm"><font color="#3399FF">norm(t,d)</font></A> |
| - * <big><big>)</big></big> |
| - * </td> |
| - * </tr> |
| - * <tr valigh="top"> |
| - * <td></td> |
| - * <td align="center"><small>t in q</small></td> |
| - * <td></td> |
| - * </tr> |
| - * </table> |
| - * </td></tr> |
| - * </table> |
| - * </td></tr> |
| - * <tr><td> |
| - * <center><font=-1><u>Lucene Practical Scoring Function</u></font></center> |
| - * </td></tr> |
| - * </table> |
| - * |
| - * <p> where |
| - * <ol> |
| - * <li> |
| - * <A NAME="formula_tf"></A> |
| - * <b><i>tf(t in d)</i></b> |
| - * correlates to the term's <i>frequency</i>, |
| - * defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>. |
| - * Documents that have more occurrences of a given term receive a higher score. |
| - * Note that <i>tf(t in q)</i> is assumed to be <i>1</i> and therefore it does not appear in this equation, |
| - * However if a query contains twice the same term, there will be |
| - * two term-queries with that same term and hence the computation would still be correct (although |
| - * not very efficient). |
| - * The default computation for <i>tf(t in d)</i> in |
| - * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is: |
| - * |
| - * <br> <br> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)} = |
| - * </td> |
| - * <td valign="top" align="center" rowspan="1"> |
| - * frequency<sup><big>½</big></sup> |
| - * </td> |
| - * </tr> |
| - * </table> |
| - * <br> <br> |
| - * </li> |
| - * |
| - * <li> |
| - * <A NAME="formula_idf"></A> |
| - * <b><i>idf(t)</i></b> stands for Inverse Document Frequency. This value |
| - * correlates to the inverse of <i>docFreq</i> |
| - * (the number of documents in which the term <i>t</i> appears). |
| - * This means rarer terms give higher contribution to the total score. |
| - * <i>idf(t)</i> appears for <i>t</i> in both the query and the document, |
| - * hence it is squared in the equation. |
| - * The default computation for <i>idf(t)</i> in |
| - * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is: |
| - * |
| - * <br> <br> |
| - * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right"> |
| - * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)} = |
| - * </td> |
| - * <td valign="middle" align="center"> |
| - * 1 + log <big>(</big> |
| - * </td> |
| - * <td valign="middle" align="center"> |
| - * <table> |
| - * <tr><td align="center"><small>numDocs</small></td></tr> |
| - * <tr><td align="center">–––––––––</td></tr> |
| - * <tr><td align="center"><small>docFreq+1</small></td></tr> |
| - * </table> |
| - * </td> |
| - * <td valign="middle" align="center"> |
| - * <big>)</big> |
| - * </td> |
| - * </tr> |
| - * </table> |
| - * <br> <br> |
| - * </li> |
| - * |
| - * <li> |
| - * <A NAME="formula_coord"></A> |
| - * <b><i>coord(q,d)</i></b> |
| - * is a score factor based on how many of the query terms are found in the specified document. |
| - * Typically, a document that contains more of the query's terms will receive a higher score |
| - * than another document with fewer query terms. |
| - * This is a search time factor computed in |
| - * {@link SimilarityProvider#coord(int, int) coord(q,d)} |
| - * by the Similarity in effect at search time. |
| - * <br> <br> |
| - * </li> |
| - * |
| - * <li><b> |
| - * <A NAME="formula_queryNorm"></A> |
| - * <i>queryNorm(q)</i> |
| - * </b> |
| - * is a normalizing factor used to make scores between queries comparable. |
| - * This factor does not affect document ranking (since all ranked documents are multiplied by the same factor), |
| - * but rather just attempts to make scores from different queries (or even different indexes) comparable. |
| - * This is a search time factor computed by the Similarity in effect at search time. |
| - * |
| - * The default computation in |
| - * {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) DefaultSimilarity} |
| - * produces a <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norm</a>: |
| - * <br> <br> |
| - * <table cellpadding="1" cellspacing="0" border="0" align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * queryNorm(q) = |
| - * {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) queryNorm(sumOfSquaredWeights)} |
| - * = |
| - * </td> |
| - * <td valign="middle" align="center" rowspan="1"> |
| - * <table> |
| - * <tr><td align="center"><big>1</big></td></tr> |
| - * <tr><td align="center"><big> |
| - * –––––––––––––– |
| - * </big></td></tr> |
| - * <tr><td align="center">sumOfSquaredWeights<sup><big>½</big></sup></td></tr> |
| - * </table> |
| - * </td> |
| - * </tr> |
| - * </table> |
| - * <br> <br> |
| - * |
| - * The sum of squared weights (of the query terms) is |
| - * computed by the query {@link org.apache.lucene.search.Weight} object. |
| - * For example, a {@link org.apache.lucene.search.BooleanQuery} |
| - * computes this value as: |
| - * |
| - * <br> <br> |
| - * <table cellpadding="1" cellspacing="0" border="0"n align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights} = |
| - * {@link org.apache.lucene.search.Query#getBoost() q.getBoost()} <sup><big>2</big></sup> |
| - * · |
| - * </td> |
| - * <td valign="bottom" align="center" rowspan="1"> |
| - * <big><big><big>∑</big></big></big> |
| - * </td> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * <big><big>(</big></big> |
| - * <A HREF="#formula_idf">idf(t)</A> · |
| - * <A HREF="#formula_termBoost">t.getBoost()</A> |
| - * <big><big>) <sup>2</sup> </big></big> |
| - * </td> |
| - * </tr> |
| - * <tr valigh="top"> |
| - * <td></td> |
| - * <td align="center"><small>t in q</small></td> |
| - * <td></td> |
| - * </tr> |
| - * </table> |
| - * <br> <br> |
| - * |
| - * </li> |
| - * |
| - * <li> |
| - * <A NAME="formula_termBoost"></A> |
| - * <b><i>t.getBoost()</i></b> |
| - * is a search time boost of term <i>t</i> in the query <i>q</i> as |
| - * specified in the query text |
| - * (see <A HREF="../../../../../../queryparsersyntax.html#Boosting a Term">query syntax</A>), |
| - * or as set by application calls to |
| - * {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}. |
| - * Notice that there is really no direct API for accessing a boost of one term in a multi term query, |
| - * but rather multi terms are represented in a query as multi |
| - * {@link org.apache.lucene.search.TermQuery TermQuery} objects, |
| - * and so the boost of a term in the query is accessible by calling the sub-query |
| - * {@link org.apache.lucene.search.Query#getBoost() getBoost()}. |
| - * <br> <br> |
| - * </li> |
| - * |
| - * <li> |
| - * <A NAME="formula_norm"></A> |
| - * <b><i>norm(t,d)</i></b> encapsulates a few (indexing time) boost and length factors: |
| - * |
| - * <ul> |
| - * <li><b>Document boost</b> - set by calling |
| - * {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()} |
| - * before adding the document to the index. |
| - * </li> |
| - * <li><b>Field boost</b> - set by calling |
| - * {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()} |
| - * before adding the field to a document. |
| - * </li> |
| - * <li><b>lengthNorm</b> - computed |
| - * when the document is added to the index in accordance with the number of tokens |
| - * of this field in the document, so that shorter fields contribute more to the score. |
| - * LengthNorm is computed by the Similarity class in effect at indexing. |
| - * </li> |
| - * </ul> |
| - * The {@link #computeNorm} method is responsible for |
| - * combining all of these factors into a single float. |
| - * |
| - * <p> |
| - * When a document is added to the index, all the above factors are multiplied. |
| - * If the document has multiple fields with the same name, all their boosts are multiplied together: |
| - * |
| - * <br> <br> |
| - * <table cellpadding="1" cellspacing="0" border="0"n align="center"> |
| - * <tr> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * norm(t,d) = |
| - * {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()} |
| - * · |
| - * lengthNorm |
| - * · |
| - * </td> |
| - * <td valign="bottom" align="center" rowspan="1"> |
| - * <big><big><big>∏</big></big></big> |
| - * </td> |
| - * <td valign="middle" align="right" rowspan="1"> |
| - * {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}() |
| - * </td> |
| - * </tr> |
| - * <tr valigh="top"> |
| - * <td></td> |
| - * <td align="center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td> |
| - * <td></td> |
| - * </tr> |
| - * </table> |
| - * <br> <br> |
| - * However the resulted <i>norm</i> value is {@link #encodeNormValue(float) encoded} as a single byte |
| - * before being stored. |
| - * At search time, the norm byte value is read from the index |
| - * {@link org.apache.lucene.store.Directory directory} and |
| - * {@link #decodeNormValue(byte) decoded} back to a float <i>norm</i> value. |
| - * This encoding/decoding, while reducing index size, comes with the price of |
| - * precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>. |
| - * For instance, <i>decode(encode(0.89)) = 0.75</i>. |
| - * <br> <br> |
| - * Compression of norm values to a single byte saves memory at search time, |
| - * because once a field is referenced at search time, its norms - for |
| - * all documents - are maintained in memory. |
| - * <br> <br> |
| - * The rationale supporting such lossy compression of norm values is that |
| - * given the difficulty (and inaccuracy) of users to express their true information |
| - * need by a query, only big differences matter. |
| - * <br> <br> |
| - * Last, note that search time is too late to modify this <i>norm</i> part of scoring, e.g. by |
| - * using a different {@link Similarity} for search. |
| - * <br> <br> |
| - * </li> |
| - * </ol> |
| - * |
| * @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider) |
| * @see IndexSearcher#setSimilarityProvider(SimilarityProvider) |
| */ |
| @@ -575,36 +88,6 @@
|
| */ |
| public abstract float computeNorm(String field, FieldInvertState state); |
| |
| - /** Computes the normalization value for a field given the total number of |
| - * terms contained in a field. These values, together with field boosts, are |
| - * stored in an index and multipled into scores for hits on each field by the |
| - * search code. |
| - * |
| - * <p>Matches in longer fields are less precise, so implementations of this |
| - * method usually return smaller values when <code>numTokens</code> is large, |
| - * and larger values when <code>numTokens</code> is small. |
| - * |
| - * <p>Note that the return values are computed under |
| - * {@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.document.Document)} |
| - * and then stored using |
| - * {@link #encodeNormValue(float)}. |
| - * Thus they have limited precision, and documents |
| - * must be re-indexed if this method is altered. |
| - * |
| - * @param fieldName the name of the field |
| - * @param numTokens the total number of tokens contained in fields named |
| - * <i>fieldName</i> of <i>doc</i>. |
| - * @return a normalization factor for hits on this field of this document |
| - * |
| - * @see org.apache.lucene.document.Field#setBoost(float) |
| - * |
| - * @deprecated Please override computeNorm instead |
| - */ |
| - @Deprecated |
| - public final float lengthNorm(String fieldName, int numTokens) { |
| - throw new UnsupportedOperationException("please use computeNorm instead"); |
| - } |
| - |
| /** Encodes a normalization factor for storage in an index. |
| * |
| * <p>The encoding uses a three-bit mantissa, a five-bit exponent, and |
| @@ -621,25 +104,6 @@
|
| public byte encodeNormValue(float f) { |
| return SmallFloat.floatToByte315(f); |
| } |
| - |
| - /** Computes a score factor based on a term or phrase's frequency in a |
| - * document. This value is multiplied by the {@link #idf(int, int)} |
| - * factor for each term in the query and these products are then summed to |
| - * form the initial score for a document. |
| - * |
| - * <p>Terms and phrases repeated in a document indicate the topic of the |
| - * document, so implementations of this method usually return larger values |
| - * when <code>freq</code> is large, and smaller values when <code>freq</code> |
| - * is small. |
| - * |
| - * <p>The default implementation calls {@link #tf(float)}. |
| - * |
| - * @param freq the frequency of a term within a document |
| - * @return a score factor based on a term's within-document frequency |
| - */ |
| - public float tf(int freq) { |
| - return tf((float)freq); |
| - } |
| |
| /** Computes the amount of a sloppy phrase match, based on an edit distance. |
| * This value is summed for each sloppy phrase match in a document to form |
| @@ -656,125 +120,7 @@
|
| */ |
| public abstract float sloppyFreq(int distance); |
| |
| - /** Computes a score factor based on a term or phrase's frequency in a |
| - * document. This value is multiplied by the {@link #idf(int, int)} |
| - * factor for each term in the query and these products are then summed to |
| - * form the initial score for a document. |
| - * |
| - * <p>Terms and phrases repeated in a document indicate the topic of the |
| - * document, so implementations of this method usually return larger values |
| - * when <code>freq</code> is large, and smaller values when <code>freq</code> |
| - * is small. |
| - * |
| - * @param freq the frequency of a term within a document |
| - * @return a score factor based on a term's within-document frequency |
| - */ |
| - public abstract float tf(float freq); |
| - |
| /** |
| - * Computes a score factor for a simple term and returns an explanation |
| - * for that score factor. |
| - * |
| - * <p> |
| - * The default implementation uses: |
| - * |
| - * <pre> |
| - * idf(docFreq, searcher.maxDoc()); |
| - * </pre> |
| - * |
| - * Note that {@link IndexSearcher#maxDoc()} is used instead of |
| - * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also |
| - * {@link IndexSearcher#docFreq(Term)} is used, and when the latter |
| - * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction. |
| - * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute |
| - * |
| - * @param term the term in question |
| - * @param searcher the document collection being searched |
| - * @param docFreq externally computed docFreq for this term |
| - * @return an IDFExplain object that includes both an idf score factor |
| - and an explanation for the term. |
| - * @throws IOException |
| - */ |
| - public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher, int docFreq) throws IOException { |
| - final int df = docFreq; |
| - final int max = searcher.maxDoc(); |
| - final float idf = idf(df, max); |
| - return new IDFExplanation() { |
| - @Override |
| - public String explain() { |
| - return "idf(docFreq=" + df + |
| - ", maxDocs=" + max + ")"; |
| - } |
| - @Override |
| - public float getIdf() { |
| - return idf; |
| - }}; |
| - } |
| - |
| - /** |
| - * This method forwards to {@link |
| - * #idfExplain(Term,IndexSearcher,int)} by passing |
| - * <code>searcher.docFreq(term)</code> as the docFreq. |
| - */ |
| - public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher) throws IOException { |
| - return idfExplain(term, searcher, searcher.docFreq(term)); |
| - } |
| - |
| - /** |
| - * Computes a score factor for a phrase. |
| - * |
| - * <p> |
| - * The default implementation sums the idf factor for |
| - * each term in the phrase. |
| - * |
| - * @param terms the terms in the phrase |
| - * @param searcher the document collection being searched |
| - * @return an IDFExplain object that includes both an idf |
| - * score factor for the phrase and an explanation |
| - * for each term. |
| - * @throws IOException |
| - */ |
| - public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException { |
| - final int max = searcher.maxDoc(); |
| - float idf = 0.0f; |
| - final StringBuilder exp = new StringBuilder(); |
| - for (final Term term : terms ) { |
| - final int df = searcher.docFreq(term); |
| - idf += idf(df, max); |
| - exp.append(" "); |
| - exp.append(term.text()); |
| - exp.append("="); |
| - exp.append(df); |
| - } |
| - final float fIdf = idf; |
| - return new IDFExplanation() { |
| - @Override |
| - public float getIdf() { |
| - return fIdf; |
| - } |
| - @Override |
| - public String explain() { |
| - return exp.toString(); |
| - } |
| - }; |
| - } |
| - |
| - /** Computes a score factor based on a term's document frequency (the number |
| - * of documents which contain the term). This value is multiplied by the |
| - * {@link #tf(int)} factor for each term in the query and these products are |
| - * then summed to form the initial score for a document. |
| - * |
| - * <p>Terms that occur in fewer documents are better indicators of topic, so |
| - * implementations of this method usually return larger values for rare terms, |
| - * and smaller values for common terms. |
| - * |
| - * @param docFreq the number of documents which contain the term |
| - * @param numDocs the total number of documents in the collection |
| - * @return a score factor based on the term's document frequency |
| - */ |
| - public abstract float idf(int docFreq, int numDocs); |
| - |
| - /** |
| * Calculate a scoring factor based on the data in the payload. Overriding implementations |
| * are responsible for interpreting what is in the payload. Lucene makes no assumptions about |
| * what is in the byte array. |
| @@ -796,5 +142,17 @@
|
| { |
| return 1; |
| } |
| - |
| + |
| + public abstract IDFExplanation computeWeight(IndexSearcher searcher, String fieldName, PerReaderTermState... termStats) throws IOException; |
| + |
| + public abstract ExactDocScorer exactDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException; |
| + public abstract SloppyDocScorer sloppyDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException; |
| + |
| + public abstract class ExactDocScorer { |
| + public abstract float score(int doc, int freq); |
| + } |
| + |
| + public abstract class SloppyDocScorer { |
| + public abstract float score(int doc, float freq); |
| + } |
| } |
| Index: lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (working copy)
|
| @@ -145,7 +145,7 @@
|
| @Override |
| public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException { |
| return new PayloadNearSpanScorer(query.getSpans(context), this, |
| - similarity, context.reader.norms(query.getField())); |
| + similarity, query.getField(), context); |
| } |
| } |
| |
| @@ -155,8 +155,8 @@
|
| private int payloadsSeen; |
| |
| protected PayloadNearSpanScorer(Spans spans, Weight weight, |
| - Similarity similarity, byte[] norms) throws IOException { |
| - super(spans, weight, similarity, norms); |
| + Similarity similarity, String field, AtomicReaderContext context) throws IOException { |
| + super(spans, weight, similarity, field, context); |
| this.spans = spans; |
| } |
| |
| Index: lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java (working copy)
|
| @@ -76,7 +76,7 @@
|
| @Override |
| public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException { |
| return new PayloadTermSpanScorer((TermSpans) query.getSpans(context), |
| - this, similarity, context.reader.norms(query.getField())); |
| + this, similarity, query.getField(), context); |
| } |
| |
| protected class PayloadTermSpanScorer extends SpanScorer { |
| @@ -86,8 +86,8 @@
|
| private final TermSpans termSpans; |
| |
| public PayloadTermSpanScorer(TermSpans spans, Weight weight, |
| - Similarity similarity, byte[] norms) throws IOException { |
| - super(spans, weight, similarity, norms); |
| + Similarity similarity, String field, AtomicReaderContext context) throws IOException { |
| + super(spans, weight, similarity, field, context); |
| termSpans = spans; |
| } |
| |
| Index: lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java (working copy)
|
| @@ -18,13 +18,15 @@
|
| */ |
| |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| +import org.apache.lucene.index.IndexReader.ReaderContext; |
| import org.apache.lucene.index.Term; |
| import org.apache.lucene.search.*; |
| import org.apache.lucene.search.Explanation.IDFExplanation; |
| +import org.apache.lucene.util.PerReaderTermState; |
| |
| import java.io.IOException; |
| -import java.util.HashSet; |
| import java.util.Set; |
| +import java.util.TreeSet; |
| |
| /** |
| * Expert-only. Public for use by other weight implementations |
| @@ -45,10 +47,14 @@
|
| this.similarity = searcher.getSimilarityProvider().get(query.getField()); |
| this.query = query; |
| |
| - terms=new HashSet<Term>(); |
| + terms=new TreeSet<Term>(); |
| query.extractTerms(terms); |
| - |
| - idfExp = similarity.idfExplain(terms, searcher); |
| + final ReaderContext context = searcher.getTopReaderContext(); |
| + final PerReaderTermState states[] = new PerReaderTermState[terms.size()]; |
| + int i = 0; |
| + for (Term term : terms) |
| + states[i++] = PerReaderTermState.build(context, term, true); |
| + idfExp = similarity.computeWeight(searcher, query.getField(), states); |
| idf = idfExp.getIdf(); |
| } |
| |
| @@ -73,8 +79,7 @@
|
| |
| @Override |
| public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException { |
| - return new SpanScorer(query.getSpans(context), this, similarity, context.reader |
| - .norms(query.getField())); |
| + return new SpanScorer(query.getSpans(context), this, similarity, query.getField(), context); |
| } |
| |
| @Override |
| Index: lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java (working copy)
|
| @@ -19,7 +19,9 @@
|
| |
| import java.io.IOException; |
| |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.search.Explanation; |
| +import org.apache.lucene.search.TFIDFSimilarity; |
| import org.apache.lucene.search.Weight; |
| import org.apache.lucene.search.Scorer; |
| import org.apache.lucene.search.Similarity; |
| @@ -29,22 +31,21 @@
|
| */ |
| public class SpanScorer extends Scorer { |
| protected Spans spans; |
| - protected byte[] norms; |
| - protected float value; |
| |
| protected boolean more = true; |
| |
| protected int doc; |
| protected float freq; |
| protected final Similarity similarity; |
| + protected final Similarity.SloppyDocScorer docScorer; |
| |
| - protected SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms) |
| + protected SpanScorer(Spans spans, Weight weight, Similarity similarity, String field, AtomicReaderContext context) |
| throws IOException { |
| super(weight); |
| this.similarity = similarity; |
| + this.docScorer = similarity.sloppyDocScorer(weight, field, context); |
| this.spans = spans; |
| - this.norms = norms; |
| - this.value = weight.getValue(); |
| + |
| if (this.spans.next()) { |
| doc = -1; |
| } else { |
| @@ -94,8 +95,7 @@
|
| |
| @Override |
| public float score() throws IOException { |
| - float raw = similarity.tf(freq) * value; // raw score |
| - return norms == null? raw : raw * similarity.decodeNormValue(norms[doc]); // normalize |
| + return docScorer.score(doc, freq); |
| } |
| |
| @Override |
| @@ -105,15 +105,18 @@
|
| |
| /** This method is no longer an official member of {@link Scorer}, |
| * but it is needed by SpanWeight to build an explanation. */ |
| + // nocommit: die |
| protected Explanation explain(final int doc) throws IOException { |
| Explanation tfExplanation = new Explanation(); |
| |
| int expDoc = advance(doc); |
| |
| float phraseFreq = (expDoc == doc) ? freq : 0.0f; |
| - tfExplanation.setValue(similarity.tf(phraseFreq)); |
| - tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")"); |
| - |
| + if (similarity instanceof TFIDFSimilarity) { |
| + TFIDFSimilarity tfidf = (TFIDFSimilarity) similarity; |
| + tfExplanation.setValue(tfidf.tf(phraseFreq)); |
| + tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")"); |
| + } |
| return tfExplanation; |
| } |
| |
| Index: lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java (working copy)
|
| @@ -20,16 +20,20 @@
|
| import java.io.IOException; |
| import java.util.HashMap; |
| |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| + |
| final class SloppyPhraseScorer extends PhraseScorer { |
| private int slop; |
| private PhrasePositions repeats[]; |
| private PhrasePositions tmpPos[]; // for flipping repeating pps. |
| private boolean checkedRepeats; |
| - |
| + private final Similarity similarity; |
| + |
| SloppyPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, Similarity similarity, |
| - int slop, byte[] norms) { |
| - super(weight, postings, similarity, norms); |
| + int slop, String field, AtomicReaderContext context) throws IOException { |
| + super(weight, postings, similarity, field, context); |
| this.slop = slop; |
| + this.similarity = similarity; |
| } |
| |
| /** |
| Index: lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy)
|
| @@ -22,12 +22,14 @@
|
| |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| +import org.apache.lucene.index.IndexReader.ReaderContext; |
| import org.apache.lucene.index.Term; |
| import org.apache.lucene.index.DocsEnum; |
| import org.apache.lucene.index.DocsAndPositionsEnum; |
| import org.apache.lucene.search.Explanation.IDFExplanation; |
| import org.apache.lucene.util.ArrayUtil; |
| import org.apache.lucene.util.BytesRef; |
| +import org.apache.lucene.util.PerReaderTermState; |
| import org.apache.lucene.util.ToStringUtils; |
| import org.apache.lucene.util.PriorityQueue; |
| import org.apache.lucene.util.Bits; |
| @@ -140,15 +142,16 @@
|
| public MultiPhraseWeight(IndexSearcher searcher) |
| throws IOException { |
| this.similarity = searcher.getSimilarityProvider().get(field); |
| - |
| + final ReaderContext context = searcher.getTopReaderContext(); |
| + |
| // compute idf |
| - ArrayList<Term> allTerms = new ArrayList<Term>(); |
| + ArrayList<PerReaderTermState> allTerms = new ArrayList<PerReaderTermState>(); |
| for(final Term[] terms: termArrays) { |
| for (Term term: terms) { |
| - allTerms.add(term); |
| + allTerms.add(PerReaderTermState.build(context, term, true)); |
| } |
| } |
| - idfExp = similarity.idfExplain(allTerms, searcher); |
| + idfExp = similarity.computeWeight(searcher, field, allTerms.toArray(new PerReaderTermState[allTerms.size()])); |
| idf = idfExp.getIdf(); |
| } |
| |
| @@ -223,8 +226,7 @@
|
| } |
| |
| if (slop == 0) { |
| - ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, |
| - reader.norms(field)); |
| + ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, field, context); |
| if (s.noDocs) { |
| return null; |
| } else { |
| @@ -232,13 +234,18 @@
|
| } |
| } else { |
| return new SloppyPhraseScorer(this, postingsFreqs, similarity, |
| - slop, reader.norms(field)); |
| + slop, field, context); |
| } |
| } |
| |
| @Override |
| public Explanation explain(AtomicReaderContext context, int doc) |
| throws IOException { |
| + //nocommit: fix explains |
| + if (!(similarity instanceof TFIDFSimilarity)) |
| + return new ComplexExplanation(); |
| + final TFIDFSimilarity similarity = (TFIDFSimilarity) this.similarity; |
| + |
| ComplexExplanation result = new ComplexExplanation(); |
| result.setDescription("weight("+getQuery()+" in "+doc+"), product of:"); |
| |
| Index: lucene/src/java/org/apache/lucene/search/PhraseScorer.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/PhraseScorer.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/PhraseScorer.java (working copy)
|
| @@ -19,6 +19,8 @@
|
| |
| import java.io.IOException; |
| |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| + |
| /** Expert: Scoring functionality for phrase queries. |
| * <br>A document is considered matching if it contains the phrase-query terms |
| * at "valid" positions. What "valid positions" are |
| @@ -40,14 +42,12 @@
|
| |
| private float freq; //phrase frequency in current doc as computed by phraseFreq(). |
| |
| - protected final Similarity similarity; |
| + protected final Similarity.SloppyDocScorer docScorer; |
| |
| PhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, |
| - Similarity similarity, byte[] norms) { |
| + Similarity similarity, String field, AtomicReaderContext context) throws IOException { |
| super(weight); |
| - this.similarity = similarity; |
| - this.norms = norms; |
| - this.value = weight.getValue(); |
| + docScorer = similarity.sloppyDocScorer(weight, field, context); |
| |
| // convert tps to a list of phrase positions. |
| // note: phrase-position differs from term-position in that its position |
| @@ -107,9 +107,7 @@
|
| |
| @Override |
| public float score() throws IOException { |
| - //System.out.println("scoring " + first.doc); |
| - float raw = similarity.tf(freq) * value; // raw score |
| - return norms == null ? raw : raw * similarity.decodeNormValue(norms[first.doc]); // normalize |
| + return docScorer.score(first.doc, freq); |
| } |
| |
| @Override |
| Index: lucene/src/java/org/apache/lucene/search/PhraseQuery.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/PhraseQuery.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/PhraseQuery.java (working copy)
|
| @@ -22,10 +22,14 @@
|
| import java.util.ArrayList; |
| |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| +import org.apache.lucene.index.IndexReader.ReaderContext; |
| import org.apache.lucene.index.Term; |
| import org.apache.lucene.index.DocsAndPositionsEnum; |
| import org.apache.lucene.index.IndexReader; |
| +import org.apache.lucene.index.TermState; |
| +import org.apache.lucene.index.TermsEnum; |
| import org.apache.lucene.search.Explanation.IDFExplanation; |
| +import org.apache.lucene.util.PerReaderTermState; |
| import org.apache.lucene.util.ToStringUtils; |
| import org.apache.lucene.util.ArrayUtil; |
| import org.apache.lucene.util.Bits; |
| @@ -143,12 +147,16 @@
|
| private float queryNorm; |
| private float queryWeight; |
| private IDFExplanation idfExp; |
| + private transient PerReaderTermState states[]; |
| |
| public PhraseWeight(IndexSearcher searcher) |
| throws IOException { |
| this.similarity = searcher.getSimilarityProvider().get(field); |
| - |
| - idfExp = similarity.idfExplain(terms, searcher); |
| + final ReaderContext context = searcher.getTopReaderContext(); |
| + states = new PerReaderTermState[terms.size()]; |
| + for (int i = 0; i < terms.size(); i++) |
| + states[i] = PerReaderTermState.build(context, terms.get(i), true); |
| + idfExp = similarity.computeWeight(searcher, field, states); |
| idf = idfExp.getIdf(); |
| } |
| |
| @@ -183,21 +191,29 @@
|
| final Bits delDocs = reader.getDeletedDocs(); |
| for (int i = 0; i < terms.size(); i++) { |
| final Term t = terms.get(i); |
| + final TermState state = states[i].get(context.ord); |
| + if (state == null) /* term doesnt exist in this segment */ |
| + return null; |
| DocsAndPositionsEnum postingsEnum = reader.termPositionsEnum(delDocs, |
| t.field(), |
| - t.bytes()); |
| + t.bytes(), |
| + state); |
| // PhraseQuery on a field that did not index |
| // positions. |
| if (postingsEnum == null) { |
| - if (reader.termDocsEnum(delDocs, t.field(), t.bytes()) != null) { |
| + if (reader.termDocsEnum(delDocs, t.field(), t.bytes(), state) != null) { |
| // term does exist, but has no positions |
| throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")"); |
| } else { |
| // term does not exist |
| + // nocommit: should be impossible, state should be null? |
| return null; |
| } |
| } |
| - postingsFreqs[i] = new PostingsAndFreq(postingsEnum, reader.docFreq(t.field(), t.bytes()), positions.get(i).intValue()); |
| + // get the docFreq without seeking |
| + TermsEnum te = reader.fields().terms(field).getThreadTermsEnum(); |
| + te.seek(t.bytes(), state); |
| + postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue()); |
| } |
| |
| // sort by increasing docFreq order |
| @@ -206,8 +222,7 @@
|
| } |
| |
| if (slop == 0) { // optimize exact case |
| - ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, |
| - reader.norms(field)); |
| + ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, field, context); |
| if (s.noDocs) { |
| return null; |
| } else { |
| @@ -215,15 +230,18 @@
|
| } |
| } else { |
| return |
| - new SloppyPhraseScorer(this, postingsFreqs, similarity, slop, |
| - reader.norms(field)); |
| + new SloppyPhraseScorer(this, postingsFreqs, similarity, slop, field, context); |
| } |
| } |
| |
| @Override |
| public Explanation explain(AtomicReaderContext context, int doc) |
| throws IOException { |
| - |
| + //nocommit: fix explains |
| + if (!(similarity instanceof TFIDFSimilarity)) |
| + return new ComplexExplanation(); |
| + final TFIDFSimilarity similarity = (TFIDFSimilarity) this.similarity; |
| + |
| Explanation result = new Explanation(); |
| result.setDescription("weight("+getQuery()+" in "+doc+"), product of:"); |
| |
| Index: lucene/src/java/org/apache/lucene/search/TermQuery.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/TermQuery.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/TermQuery.java (working copy)
|
| @@ -50,16 +50,12 @@
|
| private final IDFExplanation idfExp; |
| private transient PerReaderTermState termStates; |
| |
| - public TermWeight(IndexSearcher searcher, PerReaderTermState termStates, int docFreq) |
| + public TermWeight(IndexSearcher searcher, PerReaderTermState termStates) |
| throws IOException { |
| assert termStates != null : "PerReaderTermState must not be null"; |
| this.termStates = termStates; |
| this.similarity = searcher.getSimilarityProvider().get(term.field()); |
| - if (docFreq != -1) { |
| - idfExp = similarity.idfExplain(term, searcher, docFreq); |
| - } else { |
| - idfExp = similarity.idfExplain(term, searcher); |
| - } |
| + idfExp = similarity.computeWeight(searcher, term.field(), termStates); |
| idf = idfExp.getIdf(); |
| } |
| |
| @@ -98,7 +94,7 @@
|
| } |
| final DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), field, term.bytes(), state); |
| assert docs != null; |
| - return new TermScorer(this, docs, similarity, context.reader.norms(field)); |
| + return new TermScorer(this, docs, similarity, field, context); |
| } |
| |
| private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException { |
| @@ -110,6 +106,11 @@
|
| @Override |
| public Explanation explain(AtomicReaderContext context, int doc) |
| throws IOException { |
| + //nocommit: fix explains |
| + if (!(similarity instanceof TFIDFSimilarity)) |
| + return new ComplexExplanation(); |
| + final TFIDFSimilarity similarity = (TFIDFSimilarity) this.similarity; |
| + |
| final IndexReader reader = context.reader; |
| |
| ComplexExplanation result = new ComplexExplanation(); |
| @@ -214,20 +215,20 @@
|
| @Override |
| public Weight createWeight(IndexSearcher searcher) throws IOException { |
| final ReaderContext context = searcher.getTopReaderContext(); |
| - final int weightDocFreq; |
| final PerReaderTermState termState; |
| if (perReaderTermState == null || perReaderTermState.topReaderContext != context) { |
| // make TermQuery single-pass if we don't have a PRTS or if the context differs! |
| termState = PerReaderTermState.build(context, term, true); // cache term lookups! |
| - // we must not ignore the given docFreq - if set use the given value |
| - weightDocFreq = docFreq == -1 ? termState.docFreq() : docFreq; |
| } else { |
| // PRTS was pre-build for this IS |
| termState = this.perReaderTermState; |
| - weightDocFreq = docFreq; |
| } |
| + |
| + // we must not ignore the given docFreq - if set use the given value (lie) |
| + if (docFreq != -1) |
| + termState.setDocFreq(docFreq); |
| |
| - return new TermWeight(searcher, termState, weightDocFreq); |
| + return new TermWeight(searcher, termState); |
| } |
| |
| @Override |
| Index: lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java (revision 0)
|
| +++ lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java (revision 0)
|
| @@ -0,0 +1,751 @@
|
| +package org.apache.lucene.search; |
| + |
| +/** |
| + * Licensed to the Apache Software Foundation (ASF) under one or more |
| + * contributor license agreements. See the NOTICE file distributed with |
| + * this work for additional information regarding copyright ownership. |
| + * The ASF licenses this file to You under the Apache License, Version 2.0 |
| + * (the "License"); you may not use this file except in compliance with |
| + * the License. You may obtain a copy of the License at |
| + * |
| + * http://www.apache.org/licenses/LICENSE-2.0 |
| + * |
| + * Unless required by applicable law or agreed to in writing, software |
| + * distributed under the License is distributed on an "AS IS" BASIS, |
| + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| + * See the License for the specific language governing permissions and |
| + * limitations under the License. |
| + */ |
| + |
| + |
| +import java.io.IOException; |
| +import java.io.Serializable; |
| + |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| +import org.apache.lucene.index.Term; |
| +import org.apache.lucene.search.Explanation.IDFExplanation; |
| +import org.apache.lucene.util.PerReaderTermState; |
| + |
| + |
| +/** |
| + * Expert: Scoring API. |
| + * |
| + * <p>Similarity defines the components of Lucene scoring. |
| + * Overriding computation of these components is a convenient |
| + * way to alter Lucene scoring. |
| + * |
| + * <p>Suggested reading: |
| + * <a href="http://nlp.stanford.edu/IR-book/html/htmledition/queries-as-vectors-1.html"> |
| + * Introduction To Information Retrieval, Chapter 6</a>. |
| + * |
| + * <p>The following describes how Lucene scoring evolves from |
| + * underlying information retrieval models to (efficient) implementation. |
| + * We first brief on <i>VSM Score</i>, |
| + * then derive from it <i>Lucene's Conceptual Scoring Formula</i>, |
| + * from which, finally, evolves <i>Lucene's Practical Scoring Function</i> |
| + * (the latter is connected directly with Lucene classes and methods). |
| + * |
| + * <p>Lucene combines |
| + * <a href="http://en.wikipedia.org/wiki/Standard_Boolean_model"> |
| + * Boolean model (BM) of Information Retrieval</a> |
| + * with |
| + * <a href="http://en.wikipedia.org/wiki/Vector_Space_Model"> |
| + * Vector Space Model (VSM) of Information Retrieval</a> - |
| + * documents "approved" by BM are scored by VSM. |
| + * |
| + * <p>In VSM, documents and queries are represented as |
| + * weighted vectors in a multi-dimensional space, |
| + * where each distinct index term is a dimension, |
| + * and weights are |
| + * <a href="http://en.wikipedia.org/wiki/Tfidf">Tf-idf</a> values. |
| + * |
| + * <p>VSM does not require weights to be <i>Tf-idf</i> values, |
| + * but <i>Tf-idf</i> values are believed to produce search results of high quality, |
| + * and so Lucene is using <i>Tf-idf</i>. |
| + * <i>Tf</i> and <i>Idf</i> are described in more detail below, |
| + * but for now, for completion, let's just say that |
| + * for given term <i>t</i> and document (or query) <i>x</i>, |
| + * <i>Tf(t,x)</i> varies with the number of occurrences of term <i>t</i> in <i>x</i> |
| + * (when one increases so does the other) and |
| + * <i>idf(t)</i> similarly varies with the inverse of the |
| + * number of index documents containing term <i>t</i>. |
| + * |
| + * <p><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the |
| + * <a href="http://en.wikipedia.org/wiki/Cosine_similarity"> |
| + * Cosine Similarity</a> |
| + * of the weighted query vectors <i>V(q)</i> and <i>V(d)</i>: |
| + * |
| + * <br> <br> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr><td> |
| + * <table cellpadding="1" cellspacing="0" border="1" align="center"> |
| + * <tr><td> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * cosine-similarity(q,d) = |
| + * </td> |
| + * <td valign="middle" align="center"> |
| + * <table> |
| + * <tr><td align="center"><small>V(q) · V(d)</small></td></tr> |
| + * <tr><td align="center">–––––––––</td></tr> |
| + * <tr><td align="center"><small>|V(q)| |V(d)|</small></td></tr> |
| + * </table> |
| + * </td> |
| + * </tr> |
| + * </table> |
| + * </td></tr> |
| + * </table> |
| + * </td></tr> |
| + * <tr><td> |
| + * <center><font size="-1"><u>VSM Score</u></font></center> |
| + * </td></tr> |
| + * </table> |
| + * <br> <br> |
| + * |
| + * |
| + * Where <i>V(q)</i> · <i>V(d)</i> is the |
| + * <a href="http://en.wikipedia.org/wiki/Dot_product">dot product</a> |
| + * of the weighted vectors, |
| + * and <i>|V(q)|</i> and <i>|V(d)|</i> are their |
| + * <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norms</a>. |
| + * |
| + * <p>Note: the above equation can be viewed as the dot product of |
| + * the normalized weighted vectors, in the sense that dividing |
| + * <i>V(q)</i> by its euclidean norm is normalizing it to a unit vector. |
| + * |
| + * <p>Lucene refines <i>VSM score</i> for both search quality and usability: |
| + * <ul> |
| + * <li>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that |
| + * it removes all document length information. |
| + * For some documents removing this info is probably ok, |
| + * e.g. a document made by duplicating a certain paragraph <i>10</i> times, |
| + * especially if that paragraph is made of distinct terms. |
| + * But for a document which contains no duplicated paragraphs, |
| + * this might be wrong. |
| + * To avoid this problem, a different document length normalization |
| + * factor is used, which normalizes to a vector equal to or larger |
| + * than the unit vector: <i>doc-len-norm(d)</i>. |
| + * </li> |
| + * |
| + * <li>At indexing, users can specify that certain documents are more |
| + * important than others, by assigning a document boost. |
| + * For this, the score of each document is also multiplied by its boost value |
| + * <i>doc-boost(d)</i>. |
| + * </li> |
| + * |
| + * <li>Lucene is field based, hence each query term applies to a single |
| + * field, document length normalization is by the length of the certain field, |
| + * and in addition to document boost there are also document fields boosts. |
| + * </li> |
| + * |
| + * <li>The same field can be added to a document during indexing several times, |
| + * and so the boost of that field is the multiplication of the boosts of |
| + * the separate additions (or parts) of that field within the document. |
| + * </li> |
| + * |
| + * <li>At search time users can specify boosts to each query, sub-query, and |
| + * each query term, hence the contribution of a query term to the score of |
| + * a document is multiplied by the boost of that query term <i>query-boost(q)</i>. |
| + * </li> |
| + * |
| + * <li>A document may match a multi term query without containing all |
| + * the terms of that query (this is correct for some of the queries), |
| + * and users can further reward documents matching more query terms |
| + * through a coordination factor, which is usually larger when |
| + * more terms are matched: <i>coord-factor(q,d)</i>. |
| + * </li> |
| + * </ul> |
| + * |
| + * <p>Under the simplifying assumption of a single field in the index, |
| + * we get <i>Lucene's Conceptual scoring formula</i>: |
| + * |
| + * <br> <br> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr><td> |
| + * <table cellpadding="1" cellspacing="0" border="1" align="center"> |
| + * <tr><td> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * score(q,d) = |
| + * <font color="#FF9933">coord-factor(q,d)</font> · |
| + * <font color="#CCCC00">query-boost(q)</font> · |
| + * </td> |
| + * <td valign="middle" align="center"> |
| + * <table> |
| + * <tr><td align="center"><small><font color="#993399">V(q) · V(d)</font></small></td></tr> |
| + * <tr><td align="center">–––––––––</td></tr> |
| + * <tr><td align="center"><small><font color="#FF33CC">|V(q)|</font></small></td></tr> |
| + * </table> |
| + * </td> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * · <font color="#3399FF">doc-len-norm(d)</font> |
| + * · <font color="#3399FF">doc-boost(d)</font> |
| + * </td> |
| + * </tr> |
| + * </table> |
| + * </td></tr> |
| + * </table> |
| + * </td></tr> |
| + * <tr><td> |
| + * <center><font size="-1"><u>Lucene Conceptual Scoring Formula</u></font></center> |
| + * </td></tr> |
| + * </table> |
| + * <br> <br> |
| + * |
| + * <p>The conceptual formula is a simplification in the sense that (1) terms and documents |
| + * are fielded and (2) boosts are usually per query term rather than per query. |
| + * |
| + * <p>We now describe how Lucene implements this conceptual scoring formula, and |
| + * derive from it <i>Lucene's Practical Scoring Function</i>. |
| + * |
| + * <p>For efficient score computation some scoring components |
| + * are computed and aggregated in advance: |
| + * |
| + * <ul> |
| + * <li><i>Query-boost</i> for the query (actually for each query term) |
| + * is known when search starts. |
| + * </li> |
| + * |
| + * <li>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts, |
| + * as it is independent of the document being scored. |
| + * From search optimization perspective, it is a valid question |
| + * why bother to normalize the query at all, because all |
| + * scored documents will be multiplied by the same <i>|V(q)|</i>, |
| + * and hence the documents' ranks (their order by score) will not |
| + * be affected by this normalization. |
| + * There are two good reasons to keep this normalization: |
| + * <ul> |
| + * <li>Recall that |
| + * <a href="http://en.wikipedia.org/wiki/Cosine_similarity"> |
| + * Cosine Similarity</a> can be used to find how similar |
| + * two documents are. One can use Lucene for e.g. |
| + * clustering, and use a document as a query to compute |
| + * its similarity to other documents. |
| + * In this use case it is important that the score of document <i>d3</i> |
| + * for query <i>d1</i> is comparable to the score of document <i>d3</i> |
| + * for query <i>d2</i>. In other words, scores of a document for two |
| + * distinct queries should be comparable. |
| + * There are other applications that may require this. |
| + * And this is exactly what normalizing the query vector <i>V(q)</i> |
| + * provides: comparability (to a certain extent) of two or more queries. |
| + * </li> |
| + * |
| + * <li>Applying query normalization on the scores helps to keep the |
| + * scores around the unit vector, hence preventing loss of score data |
| + * because of floating point precision limitations. |
| + * </li> |
| + * </ul> |
| + * </li> |
| + * |
| + * <li>Document length norm <i>doc-len-norm(d)</i> and document |
| + * boost <i>doc-boost(d)</i> are known at indexing time. |
| + * They are computed in advance and their multiplication |
| + * is saved as a single value in the index: <i>norm(d)</i>. |
| + * (In the equations below, <i>norm(t in d)</i> means <i>norm(field(t) in doc d)</i> |
| + * where <i>field(t)</i> is the field associated with term <i>t</i>.) |
| + * </li> |
| + * </ul> |
| + * |
| + * <p><i>Lucene's Practical Scoring Function</i> is derived from the above. |
| + * The color codes demonstrate how it relates |
| + * to those of the <i>conceptual</i> formula: |
| + * |
| + * <P> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr><td> |
| + * <table cellpadding="" cellspacing="2" border="2" align="center"> |
| + * <tr><td> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * score(q,d) = |
| + * <A HREF="#formula_coord"><font color="#FF9933">coord(q,d)</font></A> · |
| + * <A HREF="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></A> · |
| + * </td> |
| + * <td valign="bottom" align="center" rowspan="1"> |
| + * <big><big><big>∑</big></big></big> |
| + * </td> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * <big><big>(</big></big> |
| + * <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> · |
| + * <A HREF="#formula_idf"><font color="#993399">idf(t)</font></A><sup>2</sup> · |
| + * <A HREF="#formula_termBoost"><font color="#CCCC00">t.getBoost()</font></A> · |
| + * <A HREF="#formula_norm"><font color="#3399FF">norm(t,d)</font></A> |
| + * <big><big>)</big></big> |
| + * </td> |
| + * </tr> |
| + * <tr valign="top"> |
| + * <td></td> |
| + * <td align="center"><small>t in q</small></td> |
| + * <td></td> |
| + * </tr> |
| + * </table> |
| + * </td></tr> |
| + * </table> |
| + * </td></tr> |
| + * <tr><td> |
| + * <center><font size="-1"><u>Lucene Practical Scoring Function</u></font></center> |
| + * </td></tr> |
| + * </table> |
| + * |
| + * <p> where |
| + * <ol> |
| + * <li> |
| + * <A NAME="formula_tf"></A> |
| + * <b><i>tf(t in d)</i></b> |
| + * correlates to the term's <i>frequency</i>, |
| + * defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>. |
| + * Documents that have more occurrences of a given term receive a higher score. |
| + * Note that <i>tf(t in q)</i> is assumed to be <i>1</i> and therefore it does not appear in this equation. |
| + * However if a query contains twice the same term, there will be |
| + * two term-queries with that same term and hence the computation would still be correct (although |
| + * not very efficient). |
| + * The default computation for <i>tf(t in d)</i> in |
| + * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is: |
| + * |
| + * <br> <br> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)} = |
| + * </td> |
| + * <td valign="top" align="center" rowspan="1"> |
| + * frequency<sup><big>½</big></sup> |
| + * </td> |
| + * </tr> |
| + * </table> |
| + * <br> <br> |
| + * </li> |
| + * |
| + * <li> |
| + * <A NAME="formula_idf"></A> |
| + * <b><i>idf(t)</i></b> stands for Inverse Document Frequency. This value |
| + * correlates to the inverse of <i>docFreq</i> |
| + * (the number of documents in which the term <i>t</i> appears). |
| + * This means rarer terms give higher contribution to the total score. |
| + * <i>idf(t)</i> appears for <i>t</i> in both the query and the document, |
| + * hence it is squared in the equation. |
| + * The default computation for <i>idf(t)</i> in |
| + * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is: |
| + * |
| + * <br> <br> |
| + * <table cellpadding="2" cellspacing="2" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right"> |
| + * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)} = |
| + * </td> |
| + * <td valign="middle" align="center"> |
| + * 1 + log <big>(</big> |
| + * </td> |
| + * <td valign="middle" align="center"> |
| + * <table> |
| + * <tr><td align="center"><small>numDocs</small></td></tr> |
| + * <tr><td align="center">–––––––––</td></tr> |
| + * <tr><td align="center"><small>docFreq+1</small></td></tr> |
| + * </table> |
| + * </td> |
| + * <td valign="middle" align="center"> |
| + * <big>)</big> |
| + * </td> |
| + * </tr> |
| + * </table> |
| + * <br> <br> |
| + * </li> |
| + * |
| + * <li> |
| + * <A NAME="formula_coord"></A> |
| + * <b><i>coord(q,d)</i></b> |
| + * is a score factor based on how many of the query terms are found in the specified document. |
| + * Typically, a document that contains more of the query's terms will receive a higher score |
| + * than another document with fewer query terms. |
| + * This is a search time factor computed in |
| + * {@link SimilarityProvider#coord(int, int) coord(q,d)} |
| + * by the Similarity in effect at search time. |
| + * <br> <br> |
| + * </li> |
| + * |
| + * <li><b> |
| + * <A NAME="formula_queryNorm"></A> |
| + * <i>queryNorm(q)</i> |
| + * </b> |
| + * is a normalizing factor used to make scores between queries comparable. |
| + * This factor does not affect document ranking (since all ranked documents are multiplied by the same factor), |
| + * but rather just attempts to make scores from different queries (or even different indexes) comparable. |
| + * This is a search time factor computed by the Similarity in effect at search time. |
| + * |
| + * The default computation in |
| + * {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) DefaultSimilarity} |
| + * produces a <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norm</a>: |
| + * <br> <br> |
| + * <table cellpadding="1" cellspacing="0" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * queryNorm(q) = |
| + * {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) queryNorm(sumOfSquaredWeights)} |
| + * = |
| + * </td> |
| + * <td valign="middle" align="center" rowspan="1"> |
| + * <table> |
| + * <tr><td align="center"><big>1</big></td></tr> |
| + * <tr><td align="center"><big> |
| + * –––––––––––––– |
| + * </big></td></tr> |
| + * <tr><td align="center">sumOfSquaredWeights<sup><big>½</big></sup></td></tr> |
| + * </table> |
| + * </td> |
| + * </tr> |
| + * </table> |
| + * <br> <br> |
| + * |
| + * The sum of squared weights (of the query terms) is |
| + * computed by the query {@link org.apache.lucene.search.Weight} object. |
| + * For example, a {@link org.apache.lucene.search.BooleanQuery} |
| + * computes this value as: |
| + * |
| + * <br> <br> |
| + * <table cellpadding="1" cellspacing="0" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights} = |
| + * {@link org.apache.lucene.search.Query#getBoost() q.getBoost()} <sup><big>2</big></sup> |
| + * · |
| + * </td> |
| + * <td valign="bottom" align="center" rowspan="1"> |
| + * <big><big><big>∑</big></big></big> |
| + * </td> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * <big><big>(</big></big> |
| + * <A HREF="#formula_idf">idf(t)</A> · |
| + * <A HREF="#formula_termBoost">t.getBoost()</A> |
| + * <big><big>) <sup>2</sup> </big></big> |
| + * </td> |
| + * </tr> |
| + * <tr valign="top"> |
| + * <td></td> |
| + * <td align="center"><small>t in q</small></td> |
| + * <td></td> |
| + * </tr> |
| + * </table> |
| + * <br> <br> |
| + * |
| + * </li> |
| + * |
| + * <li> |
| + * <A NAME="formula_termBoost"></A> |
| + * <b><i>t.getBoost()</i></b> |
| + * is a search time boost of term <i>t</i> in the query <i>q</i> as |
| + * specified in the query text |
| + * (see <A HREF="../../../../../../queryparsersyntax.html#Boosting a Term">query syntax</A>), |
| + * or as set by application calls to |
| + * {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}. |
| + * Notice that there is really no direct API for accessing a boost of one term in a multi term query, |
| + * but rather multi terms are represented in a query as multi |
| + * {@link org.apache.lucene.search.TermQuery TermQuery} objects, |
| + * and so the boost of a term in the query is accessible by calling the sub-query |
| + * {@link org.apache.lucene.search.Query#getBoost() getBoost()}. |
| + * <br> <br> |
| + * </li> |
| + * |
| + * <li> |
| + * <A NAME="formula_norm"></A> |
| + * <b><i>norm(t,d)</i></b> encapsulates a few (indexing time) boost and length factors: |
| + * |
| + * <ul> |
| + * <li><b>Document boost</b> - set by calling |
| + * {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()} |
| + * before adding the document to the index. |
| + * </li> |
| + * <li><b>Field boost</b> - set by calling |
| + * {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()} |
| + * before adding the field to a document. |
| + * </li> |
| + * <li><b>lengthNorm</b> - computed |
| + * when the document is added to the index in accordance with the number of tokens |
| + * of this field in the document, so that shorter fields contribute more to the score. |
| + * LengthNorm is computed by the Similarity class in effect at indexing. |
| + * </li> |
| + * </ul> |
| + * The {@link #computeNorm} method is responsible for |
| + * combining all of these factors into a single float. |
| + * |
| + * <p> |
| + * When a document is added to the index, all the above factors are multiplied. |
| + * If the document has multiple fields with the same name, all their boosts are multiplied together: |
| + * |
| + * <br> <br> |
| + * <table cellpadding="1" cellspacing="0" border="0" align="center"> |
| + * <tr> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * norm(t,d) = |
| + * {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()} |
| + * · |
| + * lengthNorm |
| + * · |
| + * </td> |
| + * <td valign="bottom" align="center" rowspan="1"> |
| + * <big><big><big>∏</big></big></big> |
| + * </td> |
| + * <td valign="middle" align="right" rowspan="1"> |
| + * {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}() |
| + * </td> |
| + * </tr> |
| + * <tr valign="top"> |
| + * <td></td> |
| + * <td align="center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td> |
| + * <td></td> |
| + * </tr> |
| + * </table> |
| + * <br> <br> |
| + * However, the resulting <i>norm</i> value is {@link #encodeNormValue(float) encoded} as a single byte |
| + * before being stored. |
| + * At search time, the norm byte value is read from the index |
| + * {@link org.apache.lucene.store.Directory directory} and |
| + * {@link #decodeNormValue(byte) decoded} back to a float <i>norm</i> value. |
| + * This encoding/decoding, while reducing index size, comes with the price of |
| + * precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>. |
| + * For instance, <i>decode(encode(0.89)) = 0.75</i>. |
| + * <br> <br> |
| + * Compression of norm values to a single byte saves memory at search time, |
| + * because once a field is referenced at search time, its norms - for |
| + * all documents - are maintained in memory. |
| + * <br> <br> |
| + * The rationale supporting such lossy compression of norm values is that |
| + * given the difficulty (and inaccuracy) of users to express their true information |
| + * need by a query, only big differences matter. |
| + * <br> <br> |
| + * Last, note that search time is too late to modify this <i>norm</i> part of scoring, e.g. by |
| + * using a different {@link Similarity} for search. |
| + * <br> <br> |
| + * </li> |
| + * </ol> |
| + * |
| + * @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider) |
| + * @see IndexSearcher#setSimilarityProvider(SimilarityProvider) |
| + */ |
| +public abstract class TFIDFSimilarity extends Similarity implements Serializable { |
| + |
| + /** Computes a score factor based on a term or phrase's frequency in a |
| + * document. This value is multiplied by the {@link #idf(int, int)} |
| + * factor for each term in the query and these products are then summed to |
| + * form the initial score for a document. |
| + * |
| + * <p>Terms and phrases repeated in a document indicate the topic of the |
| + * document, so implementations of this method usually return larger values |
| + * when <code>freq</code> is large, and smaller values when <code>freq</code> |
| + * is small. |
| + * |
| + * <p>The default implementation calls {@link #tf(float)}. |
| + * |
| + * @param freq the frequency of a term within a document |
| + * @return a score factor based on a term's within-document frequency |
| + */ |
| + public float tf(int freq) { |
| + return tf((float)freq); |
| + } |
| + |
| + /** Computes a score factor based on a term or phrase's frequency in a |
| + * document. This value is multiplied by the {@link #idf(int, int)} |
| + * factor for each term in the query and these products are then summed to |
| + * form the initial score for a document. |
| + * |
| + * <p>Terms and phrases repeated in a document indicate the topic of the |
| + * document, so implementations of this method usually return larger values |
| + * when <code>freq</code> is large, and smaller values when <code>freq</code> |
| + * is small. |
| + * |
| + * @param freq the frequency of a term within a document |
| + * @return a score factor based on a term's within-document frequency |
| + */ |
| + public abstract float tf(float freq); |
| + |
| + /** |
| + * Computes a score factor for a simple term and returns an explanation |
| + * for that score factor. |
| + * |
| + * <p> |
| + * The default implementation uses: |
| + * |
| + * <pre> |
| + * idf(docFreq, searcher.maxDoc()); |
| + * </pre> |
| + * |
| + * Note that {@link IndexSearcher#maxDoc()} is used instead of |
| + * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also |
| + * {@link IndexSearcher#docFreq(Term)} is used, and when the latter |
| + * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction. |
| + * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute. |
| + * |
| + * @param stats term statistics for the term in question, |
| + *        including its externally computed docFreq |
| + * @param searcher the document collection being searched |
| + * @return an IDFExplain object that includes both an idf score factor |
| + * and an explanation for the term. |
| + * @throws IOException |
| + */ |
| + public IDFExplanation idfExplain(PerReaderTermState stats, final IndexSearcher searcher) throws IOException { |
| + final int df = stats.docFreq(); |
| + final int max = searcher.maxDoc(); |
| + final float idf = idf(df, max); |
| + return new IDFExplanation() { |
| + @Override |
| + public String explain() { |
| + return "idf(docFreq=" + df + |
| + ", maxDocs=" + max + ")"; |
| + } |
| + @Override |
| + public float getIdf() { |
| + return idf; |
| + }}; |
| + } |
| + |
| + /** |
| + * Computes a score factor for a phrase. |
| + * |
| + * <p> |
| + * The default implementation sums the idf factor for |
| + * each term in the phrase. |
| + * |
| + * @param stats term statistics for the terms in the phrase |
| + * @param searcher the document collection being searched |
| + * @return an IDFExplain object that includes both an idf |
| + * score factor for the phrase and an explanation |
| + * for each term. |
| + * @throws IOException |
| + */ |
| + public IDFExplanation idfExplain(final PerReaderTermState stats[], IndexSearcher searcher) throws IOException { |
| + final int max = searcher.maxDoc(); |
| + float idf = 0.0f; |
| + final StringBuilder exp = new StringBuilder(); |
| + for (final PerReaderTermState stat : stats ) { |
| + final int df = stat.docFreq(); |
| + idf += idf(df, max); |
| + exp.append(" "); |
| + exp.append(df); |
| + } |
| + final float fIdf = idf; |
| + return new IDFExplanation() { |
| + @Override |
| + public float getIdf() { |
| + return fIdf; |
| + } |
| + @Override |
| + public String explain() { |
| + return exp.toString(); |
| + } |
| + }; |
| + } |
| + |
| + /** Computes a score factor based on a term's document frequency (the number |
| + * of documents which contain the term). This value is multiplied by the |
| + * {@link #tf(int)} factor for each term in the query and these products are |
| + * then summed to form the initial score for a document. |
| + * |
| + * <p>Terms that occur in fewer documents are better indicators of topic, so |
| + * implementations of this method usually return larger values for rare terms, |
| + * and smaller values for common terms. |
| + * |
| + * @param docFreq the number of documents which contain the term |
| + * @param numDocs the total number of documents in the collection |
| + * @return a score factor based on the term's document frequency |
| + */ |
| + public abstract float idf(int docFreq, int numDocs); |
| + |
| + @Override |
| + public final IDFExplanation computeWeight(IndexSearcher searcher, String fieldName, |
| + PerReaderTermState... termStats) throws IOException { |
| + return termStats.length == 1 |
| + ? idfExplain(termStats[0], searcher) |
| + : idfExplain(termStats, searcher); |
| + } |
| + |
| + @Override |
| + public final ExactDocScorer exactDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException { |
| + final byte norms[] = context.reader.norms(fieldName); |
| + return norms == null |
| + ? new RawExactTFIDFDocScorer(weight.getValue()) |
| + : new ExactTFIDFDocScorer(weight.getValue(), norms); |
| + } |
| + |
| + @Override |
| + public final SloppyDocScorer sloppyDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException { |
| + final byte norms[] = context.reader.norms(fieldName); |
| + return norms == null |
| + ? new RawSloppyTFIDFDocScorer(weight.getValue()) |
| + : new SloppyTFIDFDocScorer(weight.getValue(), norms); |
| + } |
| + |
| + // nocommit: make SCORE_CACHE_SIZE dynamic when available? (e.g. totalTermFreq / docFreq) |
| + // nocommit: make configurable? |
| + private final class ExactTFIDFDocScorer extends ExactDocScorer { |
| + private final float weightValue; |
| + private final byte[] norms; |
| + private static final int SCORE_CACHE_SIZE = 32; |
| + private float[] scoreCache = new float[SCORE_CACHE_SIZE]; |
| + |
| + ExactTFIDFDocScorer(float weightValue, byte norms[]) { |
| + this.weightValue = weightValue; |
| + this.norms = norms; |
| + for (int i = 0; i < SCORE_CACHE_SIZE; i++) |
| + scoreCache[i] = tf(i) * weightValue; |
| + } |
| + |
| + @Override |
| + public float score(int doc, int freq) { |
| + float raw = // compute tf(f)*weight |
| + freq < SCORE_CACHE_SIZE // check cache |
| + ? scoreCache[freq] // cache hit |
| + : tf(freq)*weightValue; // cache miss |
| + |
| + return raw * decodeNormValue(norms[doc]); // normalize for field |
| + } |
| + } |
| + |
| + private final class RawExactTFIDFDocScorer extends ExactDocScorer { |
| + private final float weightValue; |
| + private static final int SCORE_CACHE_SIZE = 32; |
| + private float[] scoreCache = new float[SCORE_CACHE_SIZE]; |
| + |
| + RawExactTFIDFDocScorer(float weightValue) { |
| + this.weightValue = weightValue; |
| + for (int i = 0; i < SCORE_CACHE_SIZE; i++) |
| + scoreCache[i] = tf(i) * weightValue; |
| + } |
| + |
| + @Override |
| + public float score(int doc, int freq) { |
| + return freq < SCORE_CACHE_SIZE // check cache |
| + ? scoreCache[freq] // cache hit |
| + : tf(freq)*weightValue; // cache miss |
| + } |
| + } |
| + |
| + private final class SloppyTFIDFDocScorer extends SloppyDocScorer { |
| + private final float weightValue; |
| + private final byte[] norms; |
| + |
| + SloppyTFIDFDocScorer(float weightValue, byte norms[]) { |
| + this.weightValue = weightValue; |
| + this.norms = norms; |
| + } |
| + |
| + @Override |
| + public float score(int doc, float freq) { |
| + return tf(freq) * weightValue * decodeNormValue(norms[doc]); // compute tf(f)*weight * normalize for field |
| + } |
| + } |
| + |
| + private final class RawSloppyTFIDFDocScorer extends SloppyDocScorer { |
| + private final float weightValue; |
| + |
| + RawSloppyTFIDFDocScorer(float weightValue) { |
| + this.weightValue = weightValue; |
| + } |
| + |
| + @Override |
| + public float score(int doc, float freq) { |
| + return tf(freq)*weightValue; // compute tf(f)*weight |
| + } |
| + } |
| +} |
|
|
| Property changes on: lucene\src\java\org\apache\lucene\search\TFIDFSimilarity.java
|
| ___________________________________________________________________
|
| Added: svn:eol-style
|
| + native
|
|
|
| Index: lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java (working copy)
|
| @@ -21,14 +21,9 @@
|
| import java.util.Arrays; |
| |
| import org.apache.lucene.index.*; |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| |
| final class ExactPhraseScorer extends Scorer { |
| - private final byte[] norms; |
| - private final float value; |
| - |
| - private static final int SCORE_CACHE_SIZE = 32; |
| - private final float[] scoreCache = new float[SCORE_CACHE_SIZE]; |
| - |
| private final int endMinus1; |
| |
| private final static int CHUNK = 4096; |
| @@ -60,14 +55,12 @@
|
| private int docID = -1; |
| private int freq; |
| |
| - private final Similarity similarity; |
| + private final Similarity.ExactDocScorer docScorer; |
| |
| ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, |
| - Similarity similarity, byte[] norms) throws IOException { |
| + Similarity similarity, String field, AtomicReaderContext context) throws IOException { |
| super(weight); |
| - this.similarity = similarity; |
| - this.norms = norms; |
| - this.value = weight.getValue(); |
| + this.docScorer = similarity.exactDocScorer(weight, field, context); |
| |
| chunkStates = new ChunkState[postings.length]; |
| |
| @@ -88,10 +81,6 @@
|
| return; |
| } |
| } |
| - |
| - for (int i = 0; i < SCORE_CACHE_SIZE; i++) { |
| - scoreCache[i] = similarity.tf((float) i) * value; |
| - } |
| } |
| |
| @Override |
| @@ -206,13 +195,7 @@
|
| |
| @Override |
| public float score() throws IOException { |
| - final float raw; // raw score |
| - if (freq < SCORE_CACHE_SIZE) { |
| - raw = scoreCache[freq]; |
| - } else { |
| - raw = similarity.tf((float) freq) * value; |
| - } |
| - return norms == null ? raw : raw * similarity.decodeNormValue(norms[docID]); // normalize |
| + return docScorer.score(docID, freq); |
| } |
| |
| private int phraseFreq() throws IOException { |
| Index: lucene/src/java/org/apache/lucene/search/TermScorer.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/TermScorer.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/TermScorer.java (working copy)
|
| @@ -20,25 +20,22 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.index.DocsEnum; |
| +import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| |
| /** Expert: A <code>Scorer</code> for documents matching a <code>Term</code>. |
| */ |
| final class TermScorer extends Scorer { |
| private DocsEnum docsEnum; |
| - private byte[] norms; |
| - private float weightValue; |
| private int doc = -1; |
| private int freq; |
| |
| private int pointer; |
| private int pointerMax; |
| |
| - private static final int SCORE_CACHE_SIZE = 32; |
| - private float[] scoreCache = new float[SCORE_CACHE_SIZE]; |
| private int[] docs; |
| private int[] freqs; |
| private final DocsEnum.BulkReadResult bulkResult; |
| - private final Similarity similarity; |
| + private final Similarity.ExactDocScorer docScorer; |
| |
| /** |
| * Construct a <code>TermScorer</code>. |
| @@ -53,16 +50,11 @@
|
| * @param norms |
| * The field norms of the document fields for the <code>Term</code>. |
| */ |
| - TermScorer(Weight weight, DocsEnum td, Similarity similarity, byte[] norms) { |
| + TermScorer(Weight weight, DocsEnum td, Similarity similarity, String fieldName, AtomicReaderContext context) throws IOException { |
| super(weight); |
| - this.similarity = similarity; |
| + this.docScorer = similarity.exactDocScorer(weight, fieldName, context); |
| this.docsEnum = td; |
| - this.norms = norms; |
| - this.weightValue = weight.getValue(); |
| bulkResult = td.getBulkResult(); |
| - |
| - for (int i = 0; i < SCORE_CACHE_SIZE; i++) |
| - scoreCache[i] = similarity.tf(i) * weightValue; |
| } |
| |
| @Override |
| @@ -134,12 +126,7 @@
|
| @Override |
| public float score() { |
| assert doc != NO_MORE_DOCS; |
| - float raw = // compute tf(f)*weight |
| - freq < SCORE_CACHE_SIZE // check cache |
| - ? scoreCache[freq] // cache hit |
| - : similarity.tf(freq)*weightValue; // cache miss |
| - |
| - return norms == null ? raw : raw * similarity.decodeNormValue(norms[doc]); // normalize for field |
| + return docScorer.score(doc, freq); |
| } |
| |
| /** |
| Index: lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java (working copy)
|
| @@ -20,7 +20,7 @@
|
| */ |
| |
| /** Expert: Default scoring implementation. */ |
| -public class DefaultSimilarity extends Similarity implements SimilarityProvider { |
| +public class DefaultSimilarity extends TFIDFSimilarity implements SimilarityProvider { |
| |
| /** Implemented as |
| * <code>state.getBoost()*lengthNorm(numTerms)</code>, where |
| Index: lucene/src/java/org/apache/lucene/util/PerReaderTermState.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/util/PerReaderTermState.java (revision 1063004)
|
| +++ lucene/src/java/org/apache/lucene/util/PerReaderTermState.java (working copy)
|
| @@ -145,4 +145,10 @@
|
| public int docFreq() { |
| return docFreq; |
| } |
| + |
| + /** expert: only available for queries that want to lie about docfreq |
| + * @lucene.internal */ |
| + public void setDocFreq(int docFreq) { |
| + this.docFreq = docFreq; |
| + } |
| }
|
| \ No newline at end of file
|
| Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java (revision 1063004)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java (working copy)
|
| @@ -20,6 +20,7 @@
|
| |
| import org.apache.lucene.search.DefaultSimilarity; |
| import org.apache.lucene.search.Similarity; |
| +import org.apache.lucene.search.TFIDFSimilarity; |
| import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.index.FieldInvertState; |
| |
| @@ -151,8 +152,8 @@
|
| |
| SweetSpotSimilarity ss = new SweetSpotSimilarity(); |
| |
| - Similarity d = new DefaultSimilarity(); |
| - Similarity s = ss; |
| + TFIDFSimilarity d = new DefaultSimilarity(); |
| + TFIDFSimilarity s = ss; |
| |
| // tf equal |
| |
| @@ -203,7 +204,7 @@
|
| }; |
| ss.setHyperbolicTfFactors(3.3f, 7.7f, Math.E, 5.0f); |
| |
| - Similarity s = ss; |
| + TFIDFSimilarity s = ss; |
| |
| for (int i = 1; i <=1000; i++) { |
| assertTrue("MIN tf: i="+i+" : s="+s.tf(i), |
| Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (revision 1063004)
|
| +++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (working copy)
|
| @@ -44,6 +44,7 @@
|
| import org.apache.lucene.search.Query; |
| import org.apache.lucene.search.ScoreDoc; |
| import org.apache.lucene.search.Similarity; |
| +import org.apache.lucene.search.TFIDFSimilarity; |
| import org.apache.lucene.search.TermQuery; |
| import org.apache.lucene.search.TopDocs; |
| import org.apache.lucene.store.FSDirectory; |
| @@ -284,7 +285,8 @@
|
| /** |
| * For idf() calculations. |
| */ |
| - private Similarity similarity;// = new DefaultSimilarity(); |
| + // nocommit? this is pretty much wired to tf-idf things... |
| + private TFIDFSimilarity similarity;// = new DefaultSimilarity(); |
| |
| /** |
| * IndexReader to use |
| @@ -319,17 +321,17 @@
|
| this(ir, new DefaultSimilarity()); |
| } |
| |
| - public MoreLikeThis(IndexReader ir, Similarity sim){ |
| + public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim){ |
| this.ir = ir; |
| this.similarity = sim; |
| } |
| |
| |
| - public Similarity getSimilarity() { |
| + public TFIDFSimilarity getSimilarity() { |
| return similarity; |
| } |
| |
| - public void setSimilarity(Similarity similarity) { |
| + public void setSimilarity(TFIDFSimilarity similarity) { |
| this.similarity = similarity; |
| } |
| |
| Index: lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (revision 1063004)
|
| +++ lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (working copy)
|
| @@ -51,7 +51,8 @@
|
| */ |
| public class FuzzyLikeThisQuery extends Query |
| { |
| - static Similarity sim=new DefaultSimilarity(); |
| + //nocommit? this query is pretty much hardcoded at TF/IDF |
| + static TFIDFSimilarity sim=new DefaultSimilarity(); |
| Query rewrittenQuery=null; |
| ArrayList<FieldVals> fieldVals=new ArrayList<FieldVals>(); |
| Analyzer analyzer; |