| Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
|
| ===================================================================
|
| --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (revision 1143083)
|
| +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (working copy)
|
| @@ -24,8 +24,8 @@
|
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.TextField; |
| |
| /** |
| * @since 2009-mar-30 13:15:49 |
| @@ -65,7 +65,7 @@
|
| |
| private void addDocument(IndexWriter iw, String text) throws IOException { |
| Document doc = new Document(); |
| - doc.add(new Field("field", text, Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(new TextField("field", text)); |
| iw.addDocument(doc); |
| } |
| } |
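This first hunk shows the simplest case of the migration this patch performs everywhere: the enum-driven Field constructor becomes a TextField, whose defaults already encode Field.Store.NO plus Field.Index.ANALYZED. A minimal sketch of the mapping, assuming the document2 constructors behave as the hunks imply (the package name and signatures are taken from this patch, not from a shipped release):

    import org.apache.lucene.document2.Document;
    import org.apache.lucene.document2.TextField;

    class TextFieldMigrationSketch {
      // Before: doc.add(new Field("field", text, Field.Store.NO, Field.Index.ANALYZED));
      // After: TextField defaults to indexed + tokenized + not stored.
      static Document build(String text) {
        Document doc = new Document();
        doc.add(new TextField("field", text));
        return doc;
      }
    }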
| Index: lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
|
| ===================================================================
|
| --- lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 1143083)
|
| +++ lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy)
|
| @@ -29,8 +29,9 @@
|
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.analysis.MockTokenFilter; |
| import org.apache.lucene.analysis.MockTokenizer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.IndexWriterConfig; |
| import org.apache.lucene.queryParser.QueryParser; |
| @@ -108,8 +109,8 @@
|
| IndexWriter writer = new IndexWriter(ramdir, |
| new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); |
| Document doc = new Document(); |
| - Field field1 = newField("foo", fooField.toString(), Field.Store.NO, Field.Index.ANALYZED); |
| - Field field2 = newField("term", termField.toString(), Field.Store.NO, Field.Index.ANALYZED); |
| + Field field1 = newField("foo", fooField.toString(), TextField.DEFAULT_TYPE); |
| + Field field2 = newField("term", termField.toString(), TextField.DEFAULT_TYPE); |
| doc.add(field1); |
| doc.add(field2); |
| writer.addDocument(doc); |
| Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 1143083)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy)
|
| @@ -21,8 +21,10 @@
|
| import java.util.Arrays; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.search.Collector; |
| import org.apache.lucene.search.DefaultSimilarity; |
| @@ -65,13 +67,21 @@
|
| |
| for (int i = 0; i < NUM_DOCS; i++) { |
| Document d = new Document(); |
| - d.add(newField("field", "word", Field.Store.YES, Field.Index.ANALYZED)); |
| - d.add(newField("nonorm", "word", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); |
| - d.add(newField("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.ANALYZED)); |
| |
| + FieldType storedTextType = new FieldType(TextField.DEFAULT_TYPE); |
| + storedTextType.setStored(true); |
| + d.add(newField("field", "word", storedTextType)); |
| + |
| + FieldType storedTextType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + storedTextType2.setStored(true); |
| + storedTextType2.setTokenized(false); |
| + storedTextType2.setOmitNorms(true); |
| + d.add(newField("nonorm", "word", storedTextType2)); |
| + d.add(newField("untokfield", "20061212 20071212", storedTextType)); |
| + |
| for (int j = 1; j <= i; j++) { |
| - d.add(newField("field", "crap", Field.Store.YES, Field.Index.ANALYZED)); |
| - d.add(newField("nonorm", "more words", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); |
| + d.add(newField("field", "crap", storedTextType)); |
| + d.add(newField("nonorm", "more words", storedTextType2)); |
| } |
| writer.addDocument(d); |
| } |
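The hunk above establishes the pattern the rest of the patch repeats: copy TextField.DEFAULT_TYPE into a mutable FieldType and flip the flags that the old Store/Index enums bundled together. A sketch of that mapping for the NOT_ANALYZED_NO_NORMS case, with names taken from the hunks (the document2 FieldType setters are assumed to work as shown):

    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.TextField;

    class FieldTypeMappingSketch {
      // Field.Store.YES + Field.Index.NOT_ANALYZED_NO_NORMS, expressed as flags:
      static FieldType storedUntokenizedNoNorms() {
        FieldType t = new FieldType(TextField.DEFAULT_TYPE);
        t.setStored(true);      // Field.Store.YES
        t.setTokenized(false);  // NOT_ANALYZED
        t.setOmitNorms(true);   // _NO_NORMS
        return t;
      }
    }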
| Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 1143083)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy)
|
| @@ -17,8 +17,10 @@
|
| */ |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.util.BytesRef; |
| @@ -36,8 +38,13 @@
|
| Document doc; |
| for (int i = 0; i < NUM_DOCS; i++) { |
| doc = new Document(); |
| - doc.add(newField("id", i + "", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("f", i + " " + i, Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType storedTextType = new FieldType(TextField.DEFAULT_TYPE); |
| + storedTextType.setStored(true); |
| + storedTextType.setTokenized(false); |
| + FieldType storedTextType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + storedTextType2.setStored(true); |
| + doc.add(newField("id", i + "", storedTextType)); |
| + doc.add(newField("f", i + " " + i, storedTextType2)); |
| w.addDocument(doc); |
| } |
| w.close(); |
| @@ -70,7 +77,7 @@
|
| IndexReader ir; |
| ir = IndexReader.open(dirs[0], true); |
| assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error |
| - Document doc = ir.document(0); |
| + org.apache.lucene.document.Document doc = ir.document(0); |
| assertEquals("0", doc.get("id")); |
| TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); |
| assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("1"))); |
| @@ -115,7 +122,7 @@
|
| IndexReader ir; |
| ir = IndexReader.open(dirs[0], true); |
| assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); |
| - Document doc = ir.document(0); |
| + org.apache.lucene.document.Document doc = ir.document(0); |
| assertEquals("0", doc.get("id")); |
| int start = ir.numDocs(); |
| ir.close(); |
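Note the retrieval side of the hunks above: indexing now uses document2.Document, but IndexReader.document(int) still returns the legacy class, which is why the stored-document locals switch to the fully qualified org.apache.lucene.document.Document. A sketch of that split, assuming the mixed API of this transitional branch:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;

    class RetrievalSketch {
      // Documents go in as document2.Document but come back as the old class.
      static String firstId(IndexReader ir) throws IOException {
        org.apache.lucene.document.Document stored = ir.document(0);
        return stored.get("id");
      }
    }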
| Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (revision 1143083)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (working copy)
|
| @@ -1,8 +1,10 @@
|
| package org.apache.lucene.index; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.LuceneTestCase; |
| |
| @@ -30,33 +32,42 @@
|
| Document doc; |
| |
| doc = new Document(); |
| - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType)); |
| + doc.add(newField("b", "a b c b d b e b f b g b h b", customType)); |
| + doc.add(newField("c", "a c b c d c e c f c g c h c", customType)); |
| iw.addDocument(doc); |
| |
| doc = new Document(); |
| - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStoreTermVectors(true); |
| + customType2.setStoreTermVectorPositions(true); |
| + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType2)); |
| + doc.add(newField("b", "a b c b d b e b f b g b h b", customType2)); |
| + doc.add(newField("c", "a c b c d c e c f c g c h c", customType2)); |
| iw.addDocument(doc); |
| |
| doc = new Document(); |
| - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); |
| - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); |
| - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); |
| + FieldType customType3 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType3.setStoreTermVectors(true); |
| + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType3)); |
| + doc.add(newField("b", "a b c b d b e b f b g b h b", customType3)); |
| + doc.add(newField("c", "a c b c d c e c f c g c h c", customType3)); |
| iw.addDocument(doc); |
| |
| doc = new Document(); |
| - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); |
| - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); |
| - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); |
| + doc.add(newField("a", "a b a c a d a e a f a g a h a", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("b", "a b c b d b e b f b g b h b", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("c", "a c b c d c e c f c g c h c", TextField.DEFAULT_TYPE)); |
| iw.addDocument(doc); |
| |
| doc = new Document(); |
| - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); |
| - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); |
| + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType)); |
| + doc.add(newField("b", "a b c b d b e b f b g b h b", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("c", "a c b c d c e c f c g c h c", customType3)); |
| iw.addDocument(doc); |
| |
| iw.close(); |
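Here the single TermVector enum is unbundled into three independent FieldType flags: WITH_POSITIONS_OFFSETS sets all three, WITH_POSITIONS drops offsets, YES keeps only the vectors themselves, and NO maps to the TextField default. A sketch of the fullest case, mirroring customType above:

    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.TextField;

    class TermVectorMappingSketch {
      // Field.TermVector.WITH_POSITIONS_OFFSETS as independent flags:
      static FieldType withPositionsAndOffsets() {
        FieldType t = new FieldType(TextField.DEFAULT_TYPE);
        t.setStoreTermVectors(true);
        t.setStoreTermVectorPositions(true);
        t.setStoreTermVectorOffsets(true);
        return t;
      }
    }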
| Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1143083)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy)
|
| @@ -21,10 +21,9 @@
|
| import java.util.Random; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field.TermVector; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.DocsEnum; |
| import org.apache.lucene.index.Fields; |
| import org.apache.lucene.index.IndexReader; |
| @@ -140,7 +139,12 @@
|
| ((TieredMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false); |
| IndexWriter writer = new IndexWriter(dir, cfg); |
| Document doc = new Document(); |
| - doc.add(newField("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType storedTextType = new FieldType(TextField.DEFAULT_TYPE); |
| + storedTextType.setStored(true); |
| + storedTextType.setStoreTermVectors(true); |
| + storedTextType.setStoreTermVectorPositions(true); |
| + storedTextType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("f", text, storedTextType)); |
| writer.addDocument(doc); |
| writer.commit(); |
| writer.addDocument(doc); |
| @@ -148,8 +152,8 @@
|
| writer.close(); |
| IndexReader reader = IndexReader.open(dir, null, true, 1, new AppendingCodecProvider()); |
| assertEquals(2, reader.numDocs()); |
| - doc = reader.document(0); |
| - assertEquals(text, doc.get("f")); |
| + org.apache.lucene.document.Document doc2 = reader.document(0); |
| + assertEquals(text, doc2.get("f")); |
| Fields fields = MultiFields.getFields(reader); |
| Terms terms = fields.terms("f"); |
| assertNotNull(terms); |
| Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java (revision 1143083)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java (working copy)
|
| @@ -19,8 +19,10 @@
|
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.analysis.MockTokenizer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.store.Directory; |
| @@ -199,13 +201,15 @@
|
| /** |
| * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n^2 (squared). |
| */ |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 1; i <= 10; i++) { |
| Document doc = new Document(); |
| String content = getContent(i); |
| |
| - doc.add(newField(random, "FIELD_1", content, Field.Store.YES,Field.Index.ANALYZED, Field.TermVector.NO)); |
| + doc.add(newField(random, "FIELD_1", content, customType)); |
| //add a different field |
| - doc.add(newField(random, "different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| + doc.add(newField(random, "different_field", "diff", customType)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -213,7 +217,7 @@
|
| //highest freq terms for a specific field. |
| for (int i = 1; i <= 10; i++) { |
| Document doc = new Document(); |
| - doc.add(newField(random, "different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| + doc.add(newField(random, "different_field", "diff", customType)); |
| writer.addDocument(doc); |
| } |
| // add some docs where tf < df so we can see if sorting works |
| @@ -224,7 +228,7 @@
|
| for (int i = 0; i < highTF; i++) { |
| content += "highTF "; |
| } |
| - doc.add(newField(random, "FIELD_1", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| + doc.add(newField(random, "FIELD_1", content, customType)); |
| writer.addDocument(doc); |
| // highTF medium df =5 |
| int medium_df = 5; |
| @@ -235,7 +239,7 @@
|
| for (int j = 0; j < tf; j++) { |
| newcontent += "highTFmedDF "; |
| } |
| - newdoc.add(newField(random, "FIELD_1", newcontent, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| + newdoc.add(newField(random, "FIELD_1", newcontent, customType)); |
| writer.addDocument(newdoc); |
| } |
| // add a doc with high tf in field different_field |
| @@ -245,7 +249,7 @@
|
| for (int i = 0; i < targetTF; i++) { |
| content += "TF150 "; |
| } |
| - doc.add(newField(random, "different_field", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| + doc.add(newField(random, "different_field", content, customType)); |
| writer.addDocument(doc); |
| writer.close(); |
| |
| Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
|
| ===================================================================
|
| --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 1143083)
|
| +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy)
|
| @@ -20,8 +20,10 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.FieldInvertState; |
| import org.apache.lucene.index.FieldNormModifier; |
| import org.apache.lucene.index.IndexReader; |
| @@ -70,16 +72,18 @@
|
| |
| for (int i = 0; i < NUM_DOCS; i++) { |
| Document d = new Document(); |
| - d.add(newField("field", "word", |
| - Field.Store.YES, Field.Index.ANALYZED)); |
| - d.add(newField("nonorm", "word", |
| - Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + d.add(newField("field", "word", customType)); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setTokenized(false); |
| + customType2.setOmitNorms(true); |
| + d.add(newField("nonorm", "word", customType2)); |
| |
| for (int j = 1; j <= i; j++) { |
| - d.add(newField("field", "crap", |
| - Field.Store.YES, Field.Index.ANALYZED)); |
| - d.add(newField("nonorm", "more words", |
| - Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); |
| + d.add(newField("field", "crap", customType)); |
| + d.add(newField("nonorm", "more words", customType2)); |
| } |
| writer.addDocument(d); |
| } |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy)
|
| @@ -21,8 +21,9 @@
|
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.analysis.MockTokenizer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.index.RandomIndexWriter; |
| @@ -61,10 +62,12 @@
|
| private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException |
| { |
| Document doc=new Document(); |
| - doc.add(newField("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED)); |
| - doc.add(newField("price",price,Field.Store.YES,Field.Index.ANALYZED)); |
| - doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED)); |
| - doc.add(newField("inStock",inStock,Field.Store.YES,Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("accessRights",accessRights,customType)); |
| + doc.add(newField("price",price,customType)); |
| + doc.add(newField("date",date,customType)); |
| + doc.add(newField("inStock",inStock,customType)); |
| writer.addDocument(doc); |
| } |
| |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (working copy)
|
| @@ -20,8 +20,9 @@
|
| import java.util.Calendar; |
| import java.util.GregorianCalendar; |
| |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.index.Term; |
| @@ -62,9 +63,12 @@
|
| |
| for (int i = 0; i < MAX; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + doc.add(newField("key", "" + (i + 1), customType)); |
| + doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", customType)); |
| + doc.add(newField("date", cal.getTime().toString(), customType)); |
| writer.addDocument(doc); |
| |
| cal.add(Calendar.DATE, 1); |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy)
|
| @@ -21,8 +21,9 @@
|
| import java.util.HashSet; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.DocsEnum; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.MultiFields; |
| @@ -76,9 +77,12 @@
|
| private void addDoc(RandomIndexWriter writer, String url, String text, String date) throws IOException |
| { |
| Document doc=new Document(); |
| - doc.add(newField(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("text",text,Field.Store.YES,Field.Index.ANALYZED)); |
| - doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + doc.add(newField(KEY_FIELD,url,customType)); |
| + doc.add(newField("text",text,TextField.DEFAULT_TYPE)); |
| + doc.add(newField("date",date,TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -89,7 +93,7 @@
|
| ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs; |
| for(int i=0;i<hits.length;i++) |
| { |
| - Document d=searcher.doc(hits[i].doc); |
| + org.apache.lucene.document.Document d=searcher.doc(hits[i].doc); |
| String url=d.get(KEY_FIELD); |
| assertFalse("No duplicate urls should be returned",results.contains(url)); |
| results.add(url); |
| @@ -103,7 +107,7 @@
|
| boolean dupsFound=false; |
| for(int i=0;i<hits.length;i++) |
| { |
| - Document d=searcher.doc(hits[i].doc); |
| + org.apache.lucene.document.Document d=searcher.doc(hits[i].doc); |
| String url=d.get(KEY_FIELD); |
| if(!dupsFound) |
| dupsFound=results.contains(url); |
| @@ -121,7 +125,7 @@
|
| assertTrue("Filtered searching should have found some matches",hits.length>0); |
| for(int i=0;i<hits.length;i++) |
| { |
| - Document d=searcher.doc(hits[i].doc); |
| + org.apache.lucene.document.Document d=searcher.doc(hits[i].doc); |
| String url=d.get(KEY_FIELD); |
| assertFalse("No duplicate urls should be returned",results.contains(url)); |
| results.add(url); |
| @@ -136,7 +140,7 @@
|
| assertTrue("Filtered searching should have found some matches",hits.length>0); |
| for(int i=0;i<hits.length;i++) |
| { |
| - Document d=searcher.doc(hits[i].doc); |
| + org.apache.lucene.document.Document d=searcher.doc(hits[i].doc); |
| String url=d.get(KEY_FIELD); |
| DocsEnum td = MultiFields.getTermDocsEnum(reader, |
| MultiFields.getDeletedDocs(reader), |
| @@ -160,7 +164,7 @@
|
| assertTrue("Filtered searching should have found some matches",hits.length>0); |
| for(int i=0;i<hits.length;i++) |
| { |
| - Document d=searcher.doc(hits[i].doc); |
| + org.apache.lucene.document.Document d=searcher.doc(hits[i].doc); |
| String url=d.get(KEY_FIELD); |
| DocsEnum td = MultiFields.getTermDocsEnum(reader, |
| MultiFields.getDeletedDocs(reader), |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java (working copy)
|
| @@ -22,8 +22,9 @@
|
| |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.index.Term; |
| @@ -65,8 +66,10 @@
|
| private void addDoc(RandomIndexWriter writer, String name, String id) throws IOException |
| { |
| Document doc=new Document(); |
| - doc.add(newField("name",name,Field.Store.YES,Field.Index.ANALYZED)); |
| - doc.add(newField("id",id,Field.Store.YES,Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("name",name,customType)); |
| + doc.add(newField("id",id,customType)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -85,7 +88,7 @@
|
| TopDocs topDocs = searcher.search(flt, 1); |
| ScoreDoc[] sd = topDocs.scoreDocs; |
| assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); |
| - Document doc=searcher.doc(sd[0].doc); |
| + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); |
| assertEquals("Should match most similar not most rare variant", "2",doc.get("id")); |
| } |
| //Test multiple input words are having variants produced |
| @@ -101,7 +104,7 @@
|
| TopDocs topDocs = searcher.search(flt, 1); |
| ScoreDoc[] sd = topDocs.scoreDocs; |
| assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); |
| - Document doc=searcher.doc(sd[0].doc); |
| + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); |
| assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); |
| } |
| //Test bug found when first query word does not match anything |
| @@ -116,7 +119,7 @@
|
| TopDocs topDocs = searcher.search(flt, 1); |
| ScoreDoc[] sd = topDocs.scoreDocs; |
| assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); |
| - Document doc=searcher.doc(sd[0].doc); |
| + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); |
| assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); |
| } |
| |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (working copy)
|
| @@ -18,8 +18,9 @@
|
| */ |
| |
| import java.util.HashSet; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.index.RandomIndexWriter; |
| @@ -56,7 +57,10 @@
|
| for (int i = 0; i < 100; i++) { |
| Document doc=new Document(); |
| int term=i*10; //terms are units of 10; |
| - doc.add(newField(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + doc.add(newField(fieldName,""+term,customType)); |
| w.addDocument(doc); |
| } |
| IndexReader reader = new SlowMultiReaderWrapper(w.getReader()); |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (working copy)
|
| @@ -4,8 +4,10 @@
|
| import java.text.Collator; |
| import java.util.Locale; |
| |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.search.BooleanClause.Occur; |
| @@ -55,7 +57,11 @@
|
| for (int i = 0; i < numDocs; i++) { |
| Document doc = new Document(); |
| String value = _TestUtil.randomUnicodeString(random); |
| - Field field = newField("field", value, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setOmitNorms(true); |
| + customType.setTokenized(false); |
| + Field field = newField("field", value, customType); |
| doc.add(field); |
| iw.addDocument(doc); |
| } |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (working copy)
|
| @@ -24,8 +24,9 @@
|
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.index.Term; |
| import org.apache.lucene.index.Terms; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.index.TermsEnum; |
| |
| @@ -47,7 +48,7 @@
|
| directory = newDirectory(); |
| RandomIndexWriter writer = new RandomIndexWriter(random, directory); |
| Document doc = new Document(); |
| - doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| reader = writer.getReader(); |
| writer.close(); |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (working copy)
|
| @@ -20,8 +20,10 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.CorruptIndexException; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.Term; |
| @@ -62,12 +64,10 @@
|
| // Field.Store.NO, Field.Index.ANALYZED)); |
| // writer.addDocument(doc); |
| // doc = new Document(); |
| - doc.add(newField("field", "auto update", Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("field", "auto update", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| doc = new Document(); |
| - doc.add(newField("field", "first auto update", Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("field", "first auto update", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.optimize(); |
| writer.close(); |
| @@ -87,13 +87,13 @@
|
| LockObtainFailedException, IOException { |
| // creating a document to store |
| Document lDoc = new Document(); |
| - lDoc.add(newField("field", "a1 b1", Field.Store.NO, |
| - Field.Index.ANALYZED_NO_NORMS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| + lDoc.add(newField("field", "a1 b1", customType)); |
| |
| // creating a document to store |
| Document lDoc2 = new Document(); |
| - lDoc2.add(newField("field", "a2 b2", Field.Store.NO, |
| - Field.Index.ANALYZED_NO_NORMS)); |
| + lDoc2.add(newField("field", "a2 b2", customType)); |
| |
| // creating first index writer |
| IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig( |
| Index: lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
|
| ===================================================================
|
| --- lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (revision 1143083)
|
| +++ lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (working copy)
|
| @@ -25,8 +25,10 @@
|
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.analysis.MockTokenizer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.search.BooleanClause; |
| @@ -66,7 +68,9 @@
|
| |
| private void addDoc(RandomIndexWriter writer, String text) throws IOException { |
| Document doc = new Document(); |
| - doc.add(newField("text", text, Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("text", text, customType)); |
| writer.addDocument(doc); |
| } |
| |
| Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
|
| ===================================================================
|
| --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 1143083)
|
| +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy)
|
| @@ -21,8 +21,9 @@
|
| |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.queryParser.QueryParser; |
| import org.apache.lucene.search.IndexSearcher; |
| @@ -97,7 +98,7 @@
|
| TopDocs td = searcher.search(q, 10); |
| ScoreDoc[] sd = td.scoreDocs; |
| for (int i = 0; i < sd.length; i++) { |
| - Document doc = searcher.doc(sd[i].doc); |
| + org.apache.lucene.document.Document doc = searcher.doc(sd[i].doc); |
| String id = doc.get("id"); |
| assertTrue(qString + "matched doc#" + id + " not expected", expecteds |
| .contains(id)); |
| @@ -113,12 +114,12 @@
|
| super.setUp(); |
| rd = newDirectory(); |
| IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < docsContent.length; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("name", docsContent[i].name, Field.Store.YES, |
| - Field.Index.ANALYZED)); |
| - doc.add(newField("id", docsContent[i].id, Field.Store.YES, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("name", docsContent[i].name, customType)); |
| + doc.add(newField("id", docsContent[i].id, customType)); |
| w.addDocument(doc); |
| } |
| w.close(); |
| Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
|
| ===================================================================
|
| --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (revision 1143083)
|
| +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (working copy)
|
| @@ -24,8 +24,8 @@
|
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.TokenStream; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.queryParser.core.QueryNodeException; |
| import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator; |
| @@ -320,8 +320,7 @@
|
| Directory ramDir = newDirectory(); |
| IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); |
| Document doc = new Document(); |
| - doc.add(newField("body", "blah the footest blah", Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("body", "blah the footest blah", TextField.DEFAULT_TYPE)); |
| iw.addDocument(doc); |
| iw.close(); |
| |
| Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
|
| ===================================================================
|
| --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (revision 1143083)
|
| +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (working copy)
|
| @@ -40,8 +40,8 @@
|
| import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; |
| import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; |
| import org.apache.lucene.document.DateTools; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.Term; |
| @@ -1226,7 +1226,7 @@
|
| Directory dir = newDirectory(); |
| IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new CannedAnalyzer())); |
| Document doc = new Document(); |
| - doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "", TextField.DEFAULT_TYPE)); |
| w.addDocument(doc); |
| IndexReader r = IndexReader.open(w, true); |
| IndexSearcher s = newSearcher(r); |
| Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java
|
| ===================================================================
|
| --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (revision 1143083)
|
| +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (working copy)
|
| @@ -24,8 +24,9 @@
|
| import org.apache.lucene.store.RAMDirectory; |
| import org.apache.lucene.util.Version; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.IndexWriterConfig; |
| |
| @@ -44,7 +45,7 @@
|
| new MockAnalyzer(random))); |
| for (int j = 0; j < docs.length; j++) { |
| Document d = new Document(); |
| - d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED)); |
| + d.add(new Field(fieldName, TextField.DEFAULT_TYPE, docs[j])); |
| writer.addDocument(d); |
| } |
| writer.close(); |
| Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
|
| ===================================================================
|
| --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1143083)
|
| +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy)
|
| @@ -22,9 +22,11 @@
|
| import java.util.Map; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.NumericField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.NumericField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.Term; |
| @@ -97,26 +99,30 @@
|
| private void addPoint(IndexWriter writer, String name, double lat, double lng) throws IOException{ |
| |
| Document doc = new Document(); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("name", name, customType)); |
| |
| - doc.add(newField("name", name,Field.Store.YES, Field.Index.ANALYZED)); |
| - |
| // convert the lat / long to lucene fields |
| - doc.add(new NumericField(latField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lat)); |
| - doc.add(new NumericField(lngField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lng)); |
| + FieldType customType2 = new FieldType(NumericField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + doc.add(new NumericField(latField, Integer.MAX_VALUE, customType2).setDoubleValue(lat)); |
| + doc.add(new NumericField(lngField, Integer.MAX_VALUE, customType2).setDoubleValue(lng)); |
| |
| // add a default meta field to make searching all documents easy |
| - doc.add(newField("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("metafile", "doc", customType)); |
| |
| int ctpsize = ctps.size(); |
| + FieldType customType3 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType3.setStored(true); |
| + customType3.setTokenized(false); |
| + customType3.setOmitNorms(true); |
| for (int i =0; i < ctpsize; i++){ |
| CartesianTierPlotter ctp = ctps.get(i); |
| - doc.add(new NumericField(ctp.getTierFieldName(), Integer.MAX_VALUE, |
| - Field.Store.YES, |
| - true).setDoubleValue(ctp.getTierBoxId(lat,lng))); |
| + doc.add(new NumericField(ctp.getTierFieldName(), Integer.MAX_VALUE, customType2).setDoubleValue(ctp.getTierBoxId(lat,lng))); |
| |
| - doc.add(newField(geoHashPrefix, GeoHashUtils.encode(lat,lng), |
| - Field.Store.YES, |
| - Field.Index.NOT_ANALYZED_NO_NORMS)); |
| + doc.add(newField(geoHashPrefix, GeoHashUtils.encode(lat,lng), customType3)); |
| } |
| writer.addDocument(doc); |
| |
| @@ -278,7 +284,7 @@
|
| assertEquals(2, results); |
| double lastDistance = 0; |
| for(int i =0 ; i < results; i++){ |
| - Document d = searcher.doc(scoreDocs[i].doc); |
| + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); |
| |
| String name = d.get("name"); |
| double rsLat = Double.parseDouble(d.get(latField)); |
| @@ -374,7 +380,7 @@
|
| assertEquals(18, results); |
| double lastDistance = 0; |
| for(int i =0 ; i < results; i++){ |
| - Document d = searcher.doc(scoreDocs[i].doc); |
| + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); |
| String name = d.get("name"); |
| double rsLat = Double.parseDouble(d.get(latField)); |
| double rsLng = Double.parseDouble(d.get(lngField)); |
| @@ -469,7 +475,7 @@
|
| assertEquals(expected[x], results); |
| double lastDistance = 0; |
| for(int i =0 ; i < results; i++){ |
| - Document d = searcher.doc(scoreDocs[i].doc); |
| + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); |
| |
| String name = d.get("name"); |
| double rsLat = Double.parseDouble(d.get(latField)); |
| @@ -564,7 +570,7 @@
|
| assertEquals(expected[x], results); |
| |
| for(int i =0 ; i < results; i++){ |
| - Document d = searcher.doc(scoreDocs[i].doc); |
| + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); |
| |
| String name = d.get("name"); |
| double rsLat = Double.parseDouble(d.get(latField)); |
| Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java
|
| ===================================================================
|
| --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (revision 1143083)
|
| +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (working copy)
|
| @@ -19,9 +19,11 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.NumericField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.NumericField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.index.Term; |
| @@ -62,15 +64,19 @@
|
| private void addPoint(IndexWriter writer, String name, double lat, double lng) throws IOException{ |
| |
| Document doc = new Document(); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("name", name, customType)); |
| |
| - doc.add(newField("name", name,Field.Store.YES, Field.Index.ANALYZED)); |
| - |
| // convert the lat / long to lucene fields |
| - doc.add(new NumericField(latField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lat)); |
| - doc.add(new NumericField(lngField, Integer.MAX_VALUE,Field.Store.YES, true).setDoubleValue(lng)); |
| + FieldType customType2 = new FieldType(NumericField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + doc.add(new NumericField(latField, Integer.MAX_VALUE, customType2).setDoubleValue(lat)); |
| + doc.add(new NumericField(lngField, Integer.MAX_VALUE, customType2).setDoubleValue(lng)); |
| |
| // add a default meta field to make searching all documents easy |
| - doc.add(newField("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("metafile", "doc", customType)); |
| writer.addDocument(doc); |
| |
| } |
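Numeric fields follow the same scheme, except the base type is NumericField.DEFAULT_TYPE and the old boolean index flag plus Field.Store.YES collapse into the FieldType. A sketch of the pattern used for latField and lngField above (the fluent return of setDoubleValue is assumed, as the hunks rely on it):

    import org.apache.lucene.document2.Document;
    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.NumericField;

    class NumericMigrationSketch {
      // Before: new NumericField(name, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(v)
      static void addStoredDouble(Document doc, String name, double value) {
        FieldType t = new FieldType(NumericField.DEFAULT_TYPE);
        t.setStored(true); // Field.Store.YES
        doc.add(new NumericField(name, Integer.MAX_VALUE, t).setDoubleValue(value));
      }
    }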
| Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
|
| ===================================================================
|
| --- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 1143083)
|
| +++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy)
|
| @@ -9,8 +9,10 @@
|
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.analysis.MockTokenFilter; |
| import org.apache.lucene.analysis.MockTokenizer; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.NumericField; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.NumericField; |
| +import org.apache.lucene.document2.TextField; |
| +import org.apache.lucene.document2.Document; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.search.IndexSearcher; |
| @@ -41,200 +43,202 @@
|
| */ |
| |
| public class TestParser extends LuceneTestCase { |
| - private static CoreParser builder; |
| - private static Directory dir; |
| - private static IndexReader reader; |
| - private static IndexSearcher searcher; |
| + private static CoreParser builder; |
| + private static Directory dir; |
| + private static IndexReader reader; |
| + private static IndexSearcher searcher; |
| |
| - @BeforeClass |
| - public static void beforeClass() throws Exception { |
| - // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): |
| - Analyzer analyzer=new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); |
| + @BeforeClass |
| + public static void beforeClass() throws Exception { |
| + // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): |
| + Analyzer analyzer=new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); |
| //initialize the parser |
| - builder=new CorePlusExtensionsParser("contents",analyzer); |
| - |
| - BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); |
| - dir=newDirectory(); |
| - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_40, analyzer)); |
| - String line = d.readLine(); |
| - while(line!=null) |
| - { |
| - int endOfDate=line.indexOf('\t'); |
| - String date=line.substring(0,endOfDate).trim(); |
| - String content=line.substring(endOfDate).trim(); |
| - org.apache.lucene.document.Document doc =new org.apache.lucene.document.Document(); |
| - doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED)); |
| - doc.add(newField("contents",content,Field.Store.YES,Field.Index.ANALYZED)); |
| - NumericField numericField = new NumericField("date2"); |
| - numericField.setIntValue(Integer.valueOf(date)); |
| - doc.add(numericField); |
| - writer.addDocument(doc); |
| - line=d.readLine(); |
| - } |
| - d.close(); |
| + builder=new CorePlusExtensionsParser("contents",analyzer); |
| + |
| + BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); |
| + dir=newDirectory(); |
| + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_40, analyzer)); |
| + String line = d.readLine(); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + while(line!=null) |
| + { |
| + int endOfDate=line.indexOf('\t'); |
| + String date=line.substring(0,endOfDate).trim(); |
| + String content=line.substring(endOfDate).trim(); |
| + Document doc = new Document(); |
| + doc.add(newField("date",date,customType)); |
| + doc.add(newField("contents",content,customType)); |
| + NumericField numericField = new NumericField("date2"); |
| + numericField.setIntValue(Integer.valueOf(date)); |
| + doc.add(numericField); |
| + writer.addDocument(doc); |
| + line=d.readLine(); |
| + } |
| + d.close(); |
| writer.close(); |
| - reader=IndexReader.open(dir, true); |
| - searcher=newSearcher(reader); |
| - |
| - } |
| - |
| - |
| - |
| - |
| - @AfterClass |
| - public static void afterClass() throws Exception { |
| - reader.close(); |
| - searcher.close(); |
| - dir.close(); |
| - reader = null; |
| - searcher = null; |
| - dir = null; |
| - builder = null; |
| - } |
| - |
| - public void testSimpleXML() throws ParserException, IOException |
| - { |
| - Query q=parse("TermQuery.xml"); |
| - dumpResults("TermQuery", q, 5); |
| - } |
| - public void testSimpleTermsQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("TermsQuery.xml"); |
| - dumpResults("TermsQuery", q, 5); |
| - } |
| - public void testBooleanQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("BooleanQuery.xml"); |
| - dumpResults("BooleanQuery", q, 5); |
| - } |
| - public void testRangeFilterQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("RangeFilterQuery.xml"); |
| - dumpResults("RangeFilter", q, 5); |
| - } |
| - public void testUserQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("UserInputQuery.xml"); |
| - dumpResults("UserInput with Filter", q, 5); |
| - } |
| - |
| - public void testCustomFieldUserQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("UserInputQueryCustomField.xml"); |
| - int h = searcher.search(q, null, 1000).totalHits; |
| - assertEquals("UserInputQueryCustomField should produce 0 result ", 0,h); |
| - } |
| - |
| - public void testLikeThisQueryXML() throws Exception |
| - { |
| - Query q=parse("LikeThisQuery.xml"); |
| - dumpResults("like this", q, 5); |
| - } |
| - public void testBoostingQueryXML() throws Exception |
| - { |
| - Query q=parse("BoostingQuery.xml"); |
| - dumpResults("boosting ",q, 5); |
| - } |
| - public void testFuzzyLikeThisQueryXML() throws Exception |
| - { |
| - Query q=parse("FuzzyLikeThisQuery.xml"); |
| - //show rewritten fuzzyLikeThisQuery - see what is being matched on |
| - if(VERBOSE) |
| - { |
| - System.out.println(q.rewrite(reader)); |
| - } |
| - dumpResults("FuzzyLikeThis", q, 5); |
| - } |
| - public void testTermsFilterXML() throws Exception |
| - { |
| - Query q=parse("TermsFilterQuery.xml"); |
| - dumpResults("Terms Filter",q, 5); |
| - } |
| + reader=IndexReader.open(dir, true); |
| + searcher=newSearcher(reader); |
| + |
| + } |
| + |
| + |
| + |
| + |
| + @AfterClass |
| + public static void afterClass() throws Exception { |
| + reader.close(); |
| + searcher.close(); |
| + dir.close(); |
| + reader = null; |
| + searcher = null; |
| + dir = null; |
| + builder = null; |
| + } |
| + |
| + public void testSimpleXML() throws ParserException, IOException |
| + { |
| + Query q=parse("TermQuery.xml"); |
| + dumpResults("TermQuery", q, 5); |
| + } |
| + public void testSimpleTermsQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("TermsQuery.xml"); |
| + dumpResults("TermsQuery", q, 5); |
| + } |
| + public void testBooleanQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("BooleanQuery.xml"); |
| + dumpResults("BooleanQuery", q, 5); |
| + } |
| + public void testRangeFilterQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("RangeFilterQuery.xml"); |
| + dumpResults("RangeFilter", q, 5); |
| + } |
| + public void testUserQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("UserInputQuery.xml"); |
| + dumpResults("UserInput with Filter", q, 5); |
| + } |
| + |
| + public void testCustomFieldUserQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("UserInputQueryCustomField.xml"); |
| + int h = searcher.search(q, null, 1000).totalHits; |
| + assertEquals("UserInputQueryCustomField should produce 0 result ", 0,h); |
| + } |
| + |
| + public void testLikeThisQueryXML() throws Exception |
| + { |
| + Query q=parse("LikeThisQuery.xml"); |
| + dumpResults("like this", q, 5); |
| + } |
| + public void testBoostingQueryXML() throws Exception |
| + { |
| + Query q=parse("BoostingQuery.xml"); |
| + dumpResults("boosting ",q, 5); |
| + } |
| + public void testFuzzyLikeThisQueryXML() throws Exception |
| + { |
| + Query q=parse("FuzzyLikeThisQuery.xml"); |
| + //show rewritten fuzzyLikeThisQuery - see what is being matched on |
| + if(VERBOSE) |
| + { |
| + System.out.println(q.rewrite(reader)); |
| + } |
| + dumpResults("FuzzyLikeThis", q, 5); |
| + } |
| + public void testTermsFilterXML() throws Exception |
| + { |
| + Query q=parse("TermsFilterQuery.xml"); |
| + dumpResults("Terms Filter",q, 5); |
| + } |
| public void testBoostingTermQueryXML() throws Exception |
| - { |
| - Query q=parse("BoostingTermQuery.xml"); |
| - dumpResults("BoostingTermQuery",q, 5); |
| - } |
| + { |
| + Query q=parse("BoostingTermQuery.xml"); |
| + dumpResults("BoostingTermQuery",q, 5); |
| + } |
| public void testSpanTermXML() throws Exception |
| - { |
| - Query q=parse("SpanQuery.xml"); |
| - dumpResults("Span Query",q, 5); |
| - } |
| - public void testConstantScoreQueryXML() throws Exception |
| - { |
| - Query q=parse("ConstantScoreQuery.xml"); |
| - dumpResults("ConstantScoreQuery",q, 5); |
| - } |
| - public void testMatchAllDocsPlusFilterXML() throws ParserException, IOException |
| - { |
| - Query q=parse("MatchAllDocsQuery.xml"); |
| - dumpResults("MatchAllDocsQuery with range filter", q, 5); |
| - } |
| - public void testBooleanFilterXML() throws ParserException, IOException |
| - { |
| - Query q=parse("BooleanFilter.xml"); |
| - dumpResults("Boolean filter", q, 5); |
| - } |
| - public void testNestedBooleanQuery() throws ParserException, IOException |
| - { |
| - Query q=parse("NestedBooleanQuery.xml"); |
| - dumpResults("Nested Boolean query", q, 5); |
| - } |
| - public void testCachedFilterXML() throws ParserException, IOException |
| - { |
| - Query q=parse("CachedFilter.xml"); |
| - dumpResults("Cached filter", q, 5); |
| - } |
| - public void testDuplicateFilterQueryXML() throws ParserException, IOException |
| - { |
| + { |
| + Query q=parse("SpanQuery.xml"); |
| + dumpResults("Span Query",q, 5); |
| + } |
| + public void testConstantScoreQueryXML() throws Exception |
| + { |
| + Query q=parse("ConstantScoreQuery.xml"); |
| + dumpResults("ConstantScoreQuery",q, 5); |
| + } |
| + public void testMatchAllDocsPlusFilterXML() throws ParserException, IOException |
| + { |
| + Query q=parse("MatchAllDocsQuery.xml"); |
| + dumpResults("MatchAllDocsQuery with range filter", q, 5); |
| + } |
| + public void testBooleanFilterXML() throws ParserException, IOException |
| + { |
| + Query q=parse("BooleanFilter.xml"); |
| + dumpResults("Boolean filter", q, 5); |
| + } |
| + public void testNestedBooleanQuery() throws ParserException, IOException |
| + { |
| + Query q=parse("NestedBooleanQuery.xml"); |
| + dumpResults("Nested Boolean query", q, 5); |
| + } |
| + public void testCachedFilterXML() throws ParserException, IOException |
| + { |
| + Query q=parse("CachedFilter.xml"); |
| + dumpResults("Cached filter", q, 5); |
| + } |
| + public void testDuplicateFilterQueryXML() throws ParserException, IOException |
| + { |
| Assume.assumeTrue(searcher.getIndexReader().getSequentialSubReaders() == null || |
| searcher.getIndexReader().getSequentialSubReaders().length == 1); |
| - Query q=parse("DuplicateFilterQuery.xml"); |
| - int h = searcher.search(q, null, 1000).totalHits; |
| - assertEquals("DuplicateFilterQuery should produce 1 result ", 1,h); |
| - } |
| - |
| - public void testNumericRangeFilterQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("NumericRangeFilterQuery.xml"); |
| - dumpResults("NumericRangeFilter", q, 5); |
| - } |
| - |
| - public void testNumericRangeQueryQueryXML() throws ParserException, IOException |
| - { |
| - Query q=parse("NumericRangeQueryQuery.xml"); |
| - dumpResults("NumericRangeQuery", q, 5); |
| - } |
| - |
| + Query q=parse("DuplicateFilterQuery.xml"); |
| + int h = searcher.search(q, null, 1000).totalHits; |
| + assertEquals("DuplicateFilterQuery should produce 1 result ", 1,h); |
| + } |
| + |
| + public void testNumericRangeFilterQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("NumericRangeFilterQuery.xml"); |
| + dumpResults("NumericRangeFilter", q, 5); |
| + } |
| + |
| + public void testNumericRangeQueryQueryXML() throws ParserException, IOException |
| + { |
| + Query q=parse("NumericRangeQueryQuery.xml"); |
| + dumpResults("NumericRangeQuery", q, 5); |
| + } |
| + |
| |
| |
| - //================= Helper methods =================================== |
| - private Query parse(String xmlFileName) throws ParserException, IOException |
| - { |
| - InputStream xmlStream=TestParser.class.getResourceAsStream(xmlFileName); |
| - Query result=builder.parse(xmlStream); |
| - xmlStream.close(); |
| - return result; |
| - } |
| - private void dumpResults(String qType,Query q, int numDocs) throws IOException |
| - { |
| + //================= Helper methods =================================== |
| + private Query parse(String xmlFileName) throws ParserException, IOException |
| + { |
| + InputStream xmlStream=TestParser.class.getResourceAsStream(xmlFileName); |
| + Query result=builder.parse(xmlStream); |
| + xmlStream.close(); |
| + return result; |
| + } |
| + private void dumpResults(String qType,Query q, int numDocs) throws IOException |
| + { |
| if (VERBOSE) { |
| System.out.println("TEST: query=" + q); |
| } |
| TopDocs hits = searcher.search(q, null, numDocs); |
| - assertTrue(qType +" should produce results ", hits.totalHits>0); |
| - if(VERBOSE) |
| - { |
| - System.out.println("========="+qType+"============"); |
| - ScoreDoc[] scoreDocs = hits.scoreDocs; |
| - for(int i=0;i<Math.min(numDocs,hits.totalHits);i++) |
| - { |
| - org.apache.lucene.document.Document ldoc=searcher.doc(scoreDocs[i].doc); |
| - System.out.println("["+ldoc.get("date")+"]"+ldoc.get("contents")); |
| - } |
| - System.out.println(); |
| - } |
| - } |
| - |
| + assertTrue(qType +" should produce results ", hits.totalHits>0); |
| + if(VERBOSE) |
| + { |
| + System.out.println("========="+qType+"============"); |
| + ScoreDoc[] scoreDocs = hits.scoreDocs; |
| + for(int i=0;i<Math.min(numDocs,hits.totalHits);i++) |
| + { |
| + org.apache.lucene.document.Document ldoc=searcher.doc(scoreDocs[i].doc); |
| + System.out.println("["+ldoc.get("date")+"]"+ldoc.get("contents")); |
| + } |
| + System.out.println(); |
| + } |
| + } |
| + |
| |
| } |
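|
| For reference, the indexing pattern the rewritten setUp above follows looks |
| like this in isolation: a minimal sketch against the document2 API this patch |
| introduces, with field names taken from the test data (the IndexWriter is |
| assumed to be open): |
|
|   import java.io.IOException; |
|   import org.apache.lucene.document2.Document; |
|   import org.apache.lucene.document2.Field; |
|   import org.apache.lucene.document2.FieldType; |
|   import org.apache.lucene.document2.NumericField; |
|   import org.apache.lucene.document2.TextField; |
|   import org.apache.lucene.index.IndexWriter; |
|
|   static void addEntry(IndexWriter writer, String date, String content) throws IOException { |
|     // copy the frozen default type, then customize: stored + analyzed text |
|     FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
|     customType.setStored(true); |
|     Document doc = new Document(); |
|     doc.add(new Field("date", customType, date)); |
|     doc.add(new Field("contents", customType, content)); |
|     // the same date again, trie-encoded so NumericRangeQuery/Filter can hit it |
|     doc.add(new NumericField("date2").setIntValue(Integer.parseInt(date))); |
|     writer.addDocument(doc); |
|   } |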
| Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
|
| ===================================================================
|
| --- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (revision 1143083)
|
| +++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (working copy)
|
| @@ -10,7 +10,9 @@
|
| |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriter; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.Query; |
| @@ -124,17 +126,19 @@
|
| } |
| |
| //Helper method to construct Lucene documents used in our tests |
| - org.apache.lucene.document.Document getDocumentFromString(String nameValuePairs) |
| + org.apache.lucene.document2.Document getDocumentFromString(String nameValuePairs) |
| { |
| - org.apache.lucene.document.Document result=new org.apache.lucene.document.Document(); |
| + org.apache.lucene.document2.Document result=new org.apache.lucene.document2.Document(); |
| StringTokenizer st=new StringTokenizer(nameValuePairs,"\t="); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| while(st.hasMoreTokens()) |
| { |
| String name=st.nextToken().trim(); |
| if(st.hasMoreTokens()) |
| { |
| String value=st.nextToken().trim(); |
| - result.add(newField(name,value,Field.Store.YES,Field.Index.ANALYZED)); |
| + result.add(newField(name,value,customType)); |
| } |
| } |
| return result; |
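|
| The copy-then-set idiom above exists because the per-class DEFAULT_TYPE |
| instances are frozen once their static initializers run (see the BinaryField |
| and NumericField hunks below). Assuming TextField.DEFAULT_TYPE is frozen the |
| same way, the contrast is: |
|
|   FieldType customType = new FieldType(TextField.DEFAULT_TYPE); // mutable copy |
|   customType.setStored(true);                                   // fine |
|   // TextField.DEFAULT_TYPE.setStored(true); // not allowed: the shared default is frozen |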
| Index: lucene/src/java/org/apache/lucene/document2/BinaryField.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/document2/BinaryField.java (revision 1143083)
|
| +++ lucene/src/java/org/apache/lucene/document2/BinaryField.java (working copy)
|
| @@ -24,11 +24,16 @@
|
| DEFAULT_TYPE.setStored(true); |
| DEFAULT_TYPE.freeze(); |
| } |
| - |
| + |
| public BinaryField(String name, byte[] value) { |
| super(name, BinaryField.DEFAULT_TYPE, value); |
| this.isBinary = true; |
| } |
| + |
| + public BinaryField(String name, byte[] value, int offset, int length) { |
| + super(name, BinaryField.DEFAULT_TYPE, value, offset, length); |
| + this.isBinary = true; |
| + } |
| |
| public boolean isNumeric() { |
| return false; |
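|
| The new offset/length constructor lets a caller index a slice of a reusable |
| buffer without first copying it into a right-sized array. A minimal sketch |
| (the buffer, the byte count, and the field name are illustrative): |
|
|   byte[] buffer = new byte[1024]; |
|   int numBytes = 3; // e.g. how many bytes a reader just filled in |
|   buffer[0] = 1; buffer[1] = 2; buffer[2] = 3; |
|   doc.add(new BinaryField("payload", buffer, 0, numBytes)); |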
| Index: lucene/src/java/org/apache/lucene/document2/DateTools.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/document2/DateTools.java (revision 0)
|
| +++ lucene/src/java/org/apache/lucene/document2/DateTools.java (revision 0)
|
| @@ -0,0 +1,210 @@
|
| +package org.apache.lucene.document2; |
| + |
| +/** |
| + * Licensed to the Apache Software Foundation (ASF) under one or more |
| + * contributor license agreements. See the NOTICE file distributed with |
| + * this work for additional information regarding copyright ownership. |
| + * The ASF licenses this file to You under the Apache License, Version 2.0 |
| + * (the "License"); you may not use this file except in compliance with |
| + * the License. You may obtain a copy of the License at |
| + * |
| + * http://www.apache.org/licenses/LICENSE-2.0 |
| + * |
| + * Unless required by applicable law or agreed to in writing, software |
| + * distributed under the License is distributed on an "AS IS" BASIS, |
| + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| + * See the License for the specific language governing permissions and |
| + * limitations under the License. |
| + */ |
| + |
| +import org.apache.lucene.search.NumericRangeQuery; // for javadocs |
| +import org.apache.lucene.util.NumericUtils; // for javadocs |
| + |
| +import java.text.ParseException; |
| +import java.text.SimpleDateFormat; |
| +import java.util.Calendar; |
| +import java.util.Date; |
| +import java.util.Locale; |
| +import java.util.TimeZone; |
| + |
| +/** |
| + * Provides support for converting dates to strings and vice-versa. |
| + * The strings are structured so that lexicographic sorting orders |
| + * them by date, which makes them suitable for use as field values |
| + * and search terms. |
| + * |
| + * <P>This class also helps you to limit the resolution of your dates. Do not |
| + * save dates with a finer resolution than you really need: finer resolutions |
| + * make RangeQuery and PrefixQuery use more memory and run more slowly. |
| + * |
| + * <P> |
| + * Another approach is {@link NumericUtils}, which provides a sortable binary |
| + * representation (prefix encoded) of numeric values, to which dates and times |
| + * can be reduced. To index a {@link Date} or {@link Calendar}, get the unix |
| + * timestamp as a <code>long</code> using {@link Date#getTime} or |
| + * {@link Calendar#getTimeInMillis}, index it as a numeric value with |
| + * {@link NumericField}, and use {@link NumericRangeQuery} to query it. |
| + */ |
| +public class DateTools { |
| + |
| + final static TimeZone GMT = TimeZone.getTimeZone("GMT"); |
| + |
| + private static final ThreadLocal<Calendar> TL_CAL = new ThreadLocal<Calendar>() { |
| + @Override |
| + protected Calendar initialValue() { |
| + return Calendar.getInstance(GMT, Locale.US); |
| + } |
| + }; |
| + |
| + //indexed by format length |
| + private static final ThreadLocal<SimpleDateFormat[]> TL_FORMATS = new ThreadLocal<SimpleDateFormat[]>() { |
| + @Override |
| + protected SimpleDateFormat[] initialValue() { |
| + SimpleDateFormat[] arr = new SimpleDateFormat[Resolution.MILLISECOND.formatLen+1]; |
| + for (Resolution resolution : Resolution.values()) { |
| + arr[resolution.formatLen] = (SimpleDateFormat)resolution.format.clone(); |
| + } |
| + return arr; |
| + } |
| + }; |
| + |
| + // cannot create, the class has static methods only |
| + private DateTools() {} |
| + |
| + /** |
| + * Converts a Date to a string suitable for indexing. |
| + * |
| + * @param date the date to be converted |
| + * @param resolution the desired resolution, see |
| + * {@link #round(Date, DateTools.Resolution)} |
| + * @return a string in format <code>yyyyMMddHHmmssSSS</code> or shorter, |
| + * depending on <code>resolution</code>; using GMT as timezone |
| + */ |
| + public static String dateToString(Date date, Resolution resolution) { |
| + return timeToString(date.getTime(), resolution); |
| + } |
| + |
| + /** |
| + * Converts a millisecond time to a string suitable for indexing. |
| + * |
| + * @param time the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT |
| + * @param resolution the desired resolution, see |
| + * {@link #round(long, DateTools.Resolution)} |
| + * @return a string in format <code>yyyyMMddHHmmssSSS</code> or shorter, |
| + * depending on <code>resolution</code>; using GMT as timezone |
| + */ |
| + public static String timeToString(long time, Resolution resolution) { |
| + final Date date = new Date(round(time, resolution)); |
| + return TL_FORMATS.get()[resolution.formatLen].format(date); |
| + } |
| + |
| + /** |
| + * Converts a string produced by <code>timeToString</code> or |
| + * <code>dateToString</code> back to a time, represented as the |
| + * number of milliseconds since January 1, 1970, 00:00:00 GMT. |
| + * |
| + * @param dateString the date string to be converted |
| + * @return the number of milliseconds since January 1, 1970, 00:00:00 GMT |
| + * @throws ParseException if <code>dateString</code> is not in the |
| + * expected format |
| + */ |
| + public static long stringToTime(String dateString) throws ParseException { |
| + return stringToDate(dateString).getTime(); |
| + } |
| + |
| + /** |
| + * Converts a string produced by <code>timeToString</code> or |
| + * <code>dateToString</code> back to a time, represented as a |
| + * Date object. |
| + * |
| + * @param dateString the date string to be converted |
| + * @return the parsed time as a Date object |
| + * @throws ParseException if <code>dateString</code> is not in the |
| + * expected format |
| + */ |
| + public static Date stringToDate(String dateString) throws ParseException { |
| + try { |
| + return TL_FORMATS.get()[dateString.length()].parse(dateString); |
| + } catch (Exception e) { |
| + throw new ParseException("Input is not a valid date string: " + dateString, 0); |
| + } |
| + } |
| + |
| + /** |
| + * Limit a date's resolution. For example, the date <code>2004-09-21 13:50:11</code> |
| + * will be changed to <code>2004-09-01 00:00:00</code> when using |
| + * <code>Resolution.MONTH</code>. |
| + * |
| +   * @param date the date to be rounded |
| +   * @param resolution The desired resolution of the date to be returned |
| + * @return the date with all values more precise than <code>resolution</code> |
| + * set to 0 or 1 |
| + */ |
| + public static Date round(Date date, Resolution resolution) { |
| + return new Date(round(date.getTime(), resolution)); |
| + } |
| + |
| + /** |
| + * Limit a date's resolution. For example, the date <code>1095767411000</code> |
| + * (which represents 2004-09-21 13:50:11) will be changed to |
| + * <code>1093989600000</code> (2004-09-01 00:00:00) when using |
| + * <code>Resolution.MONTH</code>. |
| + * |
| +   * @param time the time to be rounded, expressed as milliseconds since January 1, 1970, 00:00:00 GMT |
| +   * @param resolution The desired resolution of the date to be returned |
| + * @return the date with all values more precise than <code>resolution</code> |
| + * set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT |
| + */ |
| + @SuppressWarnings("fallthrough") |
| + public static long round(long time, Resolution resolution) { |
| + final Calendar calInstance = TL_CAL.get(); |
| + calInstance.setTimeInMillis(time); |
| + |
| + switch (resolution) { |
| + //NOTE: switch statement fall-through is deliberate |
| + case YEAR: |
| + calInstance.set(Calendar.MONTH, 0); |
| + case MONTH: |
| + calInstance.set(Calendar.DAY_OF_MONTH, 1); |
| + case DAY: |
| + calInstance.set(Calendar.HOUR_OF_DAY, 0); |
| + case HOUR: |
| + calInstance.set(Calendar.MINUTE, 0); |
| + case MINUTE: |
| + calInstance.set(Calendar.SECOND, 0); |
| + case SECOND: |
| + calInstance.set(Calendar.MILLISECOND, 0); |
| + case MILLISECOND: |
| + // don't cut off anything |
| + break; |
| + default: |
| + throw new IllegalArgumentException("unknown resolution " + resolution); |
| + } |
| + return calInstance.getTimeInMillis(); |
| + } |
| + |
| + /** Specifies the time granularity. */ |
| + public static enum Resolution { |
| + |
| + YEAR(4), MONTH(6), DAY(8), HOUR(10), MINUTE(12), SECOND(14), MILLISECOND(17); |
| + |
| + final int formatLen; |
| + final SimpleDateFormat format;//should be cloned before use, since it's not threadsafe |
| + |
| + Resolution(int formatLen) { |
| + this.formatLen = formatLen; |
| + // formatLen 10's place: 11111111 |
| + // formatLen 1's place: 12345678901234567 |
| + this.format = new SimpleDateFormat("yyyyMMddHHmmssSSS".substring(0,formatLen),Locale.US); |
| + this.format.setTimeZone(GMT); |
| + } |
| + |
| +    /** Returns the name of the resolution |
| +     * in lowercase (for backwards compatibility). */ |
| + @Override |
| + public String toString() { |
| + return super.toString().toLowerCase(Locale.ENGLISH); |
| + } |
| + |
| + } |
| + |
| +} |
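|
| Taken together, a round trip through the class looks like this; the values |
| follow the examples already given in the javadoc above (java.util.Date |
| imported, checked exceptions elided): |
|
|   long time = 1095767411000L; // 2004-09-21 13:50:11 GMT |
|
|   // Resolution.DAY keeps "yyyyMMdd", so terms sort chronologically: |
|   String day = DateTools.timeToString(time, DateTools.Resolution.DAY); // "20040921" |
|
|   // the string length alone identifies the resolution when parsing back: |
|   Date parsed = DateTools.stringToDate(day); // 2004-09-21 00:00:00 GMT |
|
|   // round() zeroes every unit finer than the requested resolution: |
|   long monthStart = DateTools.round(time, DateTools.Resolution.MONTH); |
|   // monthStart == DateTools.stringToTime("200409"), i.e. 2004-09-01 00:00:00 GMT |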
| Index: lucene/src/java/org/apache/lucene/document2/Document.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/document2/Document.java (revision 1143083)
|
| +++ lucene/src/java/org/apache/lucene/document2/Document.java (working copy)
|
| @@ -163,6 +163,14 @@
|
| return fields.size(); |
| } |
| |
| + public final String get(String name) { |
| + for (IndexableField field : fields) { |
| + if (field.name().equals(name) && (field.binaryValue(null) == null)) |
| + return field.stringValue(); |
| + } |
| + return null; |
| + } |
| + |
| /** Prints the fields of a document for human consumption. */ |
| @Override |
| public final String toString() { |
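|
| The new get() matches the behavior of the old Document API: it returns the |
| string value of the first matching field that is not binary, and null |
| otherwise. A small sketch using field classes from this patch: |
|
|   Document doc = new Document(); |
|   doc.add(new StringField("id", "42")); |
|   doc.add(new BinaryField("raw", new byte[] { 1, 2, 3 })); |
|   doc.get("id");   // "42" |
|   doc.get("raw");  // null: the field exists, but only with a binary value |
|   doc.get("none"); // null: no such field |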
| Index: lucene/src/java/org/apache/lucene/document2/NumericField.java
|
| ===================================================================
|
| --- lucene/src/java/org/apache/lucene/document2/NumericField.java (revision 1143083)
|
| +++ lucene/src/java/org/apache/lucene/document2/NumericField.java (working copy)
|
| @@ -19,8 +19,6 @@
|
| |
| import java.io.Reader; |
| |
| -import org.apache.lucene.document.NumericField.DataType; |
| - |
| import org.apache.lucene.analysis.TokenStream; |
| import org.apache.lucene.analysis.NumericTokenStream; |
| import org.apache.lucene.util.NumericUtils; |
| @@ -150,10 +148,13 @@
|
| public static final FieldType DEFAULT_TYPE = new FieldType(); |
| static { |
| DEFAULT_TYPE.setIndexed(true); |
| + DEFAULT_TYPE.setTokenized(true); |
| DEFAULT_TYPE.setOmitNorms(true); |
| DEFAULT_TYPE.setOmitTermFreqAndPositions(true); |
| DEFAULT_TYPE.freeze(); |
| } |
| + |
| + public static enum DataType { INT, LONG, FLOAT, DOUBLE } |
| |
| private DataType dataType; |
| private transient NumericTokenStream numericTS; |
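|
| With the old-package DataType import gone, document2.NumericField is now |
| self-contained, and its default type indexes the trie terms while omitting |
| norms and term freqs/positions, which carry no information for generated |
| numeric tokens. Usage sketch (field names and values illustrative; the |
| chained setters and the precisionStep constructor appear in the |
| back-compatibility test below): |
|
|   doc.add(new NumericField("price", 4).setIntValue(19)); |
|   doc.add(new NumericField("when").setLongValue(System.currentTimeMillis())); |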
| Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java
|
| ===================================================================
|
| --- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1143083)
|
| +++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy)
|
| @@ -26,9 +26,11 @@
|
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| import org.apache.lucene.analysis.MockTokenizer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Fieldable; |
| +import org.apache.lucene.document2.BinaryField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.search.SimilarityProvider; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.LuceneTestCase; |
| @@ -36,63 +38,110 @@
|
| import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT; |
| |
| class DocHelper { |
| + |
| + public static final FieldType customType; |
| public static final String FIELD_1_TEXT = "field one text"; |
| public static final String TEXT_FIELD_1_KEY = "textField1"; |
| - public static Field textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT, |
| - Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO); |
| - |
| + public static Field textField1; |
| + static { |
| + customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + textField1 = new Field(TEXT_FIELD_1_KEY, customType, FIELD_1_TEXT); |
| + } |
| + |
| + public static final FieldType customType2; |
| public static final String FIELD_2_TEXT = "field field field two text"; |
| //Fields will be lexicographically sorted. So, the order is: field, text, two |
| public static final int [] FIELD_2_FREQS = {3, 1, 1}; |
| public static final String TEXT_FIELD_2_KEY = "textField2"; |
| - public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); |
| + public static Field textField2; |
| + static { |
| + customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setStoreTermVectors(true); |
| + customType2.setStoreTermVectorPositions(true); |
| + customType2.setStoreTermVectorOffsets(true); |
| + textField2 = new Field(TEXT_FIELD_2_KEY, customType2, FIELD_2_TEXT); |
| + } |
| |
| + public static final FieldType customType3; |
| public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms"; |
| public static final String TEXT_FIELD_3_KEY = "textField3"; |
| - public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.ANALYZED); |
| - static { textField3.setOmitNorms(true); } |
| + public static Field textField3; |
| + |
| + static { |
| + customType3 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType3.setStored(true); |
| + customType3.setOmitNorms(true); |
| + textField3 = new Field(TEXT_FIELD_3_KEY, customType3, FIELD_3_TEXT); |
| + } |
| |
| + public static final FieldType customType4; |
| public static final String KEYWORD_TEXT = "Keyword"; |
| public static final String KEYWORD_FIELD_KEY = "keyField"; |
| - public static Field keyField = new Field(KEYWORD_FIELD_KEY, KEYWORD_TEXT, |
| - Field.Store.YES, Field.Index.NOT_ANALYZED); |
| + public static Field keyField; |
| + static { |
| + customType4 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType4.setStored(true); |
| + customType4.setTokenized(false); |
| + keyField = new Field(KEYWORD_FIELD_KEY, customType4, KEYWORD_TEXT); |
| + } |
| |
| + public static final FieldType customType5; |
| public static final String NO_NORMS_TEXT = "omitNormsText"; |
| public static final String NO_NORMS_KEY = "omitNorms"; |
| - public static Field noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT, |
| - Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); |
| + public static Field noNormsField; |
| + static { |
| + customType5 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType5.setOmitNorms(true); |
| + customType5.setStored(true); |
| + customType5.setTokenized(false); |
| + noNormsField = new Field(NO_NORMS_KEY, customType5, NO_NORMS_TEXT); |
| + } |
| |
| + public static final FieldType customType6; |
| public static final String NO_TF_TEXT = "analyzed with no tf and positions"; |
| public static final String NO_TF_KEY = "omitTermFreqAndPositions"; |
| - public static Field noTFField = new Field(NO_TF_KEY, NO_TF_TEXT, |
| - Field.Store.YES, Field.Index.ANALYZED); |
| + public static Field noTFField; |
| static { |
| - noTFField.setOmitTermFreqAndPositions(true); |
| + customType6 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType6.setOmitTermFreqAndPositions(true); |
| + customType6.setStored(true); |
| + noTFField = new Field(NO_TF_KEY, customType6, NO_TF_TEXT); |
| } |
| |
| + public static final FieldType customType7; |
| public static final String UNINDEXED_FIELD_TEXT = "unindexed field text"; |
| public static final String UNINDEXED_FIELD_KEY = "unIndField"; |
| - public static Field unIndField = new Field(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT, |
| - Field.Store.YES, Field.Index.NO); |
| + public static Field unIndField; |
| + static { |
| + customType7 = new FieldType(); |
| + customType7.setStored(true); |
| + unIndField = new Field(UNINDEXED_FIELD_KEY, customType7, UNINDEXED_FIELD_TEXT); |
| + } |
| |
| |
| public static final String UNSTORED_1_FIELD_TEXT = "unstored field text"; |
| public static final String UNSTORED_FIELD_1_KEY = "unStoredField1"; |
| - public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, |
| - Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO); |
| + public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, TextField.DEFAULT_TYPE, UNSTORED_1_FIELD_TEXT); |
| |
| + public static final FieldType customType8; |
| public static final String UNSTORED_2_FIELD_TEXT = "unstored field text"; |
| public static final String UNSTORED_FIELD_2_KEY = "unStoredField2"; |
| - public static Field unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT, |
| - Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES); |
| + public static Field unStoredField2; |
| + static { |
| + customType8 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType8.setStoreTermVectors(true); |
| + unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, customType8, UNSTORED_2_FIELD_TEXT); |
| + } |
| |
| public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary"; |
| public static byte [] LAZY_FIELD_BINARY_BYTES; |
| public static Field lazyFieldBinary; |
| - |
| + |
| public static final String LAZY_FIELD_KEY = "lazyField"; |
| public static final String LAZY_FIELD_TEXT = "These are some field bytes"; |
| - public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED); |
| + public static Field lazyField = new Field(LAZY_FIELD_KEY, customType, LAZY_FIELD_TEXT); |
| |
| public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField"; |
| public static String LARGE_LAZY_FIELD_TEXT; |
| @@ -101,15 +150,13 @@
|
| //From Issue 509 |
| public static final String FIELD_UTF1_TEXT = "field one \u4e00text"; |
| public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8"; |
| - public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT, |
| - Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO); |
| + public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, customType, FIELD_UTF1_TEXT); |
| |
| public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text"; |
| //Fields will be lexicographically sorted. So, the order is: field, text, two |
| public static final int [] FIELD_UTF2_FREQS = {3, 1, 1}; |
| public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8"; |
| - public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, Field.Store.YES, |
| - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); |
| + public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, customType2, FIELD_UTF2_TEXT); |
| |
| |
| |
| @@ -135,16 +182,16 @@
|
| largeLazyField//placeholder for large field, since this is null. It must always be last |
| }; |
| |
| - public static Map<String,Fieldable> all =new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> indexed =new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> stored =new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> unstored=new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> unindexed=new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> termvector=new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> notermvector=new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> lazy= new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> noNorms=new HashMap<String,Fieldable>(); |
| - public static Map<String,Fieldable> noTf=new HashMap<String,Fieldable>(); |
| + public static Map<String,IndexableField> all =new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> indexed =new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> stored =new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> unstored=new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> unindexed=new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> termvector=new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> notermvector=new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> lazy= new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> noNorms=new HashMap<String,IndexableField>(); |
| + public static Map<String,IndexableField> noTf=new HashMap<String,IndexableField>(); |
| |
| static { |
| //Initialize the large Lazy Field |
| @@ -158,28 +205,28 @@
|
| LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8"); |
| } catch (UnsupportedEncodingException e) { |
| } |
| - lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES); |
| + lazyFieldBinary = new BinaryField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES); |
| fields[fields.length - 2] = lazyFieldBinary; |
| LARGE_LAZY_FIELD_TEXT = buffer.toString(); |
| - largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED); |
| + largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, customType, LARGE_LAZY_FIELD_TEXT); |
| fields[fields.length - 1] = largeLazyField; |
| for (int i=0; i<fields.length; i++) { |
| - Fieldable f = fields[i]; |
| + IndexableField f = fields[i]; |
| add(all,f); |
| - if (f.isIndexed()) add(indexed,f); |
| + if (f.indexed()) add(indexed,f); |
| else add(unindexed,f); |
| - if (f.isTermVectorStored()) add(termvector,f); |
| - if (f.isIndexed() && !f.isTermVectorStored()) add(notermvector,f); |
| - if (f.isStored()) add(stored,f); |
| + if (f.storeTermVectors()) add(termvector,f); |
| + if (f.indexed() && !f.storeTermVectors()) add(notermvector,f); |
| + if (f.stored()) add(stored,f); |
| else add(unstored,f); |
| - if (f.getOmitNorms()) add(noNorms,f); |
| - if (f.getOmitTermFreqAndPositions()) add(noTf,f); |
| - if (f.isLazy()) add(lazy, f); |
| + if (f.omitNorms()) add(noNorms,f); |
| + if (f.omitTermFreqAndPositions()) add(noTf,f); |
| + //if (f.isLazy()) add(lazy, f); |
| } |
| } |
| |
| |
| - private static void add(Map<String,Fieldable> map, Fieldable field) { |
| + private static void add(Map<String,IndexableField> map, IndexableField field) { |
| map.put(field.name(), field); |
| } |
| |
| @@ -248,6 +295,10 @@
|
| } |
| |
| public static int numFields(Document doc) { |
| + return doc.size(); |
| + } |
| + |
| + public static int numFields2(org.apache.lucene.document.Document doc) { |
| return doc.getFields().size(); |
| } |
| } |
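|
| The DocHelper rewrite above exercises most of the translation table between |
| the old Field flags and document2 FieldType settings; summarized: |
|
|   Field.Store.YES                         -> type.setStored(true) |
|   Field.Index.ANALYZED                    -> new FieldType(TextField.DEFAULT_TYPE) |
|   Field.Index.NOT_ANALYZED                -> TextField default + setTokenized(false) |
|   Field.Index.NOT_ANALYZED_NO_NORMS       -> setTokenized(false) + setOmitNorms(true), or just StringField |
|   Field.Index.NO                          -> new FieldType() with only setStored(true) |
|   Field.TermVector.YES                    -> setStoreTermVectors(true) |
|   Field.TermVector.WITH_POSITIONS_OFFSETS -> setStoreTermVectors/Positions/Offsets(true) |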
| Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
|
| ===================================================================
|
| --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1143083)
|
| +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy)
|
| @@ -1079,16 +1079,17 @@
|
| } |
| |
| public static org.apache.lucene.document2.Field newField(Random random, String name, String value, FieldType type) { |
| + FieldType newType = new FieldType(type); |
| if (usually(random)) { |
| // most of the time, don't modify the params |
| - return new org.apache.lucene.document2.Field(name, type, value); |
| + return new org.apache.lucene.document2.Field(name, newType, value); |
| } |
| |
| - if (!type.stored() && random.nextBoolean()) { |
| - type.setStored(true); // randomly store it |
| + if (!newType.stored() && random.nextBoolean()) { |
| + newType.setStored(true); // randomly store it |
| } |
| |
| - return new org.apache.lucene.document2.Field(name, type, value); |
| + return new org.apache.lucene.document2.Field(name, newType, value); |
| } |
| |
| /** |
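|
| The fix here is a defensive copy: newField sometimes randomizes the stored |
| flag, and it previously flipped it on the caller's FieldType in place, so one |
| randomized call could silently change every later field built from the same |
| (possibly shared, possibly frozen) type. With the copy, the caller's instance |
| stays untouched: |
|
|   FieldType shared = new FieldType(TextField.DEFAULT_TYPE); |
|   Field a = newField(random, "f1", "v1", shared); // may randomly store f1 |
|   Field b = newField(random, "f2", "v2", shared); // unaffected by f1's roll |
|   assert !shared.stored(); // the caller's type was never mutated |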
| Index: lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
|
| ===================================================================
|
| --- lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (revision 1143083)
|
| +++ lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (working copy)
|
| @@ -34,12 +34,12 @@
|
| import java.util.zip.ZipEntry; |
| import java.util.zip.ZipFile; |
| |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Fieldable; |
| +import org.apache.lucene.document2.Document; |
| import org.apache.lucene.index.CheckIndex; |
| import org.apache.lucene.index.ConcurrentMergeScheduler; |
| import org.apache.lucene.index.FieldInfos; |
| import org.apache.lucene.index.IndexWriter; |
| +import org.apache.lucene.index.IndexableField; |
| import org.apache.lucene.index.LogMergePolicy; |
| import org.apache.lucene.index.MergePolicy; |
| import org.apache.lucene.index.MergeScheduler; |
| @@ -372,10 +372,9 @@
|
| |
| /** Adds field info for a Document. */ |
| public static void add(Document doc, FieldInfos fieldInfos) { |
| - List<Fieldable> fields = doc.getFields(); |
| - for (Fieldable field : fields) { |
| - fieldInfos.addOrUpdate(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(), |
| - field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTermFreqAndPositions()); |
| + for (IndexableField field : doc) { |
| + fieldInfos.addOrUpdate(field.name(), field.indexed(), field.storeTermVectors(), field.storeTermVectorPositions(), |
| + field.storeTermVectorOffsets(), field.omitNorms(), false, field.omitTermFreqAndPositions()); |
| } |
| } |
| |
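| The loop above leans on document2.Document being iterable over IndexableField, |
| with the old Fieldable getters (isIndexed, isTermVectorStored, getOmitNorms, |
| ...) replaced by unprefixed accessors. The same pattern works anywhere a test |
| needs to inspect a document's fields: |
|
|   for (IndexableField field : doc) { |
|     if (field.indexed() && !field.omitNorms()) { |
|       System.out.println(field.name() + " contributes norms"); |
|     } |
|   } |
|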
| Index: lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (working copy)
|
| @@ -22,9 +22,8 @@
|
| |
| import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; |
| import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.TermVector; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.MultiFields; |
| import org.apache.lucene.index.DocsAndPositionsEnum; |
| @@ -60,7 +59,7 @@
|
| |
| stream = new CachingTokenFilter(stream); |
| |
| - doc.add(new Field("preanalyzed", stream, TermVector.NO)); |
| + doc.add(new TextField("preanalyzed", stream)); |
| |
| // 1) we consume all tokens twice before we add the doc to the index |
| checkTokens(stream); |
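|
| TextField also doubles as the carrier for pre-analyzed content: besides the |
| (name, String) form used elsewhere in this patch, it accepts a TokenStream |
| directly, replacing the old Field(name, stream, TermVector.NO) constructor. |
| A sketch, assuming an Analyzer and a java.io.StringReader over the test text: |
|
|   TokenStream stream = analyzer.tokenStream("preanalyzed", new StringReader(text)); |
|   doc.add(new TextField("preanalyzed", stream)); |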
| Index: lucene/src/test/org/apache/lucene/index/Test2BTerms.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/Test2BTerms.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/Test2BTerms.java (working copy)
|
| @@ -22,7 +22,7 @@
|
| import org.apache.lucene.search.*; |
| import org.apache.lucene.analysis.*; |
| import org.apache.lucene.analysis.tokenattributes.*; |
| -import org.apache.lucene.document.*; |
| +import org.apache.lucene.document2.*; |
| import org.apache.lucene.index.codecs.CodecProvider; |
| import java.io.File; |
| import java.io.IOException; |
| @@ -176,9 +176,12 @@
|
| |
| Document doc = new Document(); |
| final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC); |
| - Field field = new Field("field", ts); |
| - field.setOmitTermFreqAndPositions(true); |
| - field.setOmitNorms(true); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setOmitTermFreqAndPositions(true); |
| + customType.setOmitNorms(true); |
| + Field field = new Field("field", customType, ts); |
| doc.add(field); |
| //w.setInfoStream(System.out); |
| final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC); |
| Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy)
|
| @@ -23,11 +23,11 @@
|
| import java.util.List; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field.TermVector; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.StringField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.index.codecs.CodecProvider; |
| import org.apache.lucene.index.codecs.mocksep.MockSepCodec; |
| @@ -164,11 +164,12 @@
|
| |
| // Adds 10 docs, then replaces them with another 10 |
| // docs, so 10 pending deletes: |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| for (int i = 0; i < 20; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("content", "bbb " + i, Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("id", "" + (i % 10), customType)); |
| + doc.add(newField("content", "bbb " + i, TextField.DEFAULT_TYPE)); |
| writer.updateDocument(new Term("id", "" + (i%10)), doc); |
| } |
| // Deletes one of the 10 added docs, leaving 9: |
| @@ -200,10 +201,12 @@
|
| |
| // Adds 10 docs, then replaces them with another 10 |
| // docs, so 10 pending deletes: |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| for (int i = 0; i < 20; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("id", "" + (i % 10), customType)); |
| + doc.add(newField("content", "bbb " + i, TextField.DEFAULT_TYPE)); |
| writer.updateDocument(new Term("id", "" + (i%10)), doc); |
| } |
| |
| @@ -238,11 +241,12 @@
|
| |
| // Adds 10 docs, then replaces them with another 10 |
| // docs, so 10 pending deletes: |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| for (int i = 0; i < 20; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("content", "bbb " + i, Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("id", "" + (i % 10), customType)); |
| + doc.add(newField("content", "bbb " + i, TextField.DEFAULT_TYPE)); |
| writer.updateDocument(new Term("id", "" + (i%10)), doc); |
| } |
| |
| @@ -502,8 +506,7 @@
|
| private void addDocs(IndexWriter writer, int numDocs) throws IOException { |
| for (int i = 0; i < numDocs; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("content", "aaa", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| } |
| @@ -511,8 +514,7 @@
|
| private void addDocs2(IndexWriter writer, int numDocs) throws IOException { |
| for (int i = 0; i < numDocs; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("content", "bbb", Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("content", "bbb", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| } |
| @@ -581,20 +583,22 @@
|
| .setMaxBufferedDocs(5).setMergePolicy(lmp)); |
| |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, |
| - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType)); |
| for(int i=0;i<60;i++) |
| writer.addDocument(doc); |
| |
| Document doc2 = new Document(); |
| - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, |
| - Field.Index.NO)); |
| - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, |
| - Field.Index.NO)); |
| - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, |
| - Field.Index.NO)); |
| - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, |
| - Field.Index.NO)); |
| + FieldType customType2 = new FieldType(); |
| + customType2.setStored(true); |
| + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); |
| + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); |
| + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); |
| + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); |
| for(int i=0;i<10;i++) |
| writer.addDocument(doc2); |
| writer.close(); |
| @@ -618,7 +622,7 @@
|
| private void addDoc(IndexWriter writer) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("content", "aaa", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -943,7 +947,7 @@
|
| IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); |
| IndexWriter writer = new IndexWriter(dirs[i], conf); |
| Document doc = new Document(); |
| - doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); |
| + doc.add(new StringField("id", "myid")); |
| writer.addDocument(doc); |
| writer.close(); |
| } |
| @@ -972,8 +976,10 @@
|
| private void addDocs3(IndexWriter writer, int numDocs) throws IOException { |
| for (int i = 0; i < numDocs; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("id", "" + i, Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("content", "aaa", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("id", "" + i, customType)); |
| writer.addDocument(doc); |
| } |
| } |
| @@ -1060,7 +1066,10 @@
|
| dirs[i] = new RAMDirectory(); |
| IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document d = new Document(); |
| - d.add(new Field("c", "v", Store.YES, Index.ANALYZED, TermVector.YES)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + d.add(new Field("c", customType, "v")); |
| w.addDocument(d); |
| w.close(); |
| } |
| @@ -1098,10 +1107,12 @@
|
| new MockAnalyzer(random)).setMergePolicy(lmp2); |
| IndexWriter w2 = new IndexWriter(src, conf2); |
| Document doc = new Document(); |
| - doc.add(new Field("c", "some text", Store.YES, Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(new Field("c", customType, "some text")); |
| w2.addDocument(doc); |
| doc = new Document(); |
| - doc.add(new Field("d", "delete", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); |
| + doc.add(new StringField("d", "delete")); |
| w2.addDocument(doc); |
| w2.commit(); |
| w2.deleteDocuments(new Term("d", "delete")); |
| @@ -1151,7 +1162,9 @@
|
| conf.setCodecProvider(provider); |
| IndexWriter w = new IndexWriter(toAdd, conf); |
| Document doc = new Document(); |
| - doc.add(newField("foo", "bar", Index.NOT_ANALYZED)); |
| + FieldType customType = new FieldType(); |
| + customType.setIndexed(true); |
| + doc.add(newField("foo", "bar", customType)); |
| w.addDocument(doc); |
| w.close(); |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy)
|
| @@ -18,7 +18,7 @@
|
| |
| import org.apache.lucene.util.*; |
| import org.apache.lucene.store.*; |
| -import org.apache.lucene.document.*; |
| +import org.apache.lucene.document2.*; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| |
| import java.util.Random; |
| @@ -93,10 +93,12 @@
|
| @Override |
| public void doWork() throws Exception { |
| // Update all 100 docs... |
| + FieldType customType = new FieldType(StringField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for(int i=0; i<100; i++) { |
| Document d = new Document(); |
| - d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.ANALYZED)); |
| + d.add(new Field("id", customType, Integer.toString(i))); |
| + d.add(new TextField("contents", English.intToEnglish(i+10*count))); |
| writer.updateDocument(new Term("id", Integer.toString(i)), d); |
| } |
| } |
| @@ -134,10 +136,12 @@
|
| writer.setInfoStream(VERBOSE ? System.out : null); |
| |
| // Establish a base index of 100 docs: |
| + FieldType customType = new FieldType(StringField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for(int i=0;i<100;i++) { |
| Document d = new Document(); |
| - d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED)); |
| + d.add(newField("id", Integer.toString(i), customType)); |
| + d.add(newField("contents", English.intToEnglish(i), TextField.DEFAULT_TYPE)); |
| if ((i-1)%7 == 0) { |
| writer.commit(); |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
|
| @@ -27,10 +27,12 @@
|
| import java.util.Random; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.document.Fieldable; |
| -import org.apache.lucene.document.NumericField; |
| +import org.apache.lucene.document2.NumericField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.search.DefaultSimilarity; |
| import org.apache.lucene.search.DocIdSetIterator; |
| @@ -285,12 +287,12 @@
|
| |
| for(int i=0;i<35;i++) { |
| if (!delDocs.get(i)) { |
| - Document d = reader.document(i); |
| + org.apache.lucene.document.Document d = reader.document(i); |
| List<Fieldable> fields = d.getFields(); |
| if (d.getField("content3") == null) { |
| final int numFields = 5; |
| assertEquals(numFields, fields.size()); |
| - Field f = d.getField("id"); |
| + org.apache.lucene.document.Field f = d.getField("id"); |
| assertEquals(""+i, f.stringValue()); |
| |
| f = d.getField("utf8"); |
| @@ -318,7 +320,7 @@
|
| |
| // First document should be #21 since it's norm was |
| // increased: |
| - Document d = searcher.getIndexReader().document(hits[0].doc); |
| + org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); |
| assertEquals("didn't get the right document first", "21", d.get("id")); |
| |
| doTestHits(hits, 34, searcher.getIndexReader()); |
| @@ -364,7 +366,7 @@
|
| // make sure searching sees right # hits |
| IndexSearcher searcher = new IndexSearcher(dir, true); |
| ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; |
| - Document d = searcher.getIndexReader().document(hits[0].doc); |
| + org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); |
| assertEquals("wrong first document", "21", d.get("id")); |
| doTestHits(hits, 44, searcher.getIndexReader()); |
| searcher.close(); |
| @@ -412,7 +414,7 @@
|
| IndexSearcher searcher = new IndexSearcher(dir, true); |
| ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; |
| assertEquals("wrong number of hits", 34, hits.length); |
| - Document d = searcher.doc(hits[0].doc); |
| + org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); |
| assertEquals("wrong first document", "21", d.get("id")); |
| searcher.close(); |
| |
| @@ -582,12 +584,20 @@
|
| private void addDoc(IndexWriter writer, int id) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(new Field("content2", "here is more content with aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(new Field("fie\u2C77ld", "field with non-ascii name", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + doc.add(new TextField("content", "aaa")); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + doc.add(new Field("id", customType, Integer.toString(id))); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setStoreTermVectors(true); |
| + customType2.setStoreTermVectorPositions(true); |
| + customType2.setStoreTermVectorOffsets(true); |
| + doc.add(new Field("autf8", customType2, "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd")); |
| + doc.add(new Field("utf8", customType2, "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd")); |
| + doc.add(new Field("content2", customType2, "here is more content with aaa aaa aaa")); |
| + doc.add(new Field("fie\u2C77ld", customType2, "field with non-ascii name")); |
| // add numeric fields, to test if flex preserves encoding |
| doc.add(new NumericField("trieInt", 4).setIntValue(id)); |
| doc.add(new NumericField("trieLong", 4).setLongValue(id)); |
| @@ -596,11 +606,15 @@
|
| |
| private void addNoProxDoc(IndexWriter writer) throws IOException { |
| Document doc = new Document(); |
| - Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED); |
| - f.setOmitTermFreqAndPositions(true); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setOmitTermFreqAndPositions(true); |
| + Field f = new Field("content3", customType, "aaa"); |
| doc.add(f); |
| - f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO); |
| - f.setOmitTermFreqAndPositions(true); |
| + FieldType customType2 = new FieldType(); |
| + customType2.setStored(true); |
| + customType2.setOmitTermFreqAndPositions(true); |
| + f = new Field("content4", customType2, "aaa"); |
| doc.add(f); |
| writer.addDocument(doc); |
| } |
| @@ -670,7 +684,7 @@
|
| for (int id=10; id<15; id++) { |
| ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs; |
| assertEquals("wrong number of hits", 1, hits.length); |
| - Document d = searcher.doc(hits[0].doc); |
| + org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); |
| assertEquals(String.valueOf(id), d.get("id")); |
| |
| hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs; |
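|
| Note the split this test now lives with: documents are built and indexed with |
| document2 classes, but IndexReader.document() still materializes stored fields |
| as old-package org.apache.lucene.document.Document instances, hence the |
| fully-qualified names throughout the hunks above: |
|
|   org.apache.lucene.document.Document stored = searcher.doc(hits[0].doc); |
|   String id = stored.get("id"); // the retrieval side still uses the old API |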
| Index: lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (working copy)
|
| @@ -19,8 +19,10 @@
|
| |
| import java.io.IOException; |
| |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.codecs.CodecProvider; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.TermQuery; |
| @@ -47,8 +49,10 @@
|
| bytes.bytes[1] = (byte) (255 - i); |
| bytes.length = 2; |
| Document doc = new Document(); |
| - doc.add(new Field("id", "" + i, Field.Store.YES, Field.Index.NO)); |
| - doc.add(new Field("bytes", tokenStream)); |
| + FieldType customType = new FieldType(); |
| + customType.setStored(true); |
| + doc.add(new Field("id", customType, "" + i)); |
| + doc.add(new TextField("bytes", tokenStream)); |
| iw.addDocument(doc); |
| } |
| |
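| The "id" field above is the stored-only case: a bare new FieldType() (which the hunks |
| treat as neither indexed nor stored) with storage switched on reproduces the old |
| Field.Store.YES / Field.Index.NO combination. A short sketch under that assumption: |
| |
| FieldType storedOnly = new FieldType(); // nothing enabled yet |
| storedOnly.setStored(true); // Field.Store.YES |
| doc.add(new Field("id", storedOnly, "7")); // retrievable from the index, not searchable |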
| Index: lucene/src/test/org/apache/lucene/index/TestCheckIndex.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (working copy)
|
| @@ -26,8 +26,10 @@
|
| import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.util.Constants; |
| |
| public class TestCheckIndex extends LuceneTestCase { |
| @@ -36,7 +38,12 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); |
| Document doc = new Document(); |
| - doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("field", "aaa", customType)); |
| for(int i=0;i<19;i++) { |
| writer.addDocument(doc); |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestCodecs.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestCodecs.java (working copy)
|
| @@ -22,9 +22,10 @@
|
| import java.util.HashSet; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.Store; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.codecs.CodecProvider; |
| import org.apache.lucene.index.codecs.FieldsConsumer; |
| import org.apache.lucene.index.codecs.FieldsProducer; |
| @@ -330,7 +331,9 @@
|
| pq.add(new Term("content", "ccc")); |
| |
| final Document doc = new Document(); |
| - doc.add(newField("content", "aaa bbb ccc ddd", Store.NO, Field.Index.ANALYZED_NO_NORMS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| + doc.add(newField("content", "aaa bbb ccc ddd", customType)); |
| |
| // add document and force commit for creating a first segment |
| writer.addDocument(doc); |
| Index: lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy)
|
| @@ -19,8 +19,10 @@
|
| |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| |
| import org.apache.lucene.util.LuceneTestCase; |
| @@ -75,7 +77,10 @@
|
| IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); |
| writer.setInfoStream(VERBOSE ? System.out : null); |
| Document doc = new Document(); |
| - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + Field idField = newField("id", "", customType); |
| doc.add(idField); |
| int extraCount = 0; |
| |
| @@ -135,7 +140,10 @@
|
| writer.setInfoStream(VERBOSE ? System.out : null); |
| |
| Document doc = new Document(); |
| - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + Field idField = newField("id", "", customType); |
| doc.add(idField); |
| for(int i=0;i<10;i++) { |
| if (VERBOSE) { |
| @@ -180,7 +188,7 @@
|
| |
| for(int j=0;j<21;j++) { |
| Document doc = new Document(); |
| - doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("content", "a b c", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -202,7 +210,10 @@
|
| public void testNoWaitClose() throws IOException { |
| MockDirectoryWrapper directory = newDirectory(); |
| Document doc = new Document(); |
| - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + Field idField = newField("id", "", customType); |
| doc.add(idField); |
| |
| IndexWriter writer = new IndexWriter( |
| Index: lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (working copy)
|
| @@ -20,11 +20,11 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field.TermVector; |
| +import org.apache.lucene.document2.BinaryField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.LuceneTestCase; |
| import org.junit.Test; |
| @@ -38,8 +38,11 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); |
| |
| Document d1 = new Document(); |
| - d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| - d1.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + d1.add(new Field("f1", customType, "first field")); |
| + d1.add(new Field("f2", customType, "second field")); |
| writer.addDocument(d1); |
| |
| if (i == 1) { |
| @@ -50,10 +53,13 @@
|
| } |
| |
| Document d2 = new Document(); |
| - d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| - d2.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.YES)); |
| - d2.add(new Field("f3", "third field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| - d2.add(new Field("f4", "fourth field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setStoreTermVectors(true); |
| + d2.add(new TextField("f2", "second field")); |
| + d2.add(new Field("f1", customType2, "first field")); |
| + d2.add(new TextField("f3", "third field")); |
| + d2.add(new TextField("f4", "fourth field")); |
| writer.addDocument(d2); |
| |
| writer.close(); |
| @@ -99,18 +105,23 @@
|
| IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); |
| |
| Document d1 = new Document(); |
| - d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| - d1.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + d1.add(new Field("f1", customType, "first field")); |
| + d1.add(new Field("f2", customType, "second field")); |
| writer.addDocument(d1); |
| |
| writer.close(); |
| writer = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); |
| |
| Document d2 = new Document(); |
| - d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| - d2.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.YES)); |
| - d2.add(new Field("f3", "third field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| - d2.add(new Field("f4", "fourth field", Store.YES, Index.ANALYZED, TermVector.NO)); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setStoreTermVectors(true); |
| + d2.add(new Field("f2", customType, "second field")); |
| + d2.add(new Field("f1", customType2, "first field")); |
| + d2.add(new Field("f3", customType, "third field")); |
| + d2.add(new Field("f4", customType, "fourth field")); |
| writer.addDocument(d2); |
| |
| writer.close(); |
| @@ -156,6 +167,8 @@
|
| |
| public void testFieldNumberGaps() throws IOException { |
| int numIters = atLeast(13); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < numIters; i++) { |
| Directory dir = newDirectory(); |
| { |
| @@ -163,10 +176,8 @@
|
| TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy( |
| NoMergePolicy.NO_COMPOUND_FILES)); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| + d.add(new Field("f1", customType, "d1 first field")); |
| + d.add(new Field("f2", customType, "d1 second field")); |
| writer.addDocument(d); |
| writer.close(); |
| SegmentInfos sis = new SegmentInfos(); |
| @@ -185,9 +196,8 @@
|
| random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES |
| : NoMergePolicy.COMPOUND_FILES)); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3 })); |
| + d.add(new Field("f1", customType, "d2 first field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); |
| writer.addDocument(d); |
| writer.close(); |
| SegmentInfos sis = new SegmentInfos(); |
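| Binary values follow the same pattern: the removed Field(String, byte[]) constructor, |
| which created a stored, unindexed binary field, becomes the document2 BinaryField |
| imported at the top of this file. A one-line sketch mirroring the hunk above: |
| |
| d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); // stored binary payload, not indexed |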
| @@ -210,11 +220,9 @@
|
| random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES |
| : NoMergePolicy.COMPOUND_FILES)); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d3 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d3 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3, 4, 5 })); |
| + d.add(new Field("f1", customType, "d3 first field")); |
| + d.add(new Field("f2", customType, "d3 second field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3, 4, 5 })); |
| writer.addDocument(d); |
| writer.close(); |
| SegmentInfos sis = new SegmentInfos(); |
| @@ -303,10 +311,10 @@
|
| |
| for (FieldInfo fi : fis) { |
| Field expected = getField(Integer.parseInt(fi.name)); |
| - assertEquals(expected.isIndexed(), fi.isIndexed); |
| - assertEquals(expected.isTermVectorStored(), fi.storeTermVector); |
| - assertEquals(expected.isStorePositionWithTermVector(), fi.storePositionWithTermVector); |
| - assertEquals(expected.isStoreOffsetWithTermVector(), fi.storeOffsetWithTermVector); |
| + assertEquals(expected.indexed(), fi.isIndexed); |
| + assertEquals(expected.storeTermVectors(), fi.storeTermVector); |
| + assertEquals(expected.storeTermVectorPositions(), fi.storePositionWithTermVector); |
| + assertEquals(expected.storeTermVectorOffsets(), fi.storeOffsetWithTermVector); |
| } |
| } |
| |
| @@ -316,23 +324,99 @@
|
| private Field getField(int number) { |
| int mode = number % 16; |
| String fieldName = "" + number; |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setTokenized(false); |
| + |
| + FieldType customType3 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType3.setTokenized(false); |
| + |
| + FieldType customType4 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType4.setStored(true); |
| + customType4.setStoreTermVectors(true); |
| + customType4.setStoreTermVectorOffsets(true); |
| + |
| + FieldType customType5 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType5.setStoreTermVectors(true); |
| + customType5.setStoreTermVectorOffsets(true); |
| + |
| + FieldType customType6 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType6.setTokenized(false); |
| + customType6.setStored(true); |
| + customType6.setStoreTermVectors(true); |
| + customType6.setStoreTermVectorOffsets(true); |
| + |
| + FieldType customType7 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType7.setTokenized(false); |
| + customType7.setStoreTermVectors(true); |
| + customType7.setStoreTermVectorOffsets(true); |
| + |
| + FieldType customType8 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType8.setStored(true); |
| + customType8.setStoreTermVectors(true); |
| + customType8.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType9 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType9.setStoreTermVectors(true); |
| + customType9.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType10 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType10.setTokenized(false); |
| + customType10.setStored(true); |
| + customType10.setStoreTermVectors(true); |
| + customType10.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType11 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType11.setTokenized(false); |
| + customType11.setStoreTermVectors(true); |
| + customType11.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType12 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType12.setStored(true); |
| + customType12.setStoreTermVectors(true); |
| + customType12.setStoreTermVectorOffsets(true); |
| + customType12.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType13 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType13.setStoreTermVectors(true); |
| + customType13.setStoreTermVectorOffsets(true); |
| + customType13.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType14 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType14.setStored(true); |
| + customType14.setTokenized(false); |
| + customType14.setStoreTermVectors(true); |
| + customType14.setStoreTermVectorOffsets(true); |
| + customType14.setStoreTermVectorPositions(true); |
| + |
| + FieldType customType15 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType15.setTokenized(false); |
| + customType15.setStoreTermVectors(true); |
| + customType15.setStoreTermVectorOffsets(true); |
| + customType15.setStoreTermVectorPositions(true); |
| + |
| switch (mode) { |
| - case 0: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.NO); |
| - case 1: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.NO); |
| - case 2: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.NO); |
| - case 3: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.NO); |
| - case 4: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_OFFSETS); |
| - case 5: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_OFFSETS); |
| - case 6: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_OFFSETS); |
| - case 7: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_OFFSETS); |
| - case 8: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS); |
| - case 9: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS); |
| - case 10: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS); |
| - case 11: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS); |
| - case 12: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); |
| - case 13: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); |
| - case 14: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); |
| - case 15: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); |
| + case 0: return new Field(fieldName, customType, "some text"); |
| + case 1: return new TextField(fieldName, "some text"); |
| + case 2: return new Field(fieldName, customType2, "some text"); |
| + case 3: return new Field(fieldName, customType3, "some text"); |
| + case 4: return new Field(fieldName, customType4, "some text"); |
| + case 5: return new Field(fieldName, customType5, "some text"); |
| + case 6: return new Field(fieldName, customType6, "some text"); |
| + case 7: return new Field(fieldName, customType7, "some text"); |
| + case 8: return new Field(fieldName, customType8, "some text"); |
| + case 9: return new Field(fieldName, customType9, "some text"); |
| + case 10: return new Field(fieldName, customType10, "some text"); |
| + case 11: return new Field(fieldName, customType11, "some text"); |
| + case 12: return new Field(fieldName, customType12, "some text"); |
| + case 13: return new Field(fieldName, customType13, "some text"); |
| + case 14: return new Field(fieldName, customType14, "some text"); |
| + case 15: return new Field(fieldName, customType15, "some text"); |
| default: return null; |
| } |
| } |
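| The sixteen FieldTypes above spell out every Store x Index x TermVector combination by |
| hand; the mode bits encode unstored (bit 0), NOT_ANALYZED (bit 1), vector offsets |
| (bit 2) and vector positions (bit 3). A hypothetical helper -- not part of this patch -- |
| could derive the same types and shrink the switch to a single call: |
| |
| private static FieldType typeForMode(int mode) { |
| FieldType t = new FieldType(TextField.DEFAULT_TYPE); |
| t.setStored((mode & 1) == 0); // even modes were Field.Store.YES |
| t.setTokenized(((mode >> 1) & 1) == 0); // modes 2,3,6,7,10,11,14,15 were NOT_ANALYZED |
| boolean offsets = ((mode >> 2) & 1) == 1; // modes 4-7 and 12-15 stored vector offsets |
| boolean positions = ((mode >> 3) & 1) == 1; // modes 8-15 stored vector positions |
| t.setStoreTermVectors(offsets || positions); |
| t.setStoreTermVectorOffsets(offsets); |
| t.setStoreTermVectorPositions(positions); |
| return t; |
| } |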
| Index: lucene/src/test/org/apache/lucene/index/TestCrash.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestCrash.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestCrash.java (working copy)
|
| @@ -24,8 +24,9 @@
|
| import org.apache.lucene.store.MockDirectoryWrapper; |
| import org.apache.lucene.store.NoLockFactory; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| |
| public class TestCrash extends LuceneTestCase { |
| |
| @@ -44,8 +45,10 @@
|
| } |
| |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("id", "0", Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("content", "aaa", customType)); |
| + doc.add(newField("id", "0", customType)); |
| for(int i=0;i<157;i++) |
| writer.addDocument(doc); |
| |
| Index: lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy)
|
| @@ -24,8 +24,9 @@
|
| import java.util.Collection; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.Query; |
| @@ -841,7 +842,7 @@
|
| private void addDoc(IndexWriter writer) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("content", "aaa", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy)
|
| @@ -20,8 +20,10 @@
|
| import org.apache.lucene.util.LuceneTestCase; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.BytesRef; |
| @@ -79,12 +81,12 @@
|
| sis.read(dir); |
| IndexReader reader = openReader(); |
| assertTrue(reader != null); |
| - Document newDoc1 = reader.document(0); |
| + org.apache.lucene.document.Document newDoc1 = reader.document(0); |
| assertTrue(newDoc1 != null); |
| - assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); |
| - Document newDoc2 = reader.document(1); |
| + assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); |
| + org.apache.lucene.document.Document newDoc2 = reader.document(1); |
| assertTrue(newDoc2 != null); |
| - assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); |
| + assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); |
| TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY); |
| assertTrue(vector != null); |
| TestSegmentReader.checkNorms(reader); |
| @@ -202,7 +204,9 @@
|
| new MockAnalyzer(random)).setOpenMode( |
| create ? OpenMode.CREATE : OpenMode.APPEND)); |
| Document doc = new Document(); |
| - doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("body", s, customType)); |
| iw.addDocument(doc); |
| iw.close(); |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestDoc.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestDoc.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestDoc.java (working copy)
|
| @@ -30,8 +30,9 @@
|
| import junit.textui.TestRunner; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.store.Directory; |
| @@ -184,7 +185,7 @@
|
| { |
| File file = new File(workDir, fileName); |
| Document doc = new Document(); |
| - doc.add(new Field("contents", new FileReader(file))); |
| + doc.add(new TextField("contents", new FileReader(file))); |
| writer.addDocument(doc); |
| writer.commit(); |
| return writer.newestSegment(); |
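| The hunk above relies on a TextField(String, Reader) constructor mirroring the removed |
| Field(String, Reader). A sketch of the same usage, assuming that constructor exists as |
| the patch implies: |
| |
| // Reader-valued fields are tokenized and indexed but cannot be stored, so the |
| // TextField defaults need no further FieldType configuration. |
| doc.add(new TextField("contents", new FileReader(file))); |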
| Index: lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (working copy)
|
| @@ -25,9 +25,11 @@
|
| import java.util.Set; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.NumericField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.NumericField; |
| +import org.apache.lucene.document2.StringField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.DocTermOrds.TermOrdsIterator; |
| import org.apache.lucene.index.codecs.BlockTermsReader; |
| import org.apache.lucene.index.codecs.BlockTermsWriter; |
| @@ -62,7 +64,7 @@
|
| Directory dir = newDirectory(); |
| final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); |
| Document doc = new Document(); |
| - Field field = newField("field", "", Field.Index.ANALYZED); |
| + Field field = newField("field", "", TextField.DEFAULT_TYPE); |
| doc.add(field); |
| field.setValue("a b c"); |
| w.addDocument(doc); |
| @@ -264,7 +266,7 @@
|
| } |
| for(int ord : ordsForDocSet) { |
| ordsForDoc[upto++] = ord; |
| - Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS); |
| + Field field = newField("field", termsArray[ord].utf8ToString(), StringField.DEFAULT_TYPE); |
| if (VERBOSE) { |
| System.out.println(" f=" + termsArray[ord].utf8ToString()); |
| } |
| @@ -367,7 +369,7 @@
|
| } |
| for(int ord : ordsForDocSet) { |
| ordsForDoc[upto++] = ord; |
| - Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS); |
| + Field field = newField("field", termsArray[ord].utf8ToString(), StringField.DEFAULT_TYPE); |
| if (VERBOSE) { |
| System.out.println(" f=" + termsArray[ord].utf8ToString()); |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (working copy)
|
| @@ -21,8 +21,9 @@
|
| import java.util.Arrays; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| import org.apache.lucene.index.IndexReader.ReaderContext; |
| import org.apache.lucene.store.Directory; |
| @@ -49,9 +50,11 @@
|
| newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| for (int i = 0; i < 39; i++) { |
| Document doc = new Document(); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 " |
| + "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 " |
| - + "1 2 3 4 5 6 7 8 9 10", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)); |
| + + "1 2 3 4 5 6 7 8 9 10", customType)); |
| writer.addDocument(doc); |
| } |
| IndexReader reader = writer.getReader(); |
| @@ -117,6 +120,8 @@
|
| int max = 1051; |
| int term = random.nextInt(max); |
| Integer[][] positionsInDoc = new Integer[numDocs][]; |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| for (int i = 0; i < numDocs; i++) { |
| Document doc = new Document(); |
| ArrayList<Integer> positions = new ArrayList<Integer>(); |
| @@ -133,8 +138,7 @@
|
| builder.append(term); |
| positions.add(num); |
| } |
| - doc.add(newField(fieldName, builder.toString(), Field.Store.NO, |
| - Field.Index.ANALYZED_NO_NORMS)); |
| + doc.add(newField(fieldName, builder.toString(), customType)); |
| positionsInDoc[i] = positions.toArray(new Integer[0]); |
| writer.addDocument(doc); |
| } |
| @@ -199,6 +203,8 @@
|
| int max = 15678; |
| int term = random.nextInt(max); |
| int[] freqInDoc = new int[numDocs]; |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| for (int i = 0; i < numDocs; i++) { |
| Document doc = new Document(); |
| StringBuilder builder = new StringBuilder(); |
| @@ -209,8 +215,7 @@
|
| freqInDoc[i]++; |
| } |
| } |
| - doc.add(newField(fieldName, builder.toString(), Field.Store.NO, |
| - Field.Index.ANALYZED_NO_NORMS)); |
| + doc.add(newField(fieldName, builder.toString(), customType)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -275,6 +280,8 @@
|
| RandomIndexWriter writer = new RandomIndexWriter(random, dir, |
| newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| int howMany = 1000; |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| for (int i = 0; i < 39; i++) { |
| Document doc = new Document(); |
| StringBuilder builder = new StringBuilder(); |
| @@ -285,8 +292,7 @@
|
| builder.append("odd "); |
| } |
| } |
| - doc.add(newField(fieldName, builder.toString(), Field.Store.NO, |
| - Field.Index.ANALYZED_NO_NORMS)); |
| + doc.add(newField(fieldName, builder.toString(), customType)); |
| writer.addDocument(doc); |
| } |
| |
| Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy)
|
| @@ -28,12 +28,11 @@
|
| import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; |
| import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; |
| import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field.TermVector; |
| import org.apache.lucene.document.Fieldable; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.util.AttributeSource; |
| import org.apache.lucene.util.BytesRef; |
| @@ -70,7 +69,7 @@
|
| //After adding the document, we should be able to read it back in |
| SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); |
| assertTrue(reader != null); |
| - Document doc = reader.document(0); |
| + org.apache.lucene.document.Document doc = reader.document(0); |
| assertTrue(doc != null); |
| |
| //System.out.println("Document: " + doc); |
| @@ -122,8 +121,10 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); |
| |
| Document doc = new Document(); |
| - doc.add(newField("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("repeated", "repeated one", customType)); |
| + doc.add(newField("repeated", "repeated two", customType)); |
| |
| writer.addDocument(doc); |
| writer.commit(); |
| @@ -187,7 +188,9 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); |
| |
| Document doc = new Document(); |
| - doc.add(newField("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("f1", "a 5 a a", customType)); |
| |
| writer.addDocument(doc); |
| writer.commit(); |
| @@ -213,8 +216,8 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| - |
| - doc.add(new Field("preanalyzed", new TokenStream() { |
| + |
| + doc.add(new TextField("preanalyzed", new TokenStream() { |
| private String[] tokens = new String[] {"term1", "term2", "term3", "term2"}; |
| private int index = 0; |
| |
| @@ -231,7 +234,7 @@
|
| } |
| } |
| |
| - }, TermVector.NO)); |
| + })); |
| |
| writer.addDocument(doc); |
| writer.commit(); |
| @@ -264,11 +267,20 @@
|
| public void testMixedTermVectorSettingsSameField() throws Exception { |
| Document doc = new Document(); |
| // f1 first without tv then with tv |
| - doc.add(newField("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO)); |
| - doc.add(newField("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + doc.add(newField("f1", "v1", customType)); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setTokenized(false); |
| + customType2.setStoreTermVectors(true); |
| + customType2.setStoreTermVectorOffsets(true); |
| + customType2.setStoreTermVectorPositions(true); |
| + doc.add(newField("f1", "v2", customType2)); |
| // f2 first with tv then without tv |
| - doc.add(newField("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| - doc.add(newField("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO)); |
| + doc.add(newField("f2", "v1", customType2)); |
| + doc.add(newField("f2", "v2", customType)); |
| |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| @@ -297,13 +309,19 @@
|
| public void testLUCENE_1590() throws Exception { |
| Document doc = new Document(); |
| // f1 has no norms |
| - doc.add(newField("f1", "v1", Store.NO, Index.ANALYZED_NO_NORMS)); |
| - doc.add(newField("f1", "v2", Store.YES, Index.NO)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setOmitNorms(true); |
| + FieldType customType2 = new FieldType(); |
| + customType2.setStored(true); |
| + doc.add(newField("f1", "v1", customType)); |
| + doc.add(newField("f1", "v2", customType2)); |
| // f2 has no TF |
| - Field f = newField("f2", "v1", Store.NO, Index.ANALYZED); |
| - f.setOmitTermFreqAndPositions(true); |
| + FieldType customType3 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType3.setOmitTermFreqAndPositions(true); |
| + Field f = newField("f2", "v1", customType3); |
| doc.add(f); |
| - doc.add(newField("f2", "v2", Store.YES, Index.NO)); |
| + doc.add(newField("f2", "v2", customType2)); |
| |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
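| One behavioural detail of the migration shows up in this file: flags that used to be |
| flipped on a Field instance after construction, such as setOmitTermFreqAndPositions, |
| now live on the shared FieldType. A before/after sketch using the setters from the |
| hunks above: |
| |
| // Old: Field f = newField("f2", "v1", Store.NO, Index.ANALYZED); |
| // f.setOmitTermFreqAndPositions(true); |
| // New: configure the type once, reuse it for every field that omits freqs/positions. |
| FieldType noTf = new FieldType(TextField.DEFAULT_TYPE); |
| noTf.setOmitTermFreqAndPositions(true); |
| doc.add(new Field("f2", noTf, "v1")); |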
| Index: lucene/src/test/org/apache/lucene/index/TestFieldInfos.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (working copy)
|
| @@ -19,7 +19,7 @@
|
| |
| import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.util._TestUtil; |
| -import org.apache.lucene.document.Document; |
| +import org.apache.lucene.document2.Document; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.IndexOutput; |
| |
| Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
|
| @@ -44,7 +44,7 @@
|
| |
| public class TestFieldsReader extends LuceneTestCase { |
| private static Directory dir; |
| - private static Document testDoc = new Document(); |
| + private static org.apache.lucene.document2.Document testDoc = new org.apache.lucene.document2.Document(); |
| private static FieldInfos fieldInfos = null; |
| private final static String TEST_SEGMENT_NAME = "_0"; |
| |
| Index: lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java (working copy)
|
| @@ -22,8 +22,9 @@
|
| |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.util.BytesRef; |
| import org.apache.lucene.util.Bits; |
| |
| @@ -128,16 +129,18 @@
|
| Directory directory = newDirectory(); |
| IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| Document d1 = new Document(); |
| - d1.add(newField("default","one two", Field.Store.YES, Field.Index.ANALYZED)); |
| + d1.add(newField("default","one two", customType)); |
| writer.addDocument(d1); |
| |
| Document d2 = new Document(); |
| - d2.add(newField("default","one three", Field.Store.YES, Field.Index.ANALYZED)); |
| + d2.add(newField("default","one three", customType)); |
| writer.addDocument(d2); |
| |
| Document d3 = new Document(); |
| - d3.add(newField("default","two four", Field.Store.YES, Field.Index.ANALYZED)); |
| + d3.add(newField("default","two four", customType)); |
| writer.addDocument(d3); |
| |
| writer.close(); |
| Index: lucene/src/test/org/apache/lucene/index/TestFlex.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestFlex.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestFlex.java (working copy)
|
| @@ -19,7 +19,7 @@
|
| |
| import org.apache.lucene.store.*; |
| import org.apache.lucene.analysis.*; |
| -import org.apache.lucene.document.*; |
| +import org.apache.lucene.document2.*; |
| import org.apache.lucene.util.*; |
| |
| public class TestFlex extends LuceneTestCase { |
| @@ -39,10 +39,10 @@
|
| for(int iter=0;iter<2;iter++) { |
| if (iter == 0) { |
| Document doc = new Document(); |
| - doc.add(newField("field1", "this is field1", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("field2", "this is field2", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("field3", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("field4", "bbb", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("field1", "this is field1", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("field2", "this is field2", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("field3", "aaa", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("field4", "bbb", TextField.DEFAULT_TYPE)); |
| for(int i=0;i<DOC_COUNT;i++) { |
| w.addDocument(doc); |
| } |
| @@ -66,7 +66,7 @@
|
| IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, |
| new MockAnalyzer(random)).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); |
| Document doc = new Document(); |
| - doc.add(newField("f", "a b c", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("f", "a b c", TextField.DEFAULT_TYPE)); |
| w.addDocument(doc); |
| IndexReader r = w.getReader(); |
| TermsEnum terms = r.getSequentialSubReaders()[0].fields().terms("f").iterator(); |
| Index: lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java (working copy)
|
| @@ -27,11 +27,11 @@
|
| import java.util.TreeMap; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field.TermVector; |
| +import org.apache.lucene.document2.BinaryField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.FieldInfos.FieldNumberBiMap; |
| import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter; |
| import org.apache.lucene.store.Directory; |
| @@ -43,6 +43,8 @@
|
| |
| public void testGlobalFieldNumberFiles() throws IOException { |
| int num = atLeast(3); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < num; i++) { |
| Directory dir = newDirectory(); |
| { |
| @@ -50,10 +52,8 @@
|
| new MockAnalyzer(random)); |
| IndexWriter writer = new IndexWriter(dir, config); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| + d.add(new Field("f1", customType, "d1 first field")); |
| + d.add(new Field("f2", customType, "d1 second field")); |
| writer.addDocument(d); |
| for (String string : writer.getIndexFileNames()) { |
| assertFalse(string.endsWith(".fnx")); |
| @@ -67,9 +67,8 @@
|
| |
| assertFNXFiles(dir, "1.fnx"); |
| d = new Document(); |
| - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3 })); |
| + d.add(new Field("f1", customType, "d2 first field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); |
| writer.addDocument(d); |
| writer.commit(); |
| files = writer.getIndexFileNames(); |
| @@ -86,11 +85,9 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d3 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d3 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3, 4, 5 })); |
| + d.add(new Field("f1", customType, "d3 first field")); |
| + d.add(new Field("f2", customType, "d3 second field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3, 4, 5 })); |
| writer.addDocument(d); |
| writer.close(); |
| Collection<String> files = writer.getIndexFileNames(); |
| @@ -115,6 +112,8 @@
|
| |
| public void testIndexReaderCommit() throws IOException { |
| int num = atLeast(3); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < num; i++) { |
| Directory dir = newDirectory(); |
| { |
| @@ -122,17 +121,14 @@
|
| new MockAnalyzer(random)); |
| IndexWriter writer = new IndexWriter(dir, config); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| + d.add(new Field("f1", customType, "d1 first field")); |
| + d.add(new Field("f2", customType, "d1 second field")); |
| writer.addDocument(d); |
| writer.commit(); |
| assertFNXFiles(dir, "1.fnx"); |
| d = new Document(); |
| - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3 })); |
| + d.add(new Field("f1", customType, "d2 first field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); |
| writer.addDocument(d); |
| writer.commit(); |
| assertFNXFiles(dir, "2.fnx"); |
| @@ -159,6 +155,8 @@
|
| |
| public void testGlobalFieldNumberFilesAcrossCommits() throws IOException { |
| int num = atLeast(3); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < num; i++) { |
| Directory dir = newDirectory(); |
| { |
| @@ -166,17 +164,14 @@
|
| TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy( |
| new KeepAllDeletionPolicy())); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| + d.add(new Field("f1", customType, "d1 first field")); |
| + d.add(new Field("f2", customType, "d1 second field")); |
| writer.addDocument(d); |
| writer.commit(); |
| assertFNXFiles(dir, "1.fnx"); |
| d = new Document(); |
| - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3 })); |
| + d.add(new Field("f1", customType, "d2 first field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); |
| writer.addDocument(d); |
| writer.commit(); |
| writer.commit(); |
| @@ -190,11 +185,9 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d3 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d3 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3, 4, 5 })); |
| + d.add(new Field("f1", customType, "d3 first field")); |
| + d.add(new Field("f2", customType, "d3 second field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3, 4, 5 })); |
| writer.addDocument(d); |
| writer.close(); |
| assertFNXFiles(dir, "2.fnx"); |
| @@ -211,23 +204,22 @@
|
| |
| public void testGlobalFieldNumberOnOldCommit() throws IOException { |
| int num = atLeast(3); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < num; i++) { |
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy( |
| new KeepAllDeletionPolicy())); |
| Document d = new Document(); |
| - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| + d.add(new Field("f1", customType, "d1 first field")); |
| + d.add(new Field("f2", customType, "d1 second field")); |
| writer.addDocument(d); |
| writer.commit(); |
| assertFNXFiles(dir, "1.fnx"); |
| d = new Document(); |
| - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3 })); |
| + d.add(new Field("f1", customType, "d2 first field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); |
| writer.addDocument(d); |
| assertFNXFiles(dir, "1.fnx"); |
| writer.close(); |
| @@ -240,9 +232,8 @@
|
| new KeepAllDeletionPolicy()).setIndexCommit(listCommits.get(0))); |
| |
| d = new Document(); |
| - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| - d.add(new Field("f3", new byte[] { 1, 2, 3 })); |
| + d.add(new Field("f1", customType, "d2 first field")); |
| + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); |
| writer.addDocument(d); |
| writer.commit(); |
| // now we have 3 files since f3 is not present in the first commit |
| @@ -271,9 +262,13 @@
|
| Document doc = new Document(); |
| final int numFields = 1 + random.nextInt(fieldNames.length); |
| for (int j = 0; j < numFields; j++) { |
| + FieldType customType = new FieldType(); |
| + customType.setIndexed(true); |
| + customType.setTokenized(random.nextBoolean()); |
| + customType.setOmitNorms(random.nextBoolean()); |
| doc.add(newField(fieldNames[random.nextInt(fieldNames.length)], |
| _TestUtil.randomRealisticUnicodeString(random), |
| - Index.toIndex(true, random.nextBoolean(), random.nextBoolean()))); |
| + customType)); |
| |
| } |
| writer.addDocument(doc); |
| @@ -322,9 +317,13 @@
|
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| for (String string : fieldNames) { |
| + FieldType customType = new FieldType(); |
| + customType.setIndexed(true); |
| + customType.setTokenized(random.nextBoolean()); |
| + customType.setOmitNorms(random.nextBoolean()); |
| doc.add(newField(string, |
| _TestUtil.randomRealisticUnicodeString(random), |
| - Index.toIndex(true, random.nextBoolean(), random.nextBoolean()))); |
| + customType)); |
| |
| } |
| writer.addDocument(doc); |
| @@ -419,8 +418,12 @@
|
| String name = copySortedMap.get(nextField); |
| assertNotNull(name); |
| |
| + FieldType customType = new FieldType(); |
| + customType.setIndexed(true); |
| + customType.setTokenized(random.nextBoolean()); |
| + customType.setOmitNorms(random.nextBoolean()); |
| doc.add(newField(name, _TestUtil.randomRealisticUnicodeString(random), |
| - Index.toIndex(true, random.nextBoolean(), random.nextBoolean()))); |
| + customType)); |
| writer.addDocument(doc); |
| if (random.nextInt(10) == 0) { |
| writer.commit(); |
| @@ -480,8 +483,9 @@
|
| } |
| |
| Document d = new Document(); |
| - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, |
| - TermVector.NO)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + d.add(new Field("f1", customType, "d1 first field")); |
| writer.addDocument(d); |
| writer.prepareCommit(); |
| // the fnx file should still be under control of the SIS |
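| The randomized fields in this file replace Index.toIndex(true, tokenized, omitNorms) |
| with a bare FieldType whose axes are set independently; the same shape works anywhere |
| a test needs a random but always-indexed field. A sketch under those assumptions: |
| |
| FieldType randomType = new FieldType(); // start from nothing |
| randomType.setIndexed(true); // Index.toIndex(true, ...) |
| randomType.setTokenized(random.nextBoolean()); // ANALYZED vs NOT_ANALYZED |
| randomType.setOmitNorms(random.nextBoolean()); // *_NO_NORMS variants |
| doc.add(newField("f", _TestUtil.randomRealisticUnicodeString(random), randomType)); |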
| Index: lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (working copy)
|
| @@ -25,8 +25,9 @@
|
| import org.apache.lucene.store.IndexOutput; |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| |
| import java.io.*; |
| @@ -229,8 +230,11 @@
|
| private void addDoc(IndexWriter writer, int id) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| + doc.add(newField("content", "aaa", TextField.DEFAULT_TYPE)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + doc.add(newField("id", Integer.toString(id), customType)); |
| writer.addDocument(doc); |
| } |
| } |
| Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexReader.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
|
| @@ -31,8 +31,12 @@
|
| import java.util.SortedSet; |
| import org.junit.Assume; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.BinaryField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.StringField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.document.FieldSelector; |
| import org.apache.lucene.document.Fieldable; |
| import org.apache.lucene.document.SetBasedFieldSelector; |
| @@ -154,10 +158,20 @@
|
| ); |
| |
| Document doc = new Document(); |
| - doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO)); |
| - doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + |
| + FieldType customType3 = new FieldType(); |
| + customType3.setStored(true); |
| + |
| + doc.add(new Field("keyword",customType,"test1")); |
| + doc.add(new Field("text",customType2,"test1")); |
| + doc.add(new Field("unindexed",customType3,"test1")); |
| + doc.add(new TextField("unstored","test1")); |
| writer.addDocument(doc); |
| |
| writer.close(); |
| @@ -180,29 +194,49 @@
|
| int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor(); |
| for (int i = 0; i < 5*mergeFactor; i++) { |
| doc = new Document(); |
| - doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO)); |
| - doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(new Field("keyword",customType,"test1")); |
| + doc.add(new Field("text",customType2, "test1")); |
| + doc.add(new Field("unindexed",customType3,"test1")); |
| + doc.add(new TextField("unstored","test1")); |
| writer.addDocument(doc); |
| } |
| // new fields are in some different segments (we hope) |
| for (int i = 0; i < 5*mergeFactor; i++) { |
| doc = new Document(); |
| - doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO)); |
| - doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(new Field("keyword2",customType,"test1")); |
| + doc.add(new Field("text2",customType2, "test1")); |
| + doc.add(new Field("unindexed2",customType3,"test1")); |
| + doc.add(new TextField("unstored2","test1")); |
| writer.addDocument(doc); |
| } |
| // new termvector fields |
| + |
| + FieldType customType4 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType4.setStored(true); |
| + FieldType customType5 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType5.setStored(true); |
| + customType5.setStoreTermVectors(true); |
| + FieldType customType6 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType6.setStored(true); |
| + customType6.setStoreTermVectors(true); |
| + customType6.setStoreTermVectorOffsets(true); |
| + FieldType customType7 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType7.setStored(true); |
| + customType7.setStoreTermVectors(true); |
| + customType7.setStoreTermVectorPositions(true); |
| + FieldType customType8 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType8.setStored(true); |
| + customType8.setStoreTermVectors(true); |
| + customType8.setStoreTermVectorOffsets(true); |
| + customType8.setStoreTermVectorPositions(true); |
| + |
| for (int i = 0; i < 5*mergeFactor; i++) { |
| doc = new Document(); |
| - doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| - doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES)); |
| - doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); |
| - doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| - doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + doc.add(new Field("tvnot",customType4,"tvnot")); |
| + doc.add(new Field("termvector",customType5,"termvector")); |
| + doc.add(new Field("tvoffset",customType6,"tvoffset")); |
| + doc.add(new Field("tvposition",customType7,"tvposition")); |
| + doc.add(new Field("tvpositionoffset",customType8, "tvpositionoffset")); |
| writer.addDocument(doc); |
| } |
| |
| @@ -277,14 +311,32 @@
|
| // want to get some more segments here |
| // new termvector fields |
| int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor(); |
| + FieldType customType4 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType4.setStored(true); |
| + FieldType customType5 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType5.setStored(true); |
| + customType5.setStoreTermVectors(true); |
| + FieldType customType6 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType6.setStored(true); |
| + customType6.setStoreTermVectors(true); |
| + customType6.setStoreTermVectorOffsets(true); |
| + FieldType customType7 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType7.setStored(true); |
| + customType7.setStoreTermVectors(true); |
| + customType7.setStoreTermVectorPositions(true); |
| + FieldType customType8 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType8.setStored(true); |
| + customType8.setStoreTermVectors(true); |
| + customType8.setStoreTermVectorOffsets(true); |
| + customType8.setStoreTermVectorPositions(true); |
| for (int i = 0; i < 5 * mergeFactor; i++) { |
| Document doc = new Document(); |
| - doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| - doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES)); |
| - doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); |
| - doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| - doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| - |
| + doc.add(new Field("tvnot",customType4,"one two two three three three")); |
| + doc.add(new Field("termvector",customType5,"one two two three three three")); |
| + doc.add(new Field("tvoffset",customType6,"one two two three three three")); |
| + doc.add(new Field("tvposition",customType7,"one two two three three three")); |
| + doc.add(new Field("tvpositionoffset",customType8, "one two two three three three")); |
| + |
| writer.addDocument(doc); |
| } |
| writer.close(); |
| @@ -338,16 +390,16 @@
|
| writer.close(); |
| writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy())); |
| Document doc = new Document(); |
| - doc.add(new Field("bin1", bin)); |
| - doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(new BinaryField("bin1", bin)); |
| + doc.add(new TextField("junk", "junk text")); |
| writer.addDocument(doc); |
| writer.close(); |
| IndexReader reader = IndexReader.open(dir, false); |
| - doc = reader.document(reader.maxDoc() - 1); |
| - Field[] fields = doc.getFields("bin1"); |
| + org.apache.lucene.document.Document doc2 = reader.document(reader.maxDoc() - 1); |
| + org.apache.lucene.document.Field[] fields = doc2.getFields("bin1"); |
| assertNotNull(fields); |
| assertEquals(1, fields.length); |
| - Field b1 = fields[0]; |
| + org.apache.lucene.document.Field b1 = fields[0]; |
| assertTrue(b1.isBinary()); |
| BytesRef bytesRef = b1.binaryValue(null); |
| assertEquals(bin.length, bytesRef.length); |
| @@ -357,8 +409,8 @@
|
| Set<String> lazyFields = new HashSet<String>(); |
| lazyFields.add("bin1"); |
| FieldSelector sel = new SetBasedFieldSelector(new HashSet<String>(), lazyFields); |
| - doc = reader.document(reader.maxDoc() - 1, sel); |
| - Fieldable[] fieldables = doc.getFieldables("bin1"); |
| + doc2 = reader.document(reader.maxDoc() - 1, sel); |
| + Fieldable[] fieldables = doc2.getFieldables("bin1"); |
| assertNotNull(fieldables); |
| assertEquals(1, fieldables.length); |
| Fieldable fb1 = fieldables[0]; |
| @@ -377,8 +429,8 @@
|
| writer.optimize(); |
| writer.close(); |
| reader = IndexReader.open(dir, false); |
| - doc = reader.document(reader.maxDoc() - 1); |
| - fields = doc.getFields("bin1"); |
| + doc2 = reader.document(reader.maxDoc() - 1); |
| + fields = doc2.getFields("bin1"); |
| assertNotNull(fields); |
| assertEquals(1, fields.length); |
| b1 = fields[0]; |
| @@ -778,38 +830,76 @@
|
| static void addDocumentWithFields(IndexWriter writer) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("text","test1", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("unindexed","test1", Field.Store.YES, Field.Index.NO)); |
| - doc.add(newField("unstored","test1", Field.Store.NO, Field.Index.ANALYZED)); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + |
| + FieldType customType3 = new FieldType(); |
| + customType3.setStored(true); |
| + doc.add(newField("keyword", "test1", customType)); |
| + doc.add(newField("text", "test1", customType2)); |
| + doc.add(newField("unindexed", "test1", customType3)); |
| + doc.add(new TextField("unstored","test1")); |
| writer.addDocument(doc); |
| } |
| |
| static void addDocumentWithDifferentFields(IndexWriter writer) throws IOException |
| { |
| - Document doc = new Document(); |
| - doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(newField("text2","test1", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("unindexed2","test1", Field.Store.YES, Field.Index.NO)); |
| - doc.add(newField("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED)); |
| - writer.addDocument(doc); |
| + Document doc = new Document(); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + |
| + FieldType customType3 = new FieldType(); |
| + customType3.setStored(true); |
| + doc.add(newField("keyword2", "test1", customType)); |
| + doc.add(newField("text2", "test1", customType2)); |
| + doc.add(newField("unindexed2", "test1", customType3)); |
| + doc.add(new TextField("unstored2","test1")); |
| + writer.addDocument(doc); |
| } |
| |
| static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); |
| - doc.add(newField("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES)); |
| - doc.add(newField("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); |
| - doc.add(newField("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); |
| - doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType4 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType4.setStored(true); |
| + FieldType customType5 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType5.setStored(true); |
| + customType5.setStoreTermVectors(true); |
| + FieldType customType6 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType6.setStored(true); |
| + customType6.setStoreTermVectors(true); |
| + customType6.setStoreTermVectorOffsets(true); |
| + FieldType customType7 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType7.setStored(true); |
| + customType7.setStoreTermVectors(true); |
| + customType7.setStoreTermVectorPositions(true); |
| + FieldType customType8 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType8.setStored(true); |
| + customType8.setStoreTermVectors(true); |
| + customType8.setStoreTermVectorOffsets(true); |
| + customType8.setStoreTermVectorPositions(true); |
| + doc.add(newField("tvnot","tvnot",customType4)); |
| + doc.add(newField("termvector","termvector",customType5)); |
| + doc.add(newField("tvoffset","tvoffset", customType6)); |
| + doc.add(newField("tvposition","tvposition", customType7)); |
| + doc.add(newField("tvpositionoffset","tvpositionoffset", customType8)); |
| |
| writer.addDocument(doc); |
| } |
| |
| static void addDoc(IndexWriter writer, String value) throws IOException { |
| Document doc = new Document(); |
| - doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("content", value, TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -860,16 +950,16 @@
|
| // check stored fields |
| for (int i = 0; i < index1.maxDoc(); i++) { |
| if (delDocs1 == null || !delDocs1.get(i)) { |
| - Document doc1 = index1.document(i); |
| - Document doc2 = index2.document(i); |
| + org.apache.lucene.document.Document doc1 = index1.document(i); |
| + org.apache.lucene.document.Document doc2 = index2.document(i); |
| List<Fieldable> fieldable1 = doc1.getFields(); |
| List<Fieldable> fieldable2 = doc2.getFields(); |
| assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size()); |
| Iterator<Fieldable> itField1 = fieldable1.iterator(); |
| Iterator<Fieldable> itField2 = fieldable2.iterator(); |
| while (itField1.hasNext()) { |
| - Field curField1 = (Field) itField1.next(); |
| - Field curField2 = (Field) itField2.next(); |
| + org.apache.lucene.document.Field curField1 = (org.apache.lucene.document.Field) itField1.next(); |
| + org.apache.lucene.document.Field curField2 = (org.apache.lucene.document.Field) itField2.next(); |
| assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name()); |
| assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue()); |
| } |
| @@ -1047,7 +1137,12 @@
|
| |
| static Document createDocument(String id) { |
| Document doc = new Document(); |
| - doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + customType.setOmitNorms(true); |
| + |
| + doc.add(newField("id", id, customType)); |
| return doc; |
| } |
| |
| @@ -1097,7 +1192,7 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| - doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + doc.add(newField("number", "17", StringField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.close(); |
| |
| @@ -1132,7 +1227,7 @@
|
| setMergePolicy(newLogMergePolicy(10)) |
| ); |
| Document doc = new Document(); |
| - doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + doc.add(newField("number", "17", StringField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.commit(); |
| |
| @@ -1164,8 +1259,8 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); |
| Document doc = new Document(); |
| - doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.addDocument(doc); |
| writer.commit(); |
| @@ -1197,8 +1292,8 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); |
| Document doc = new Document(); |
| - doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED)); |
| - doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", TextField.DEFAULT_TYPE)); |
| + doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.addDocument(doc); |
| writer.close(); |
| @@ -1302,7 +1397,7 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document d = new Document(); |
| - d.add(newField("f", "a a b", Field.Index.ANALYZED)); |
| + d.add(newField("f", "a a b", TextField.DEFAULT_TYPE)); |
| writer.addDocument(d); |
| IndexReader r = writer.getReader(); |
| writer.close(); |
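|
| Taken together, the TestIndexReader hunks above show the core shape of this migration: flags that were Field.Store/Field.Index enum arguments move onto a FieldType cloned from TextField.DEFAULT_TYPE, and the value moves to the last argument of the direct Field constructor. A minimal sketch with illustrative names (note that the newField test helper keeps the old newField(name, value, type) order, while the constructor takes new Field(name, type, value)):
|
|     import org.apache.lucene.document2.Document;
|     import org.apache.lucene.document2.Field;
|     import org.apache.lucene.document2.FieldType;
|     import org.apache.lucene.document2.TextField;
|
|     // Before: doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED));
|     FieldType storedText = new FieldType(TextField.DEFAULT_TYPE); // indexed + tokenized
|     storedText.setStored(true);                                   // was Field.Store.YES
|
|     Document doc = new Document();
|     doc.add(new Field("text", storedText, "test1"));              // type now precedes value
|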
| Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (working copy)
|
| @@ -21,8 +21,9 @@
|
| import org.apache.lucene.search.DefaultSimilarity; |
| import org.apache.lucene.search.Similarity; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.LockObtainFailedException; |
| import org.apache.lucene.util.LuceneTestCase; |
| @@ -500,7 +501,9 @@
|
| setMergePolicy(newLogMergePolicy(false)) |
| ); |
| Document doc = new Document(); |
| - doc.add(newField("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("field", "yes it's stored", customType)); |
| w.addDocument(doc); |
| w.close(); |
| IndexReader r1 = IndexReader.open(dir, false); |
| Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy)
|
| @@ -24,10 +24,11 @@
|
| import java.util.concurrent.atomic.AtomicInteger; |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.StringField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.index.SegmentNorms; |
| import org.apache.lucene.search.DefaultSimilarity; |
| @@ -329,8 +330,11 @@
|
| private Document newDoc() { |
| Document d = new Document(); |
| float boost = nextNorm("anyfield"); // in this test the same similarity is used for all fields so it does not matter what field is passed |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| for (int i = 0; i < 10; i++) { |
| - Field f = newField("f" + i, "v" + i, Store.NO, Index.NOT_ANALYZED); |
| + Field f = newField("f" + i, "v" + i, customType); |
| f.setBoost(boost); |
| d.add(f); |
| } |
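|
| One detail worth noting from TestIndexReaderCloneNorms above: only index-time flags move onto the shared FieldType; per-instance state such as the boost is still set on each Field. A sketch with illustrative names, assuming a Document d is in scope:
|
|     FieldType untokenized = new FieldType(TextField.DEFAULT_TYPE);
|     untokenized.setTokenized(false);  // was Field.Index.NOT_ANALYZED
|     Field f = new Field("f0", untokenized, "v0");
|     f.setBoost(1.5f);                 // boost remains a per-Field property
|     d.add(f);
|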
| Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (working copy)
|
| @@ -20,8 +20,9 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.store.Directory; |
| import org.apache.lucene.store.MockDirectoryWrapper; |
| @@ -276,11 +277,13 @@
|
| Directory dir = newDirectory(); |
| RandomIndexWriter w= new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); |
| Document doc = new Document(); |
| - doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| + doc.add(newField("f", "doctor", customType)); |
| w.addDocument(doc); |
| doc = new Document(); |
| w.commit(); |
| - doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + doc.add(newField("f", "who", customType)); |
| w.addDocument(doc); |
| IndexReader r = new SlowMultiReaderWrapper(w.getReader()); |
| w.close(); |
| Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (working copy)
|
| @@ -20,8 +20,9 @@
|
| import java.io.IOException; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.search.DefaultSimilarity; |
| import org.apache.lucene.search.IndexSearcher; |
| import org.apache.lucene.search.ScoreDoc; |
| @@ -50,10 +51,13 @@
|
| System.out.println("TEST: create initial index"); |
| writer.setInfoStream(System.out); |
| } |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| for(int i=0;i<157;i++) { |
| Document d = new Document(); |
| - d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); |
| + d.add(newField("id", Integer.toString(i), customType)); |
| + d.add(newField("content", "aaa " + i, TextField.DEFAULT_TYPE)); |
| writer.addDocument(d); |
| if (0==i%10) |
| writer.commit(); |
| Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy)
|
| @@ -30,10 +30,11 @@
|
| import java.util.Set; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.StringField; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.search.DefaultSimilarity; |
| import org.apache.lucene.search.FieldCache; |
| @@ -168,17 +169,26 @@
|
| IndexReader reader = IndexReader.open(dir, false); |
| try { |
| int M = 3; |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setTokenized(false); |
| + customType2.setOmitNorms(true); |
| + FieldType customType3 = new FieldType(); |
| + customType3.setStored(true); |
| for (int i=0; i<4; i++) { |
| for (int j=0; j<M; j++) { |
| Document doc = new Document(); |
| - doc.add(newField("id", i+"_"+j, Store.YES, Index.NOT_ANALYZED)); |
| - doc.add(newField("id2", i+"_"+j, Store.YES, Index.NOT_ANALYZED_NO_NORMS)); |
| - doc.add(newField("id3", i+"_"+j, Store.YES, Index.NO)); |
| + doc.add(newField("id", i+"_"+j, customType)); |
| + doc.add(newField("id2", i+"_"+j, customType2)); |
| + doc.add(newField("id3", i+"_"+j, customType3)); |
| iwriter.addDocument(doc); |
| if (i>0) { |
| int k = i-1; |
| int n = j + k*M; |
| - Document prevItereationDoc = reader.document(n); |
| + org.apache.lucene.document.Document prevItereationDoc = reader.document(n); |
| assertNotNull(prevItereationDoc); |
| String id = prevItereationDoc.get("id"); |
| assertEquals(k+"_"+j, id); |
| @@ -956,13 +966,21 @@
|
| Document doc = new Document(); |
| sb.append("a"); |
| sb.append(n); |
| - doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED)); |
| - doc.add(new Field("fielda", sb.toString(), Store.YES, Index.NOT_ANALYZED_NO_NORMS)); |
| - doc.add(new Field("fieldb", sb.toString(), Store.YES, Index.NO)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setTokenized(false); |
| + customType2.setOmitNorms(true); |
| + FieldType customType3 = new FieldType(); |
| + customType3.setStored(true); |
| + doc.add(new Field("field1", customType, sb.toString())); |
| + doc.add(new Field("fielda", customType2, sb.toString())); |
| + doc.add(new Field("fieldb", customType3, sb.toString())); |
| sb.append(" b"); |
| sb.append(n); |
| for (int i = 1; i < numFields; i++) { |
| - doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.ANALYZED)); |
| + doc.add(new Field("field" + (i+1), customType, sb.toString())); |
| } |
| return doc; |
| } |
| @@ -1177,7 +1195,7 @@
|
| ); |
| for(int i=0;i<4;i++) { |
| Document doc = new Document(); |
| - doc.add(newField("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + doc.add(newField("id", ""+i, StringField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| Map<String,String> data = new HashMap<String,String>(); |
| data.put("index", i+""); |
| @@ -1238,7 +1256,7 @@
|
| setMergePolicy(newLogMergePolicy(10)) |
| ); |
| Document doc = new Document(); |
| - doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + doc.add(newField("number", "17", StringField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.commit(); |
| |
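|
| Where no custom flags are needed, the hunks above skip FieldType setup entirely in favor of the dedicated document2 classes and their DEFAULT_TYPE constants, as with StringField.DEFAULT_TYPE in TestIndexReaderReopen. A sketch with illustrative names and values:
|
|     doc.add(new TextField("unstored", "test1"));                 // analyzed, not stored
|     doc.add(newField("number", "17", StringField.DEFAULT_TYPE)); // one token, not stored
|     doc.add(new BinaryField("bin1", new byte[] {1, 2, 3}));      // stored; not indexed by default
|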
| Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
|
| @@ -42,12 +42,12 @@
|
| import org.apache.lucene.analysis.Tokenizer; |
| import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; |
| import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field.Index; |
| -import org.apache.lucene.document.Field.Store; |
| -import org.apache.lucene.document.Field.TermVector; |
| -import org.apache.lucene.document.Field; |
| import org.apache.lucene.document.Fieldable; |
| +import org.apache.lucene.document2.BinaryField; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexWriterConfig.OpenMode; |
| import org.apache.lucene.search.DocIdSetIterator; |
| import org.apache.lucene.search.FieldCache; |
| @@ -77,6 +77,11 @@
|
| |
| public class TestIndexWriter extends LuceneTestCase { |
| |
| +  private static final FieldType storedTextType = new FieldType(TextField.DEFAULT_TYPE); |
| +  static { |
| +    // the constructor calls this type replaces used Field.Store.YES, so keep it stored |
| +    storedTextType.setStored(true); |
| +  } |
| public void testDocCount() throws IOException { |
| Directory dir = newDirectory(); |
| |
| @@ -137,15 +138,15 @@
|
| static void addDoc(IndexWriter writer) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("content", "aaa", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| } |
| |
| static void addDocWithIndex(IndexWriter writer, int index) throws IOException |
| { |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("content", "aaa " + index, storedTextType)); |
| + doc.add(newField("id", "" + index, storedTextType)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -255,12 +256,12 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); |
| for(int j=0;j<100;j++) { |
| Document doc = new Document(); |
| - doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("a"+j, "aaa" + j, storedTextType)); |
| + doc.add(newField("b"+j, "aaa" + j, storedTextType)); |
| + doc.add(newField("c"+j, "aaa" + j, storedTextType)); |
| + doc.add(newField("d"+j, "aaa", storedTextType)); |
| + doc.add(newField("e"+j, "aaa", storedTextType)); |
| + doc.add(newField("f"+j, "aaa", storedTextType)); |
| writer.addDocument(doc); |
| } |
| writer.close(); |
| @@ -291,7 +292,7 @@
|
| int lastNumFile = dir.listAll().length; |
| for(int j=0;j<9;j++) { |
| Document doc = new Document(); |
| - doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "aaa" + j, storedTextType)); |
| writer.addDocument(doc); |
| int numFile = dir.listAll().length; |
| // Verify that with a tiny RAM buffer we see new |
| @@ -314,7 +315,7 @@
|
| int lastFlushCount = -1; |
| for(int j=1;j<52;j++) { |
| Document doc = new Document(); |
| - doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(new Field("field", storedTextType, "aaa" + j)); |
| writer.addDocument(doc); |
| _TestUtil.syncConcurrentMerges(writer); |
| int flushCount = writer.getFlushCount(); |
| @@ -368,7 +369,7 @@
|
| |
| for(int j=1;j<52;j++) { |
| Document doc = new Document(); |
| - doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(new Field("field", storedTextType, "aaa" + j)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -429,7 +430,7 @@
|
| for(int j=0;j<100;j++) { |
| Document doc = new Document(); |
| for(int k=0;k<100;k++) { |
| - doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field", Integer.toString(random.nextInt()), storedTextType)); |
| } |
| writer.addDocument(doc); |
| } |
| @@ -438,7 +439,7 @@
|
| // occurs (heavy on byte blocks) |
| for(int j=0;j<100;j++) { |
| Document doc = new Document(); |
| - doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", storedTextType)); |
| writer.addDocument(doc); |
| } |
| |
| @@ -453,7 +454,7 @@
|
| String longTerm = b.toString(); |
| |
| Document doc = new Document(); |
| - doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field", longTerm, storedTextType)); |
| writer.addDocument(doc); |
| } |
| } |
| @@ -471,12 +472,18 @@
|
| MockDirectoryWrapper dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); |
| // Enable norms for only 1 doc, pre flush |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setOmitNorms(true); |
| for(int j=0;j<10;j++) { |
| Document doc = new Document(); |
| - Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); |
| + Field f = null; |
| if (j != 8) { |
| - f.setOmitNorms(true); |
| + f = newField("field", "aaa", customType); |
| } |
| + else { |
| + f = newField("field", "aaa", storedTextType); |
| + } |
| doc.add(f); |
| writer.addDocument(doc); |
| } |
| @@ -494,10 +501,13 @@
|
| // Enable norms for only 1 doc, post flush |
| for(int j=0;j<27;j++) { |
| Document doc = new Document(); |
| - Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); |
| + Field f = null; |
| if (j != 26) { |
| - f.setOmitNorms(true); |
| + f = newField("field", "aaa", customType); |
| } |
| + else { |
| + f = newField("field", "aaa", storedTextType); |
| + } |
| doc.add(f); |
| writer.addDocument(doc); |
| } |
| @@ -526,7 +536,12 @@
|
| b.append(" a a a a a a a a"); |
| } |
| Document doc = new Document(); |
| - doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("field", b.toString(), customType)); |
| writer.addDocument(doc); |
| writer.close(); |
| |
| @@ -594,7 +609,12 @@
|
| setMergePolicy(newLogMergePolicy(10)) |
| ); |
| Document doc = new Document(); |
| - doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("field", "aaa", customType)); |
| for(int i=0;i<19;i++) |
| writer.addDocument(doc); |
| writer.flush(false, true); |
| @@ -614,7 +634,12 @@
|
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| writer.setInfoStream(VERBOSE ? System.out : null); |
| Document doc = new Document(); |
| - doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("field", "aaa", customType)); |
| writer.addDocument(doc); |
| writer.commit(); |
| if (VERBOSE) { |
| @@ -643,7 +668,9 @@
|
| TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| |
| Document document = new Document(); |
| - document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStoreTermVectors(true); |
| + document.add(newField("tvtest", "", customType)); |
| iw.addDocument(document); |
| iw.close(); |
| dir.close(); |
| @@ -660,8 +687,9 @@
|
| ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2); |
| IndexWriter iw = new IndexWriter(dir, conf); |
| Document document = new Document(); |
| - document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED, |
| - Field.TermVector.YES)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStoreTermVectors(true); |
| + document.add(newField("tvtest", "a b c", customType)); |
| Thread.currentThread().setPriority(Thread.MAX_PRIORITY); |
| for(int i=0;i<4;i++) |
| iw.addDocument(document); |
| @@ -687,24 +715,21 @@
|
| Document doc = new Document(); |
| String contents = "aa bb cc dd ee ff gg hh ii jj kk"; |
| |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + FieldType type = null; |
| if (i == 7) { |
| // Add empty docs here |
| - doc.add(newField("content3", "", Field.Store.NO, |
| - Field.Index.ANALYZED)); |
| + doc.add(newField("content3", "", TextField.DEFAULT_TYPE)); |
| } else { |
| - Field.Store storeVal; |
| if (i%2 == 0) { |
| - doc.add(newField("content4", contents, Field.Store.YES, |
| - Field.Index.ANALYZED)); |
| - storeVal = Field.Store.YES; |
| + doc.add(newField("content4", contents, customType)); |
| + type = customType; |
| } else |
| - storeVal = Field.Store.NO; |
| - doc.add(newField("content1", contents, storeVal, |
| - Field.Index.ANALYZED)); |
| - doc.add(newField("content3", "", Field.Store.YES, |
| - Field.Index.ANALYZED)); |
| - doc.add(newField("content5", "", storeVal, |
| - Field.Index.ANALYZED)); |
| + type = TextField.DEFAULT_TYPE; |
| +      doc.add(newField("content1", contents, type)); // preserve the old storeVal behavior |
| + doc.add(newField("content3", "", customType)); |
| + doc.add(newField("content5", "", type)); |
| } |
| |
| for(int j=0;j<4;j++) |
| @@ -730,7 +755,11 @@
|
| Directory directory = newDirectory(); |
| |
| final Document doc = new Document(); |
| - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + |
| + Field idField = newField("id", "", customType); |
| doc.add(idField); |
| |
| for(int pass=0;pass<2;pass++) { |
| @@ -834,7 +863,7 @@
|
| for(int i=0;i<10000;i++) |
| b.append(" a"); |
| b.append(" x"); |
| - doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("field", b.toString(), TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.close(); |
| |
| @@ -852,7 +881,7 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| - doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("", "a b c", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.close(); |
| dir.close(); |
| @@ -886,8 +915,9 @@
|
| Directory dir = newDirectory(); |
| MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| - doc.add(newField("field", "a field", Field.Store.YES, |
| - Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("field", "a field", customType)); |
| w.addDocument(doc); |
| w.commit(); |
| assertTrue(w.beforeWasCalled); |
| @@ -930,7 +960,7 @@
|
| Directory dir = newDirectory(); |
| IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| - doc.add(new Field("field", tokens)); |
| + doc.add(new TextField("field", tokens)); |
| w.addDocument(doc); |
| w.commit(); |
| |
| @@ -971,7 +1001,7 @@
|
| b[i] = (byte) (i+77); |
| |
| Document doc = new Document(); |
| - Field f = new Field("binary", b, 10, 17); |
| + Field f = new BinaryField("binary", b, 10, 17); |
| byte[] bx = f.binaryValue(null).bytes; |
| assertTrue(bx != null); |
| assertEquals(50, bx.length); |
| @@ -982,9 +1012,9 @@
|
| w.close(); |
| |
| IndexReader ir = IndexReader.open(dir, true); |
| - doc = ir.document(0); |
| - f = doc.getField("binary"); |
| - b = f.binaryValue(null).bytes; |
| + org.apache.lucene.document.Document doc2 = ir.document(0); |
| + org.apache.lucene.document.Field f2 = doc2.getField("binary"); |
| + b = f2.binaryValue(null).bytes; |
| assertTrue(b != null); |
| assertEquals(17, b.length, 17); |
| assertEquals(87, b[0]); |
| @@ -1000,10 +1030,11 @@
|
| IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( |
| TEST_VERSION_CURRENT, analyzer)); |
| Document doc = new Document(); |
| - Field f = newField("field", "", Field.Store.NO, |
| - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS); |
| - Field f2 = newField("field", "crunch man", Field.Store.NO, |
| - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + Field f = newField("field", "", customType); |
| + Field f2 = newField("field", "crunch man", customType); |
| doc.add(f); |
| doc.add(f2); |
| w.addDocument(doc); |
| @@ -1045,8 +1076,14 @@
|
| Directory dir = newDirectory(); |
| IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); |
| Document doc = new Document(); |
| - doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, |
| - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + |
| + doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType)); |
| writer.addDocument(doc); |
| writer.addDocument(doc); |
| writer.addDocument(doc); |
| @@ -1098,7 +1135,7 @@
|
| w = new IndexWriter(dir, conf); |
| |
| Document doc = new Document(); |
| - doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "some text contents", storedTextType)); |
| for(int i=0;i<100;i++) { |
| w.addDocument(doc); |
| if (i%10 == 0) { |
| @@ -1212,9 +1249,18 @@
|
| b[i] = (byte) (i+77); |
| |
| Document doc = new Document(); |
| - Field f = new Field("binary", b, 10, 17); |
| + |
| + FieldType customType = new FieldType(BinaryField.DEFAULT_TYPE); |
| + customType.setTokenized(true); |
| + customType.setIndexed(true); |
| + |
| + Field f = new Field("binary", customType, b, 10, 17); |
| f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false)); |
| - Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED); |
| + |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + |
| + Field f2 = newField("string", "value", customType2); |
| f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false)); |
| doc.add(f); |
| doc.add(f2); |
| @@ -1237,9 +1283,9 @@
|
| w.close(); |
| |
| IndexReader ir = IndexReader.open(dir, true); |
| - doc = ir.document(0); |
| - f = doc.getField("binary"); |
| - b = f.binaryValue(null).bytes; |
| + org.apache.lucene.document.Document doc2 = ir.document(0); |
| + org.apache.lucene.document.Field f3 = doc2.getField("binary"); |
| + b = f3.binaryValue(null).bytes; |
| assertTrue(b != null); |
| assertEquals(17, b.length, 17); |
| assertEquals(87, b[0]); |
| @@ -1271,25 +1317,28 @@
|
| Directory d = newDirectory(); |
| IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); |
| Document doc = new Document(); |
| - doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO)); |
| - doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO)); |
| - doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO)); |
| + |
| + FieldType customType = new FieldType(); |
| + customType.setStored(true); |
| + doc.add(newField("zzz", "a b c", customType)); |
| + doc.add(newField("aaa", "a b c", customType)); |
| + doc.add(newField("zzz", "1 2 3", customType)); |
| w.addDocument(doc); |
| IndexReader r = w.getReader(); |
| - doc = r.document(0); |
| - Iterator<Fieldable> it = doc.getFields().iterator(); |
| + org.apache.lucene.document.Document doc2 = r.document(0); |
| + Iterator<Fieldable> it = doc2.getFields().iterator(); |
| assertTrue(it.hasNext()); |
| - Field f = (Field) it.next(); |
| + org.apache.lucene.document.Field f = (org.apache.lucene.document.Field) it.next(); |
| assertEquals(f.name(), "zzz"); |
| assertEquals(f.stringValue(), "a b c"); |
| |
| assertTrue(it.hasNext()); |
| - f = (Field) it.next(); |
| + f = (org.apache.lucene.document.Field) it.next(); |
| assertEquals(f.name(), "aaa"); |
| assertEquals(f.stringValue(), "a b c"); |
| |
| assertTrue(it.hasNext()); |
| - f = (Field) it.next(); |
| + f = (org.apache.lucene.document.Field) it.next(); |
| assertEquals(f.name(), "zzz"); |
| assertEquals(f.stringValue(), "1 2 3"); |
| assertFalse(it.hasNext()); |
| @@ -1321,7 +1370,7 @@
|
| s.append(' ').append(i); |
| } |
| Document d = new Document(); |
| - Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED); |
| + Field f = newField("field", s.toString(), TextField.DEFAULT_TYPE); |
| d.add(f); |
| w.addDocument(d); |
| |
| @@ -1353,7 +1402,7 @@
|
| setMergePolicy(mergePolicy) |
| ); |
| Document doc = new Document(); |
| - doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("field", "go", TextField.DEFAULT_TYPE)); |
| w.addDocument(doc); |
| IndexReader r; |
| if (iter == 0) { |
| @@ -1416,7 +1465,14 @@
|
| |
| // First commit |
| Document doc = new Document(); |
| - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + |
| + doc.add(newField("c", "val", customType)); |
| writer.addDocument(doc); |
| writer.commit(); |
| assertEquals(1, IndexReader.listCommits(dir).size()); |
| @@ -1426,7 +1482,7 @@
|
| |
| // Second commit - now KeepOnlyLastCommit cannot delete the prev commit. |
| doc = new Document(); |
| - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + doc.add(newField("c", "val", customType)); |
| writer.addDocument(doc); |
| writer.commit(); |
| assertEquals(2, IndexReader.listCommits(dir).size()); |
| @@ -1473,14 +1529,19 @@
|
| } |
| |
| Document doc = new Document(); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| // create as many files as possible |
| - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + doc.add(newField("c", "val", customType)); |
| writer.addDocument(doc); |
| // Adding just one document does not call flush yet. |
| assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length); |
| |
| doc = new Document(); |
| - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + doc.add(newField("c", "val", customType)); |
| writer.addDocument(doc); |
| |
| // The second document should cause a flush. |
| @@ -1503,7 +1564,12 @@
|
| TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); |
| |
| Document doc = new Document(); |
| - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setStoreTermVectors(true); |
| + customType.setStoreTermVectorPositions(true); |
| + customType.setStoreTermVectorOffsets(true); |
| + doc.add(newField("c", "val", customType)); |
| w.addDocument(doc); |
| w.addDocument(doc); |
| IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig( |
| @@ -1530,7 +1596,10 @@
|
| |
| final List<Integer> fieldIDs = new ArrayList<Integer>(); |
| |
| - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + Field idField = newField("id", "", customType); |
| |
| for(int i=0;i<fieldCount;i++) { |
| fieldIDs.add(i); |
| @@ -1542,6 +1611,8 @@
|
| System.out.println("TEST: build index docCount=" + docCount); |
| } |
| |
| + FieldType customType2 = new FieldType(); |
| + customType2.setStored(true); |
| for(int i=0;i<docCount;i++) { |
| Document doc = new Document(); |
| doc.add(idField); |
| @@ -1556,7 +1627,7 @@
|
| final String s; |
| if (rand.nextInt(4) != 3) { |
| s = _TestUtil.randomUnicodeString(rand, 1000); |
| - doc.add(newField("f"+field, s, Field.Store.YES, Field.Index.NO)); |
| + doc.add(newField("f"+field, s, customType2)); |
| } else { |
| s = null; |
| } |
| @@ -1598,7 +1669,7 @@
|
| } |
| TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1); |
| assertEquals(1, hits.totalHits); |
| - Document doc = r.document(hits.scoreDocs[0].doc); |
| + org.apache.lucene.document.Document doc = r.document(hits.scoreDocs[0].doc); |
| Document docExp = docs.get(testID); |
| for(int i=0;i<fieldCount;i++) { |
| assertEquals("doc " + testID + ", field f" + fieldCount + " is wrong", docExp.get("f"+i), doc.get("f"+i)); |
| @@ -1622,12 +1693,23 @@
|
| String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg"; |
| BIG=BIG+BIG+BIG+BIG; |
| |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setOmitNorms(true); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setStored(true); |
| + customType2.setTokenized(false); |
| + FieldType customType3 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType3.setStored(true); |
| + customType3.setTokenized(false); |
| + customType3.setOmitNorms(true); |
| + |
| for (int i=0; i<2; i++) { |
| Document doc = new Document(); |
| - doc.add(new Field("id", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); |
| - doc.add(new Field("str", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.NOT_ANALYZED)); |
| - doc.add(new Field("str2", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(new Field("str3", Integer.toString(i)+BIG, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS)); |
| + doc.add(new Field("id", customType3, Integer.toString(i)+BIG)); |
| + doc.add(new Field("str", customType2, Integer.toString(i)+BIG)); |
| + doc.add(new Field("str2", storedTextType, Integer.toString(i)+BIG)); |
| + doc.add(new Field("str3", customType, Integer.toString(i)+BIG)); |
| indexWriter.addDocument(doc); |
| } |
| |
| @@ -1701,12 +1783,12 @@
|
| |
| // This contents produces a too-long term: |
| String contents = "abc xyz x" + bigTerm + " another term"; |
| - doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(new TextField("content", contents)); |
| w.addDocument(doc); |
| |
| // Make sure we can add another normal document |
| doc = new Document(); |
| - doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(new TextField("content", "abc bbb ccc")); |
| w.addDocument(doc); |
| |
| IndexReader reader = w.getReader(); |
| @@ -1736,7 +1818,9 @@
|
| // Make sure we can add a document with exactly the |
| // maximum length term, and search on that term: |
| doc = new Document(); |
| - Field contentField = new Field("content", "", Field.Store.NO, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| + Field contentField = new Field("content", customType, ""); |
| doc.add(contentField); |
| |
| w = new RandomIndexWriter(random, dir); |
| @@ -1773,7 +1857,7 @@
|
| iwc.setReaderTermsIndexDivisor(1); |
| IndexWriter writer = new IndexWriter(dir, iwc); |
| Document doc = new Document(); |
| - doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED)); |
| + doc.add(newField("", "a b c", TextField.DEFAULT_TYPE)); |
| writer.addDocument(doc); |
| writer.close(); |
| dir.close(); |
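|
| The Field.TermVector enum constants seen throughout TestIndexWriter above decompose into three independent FieldType flags; the customType4 through customType8 blocks in the earlier TestIndexReader hunks follow the same shape. A sketch with an illustrative field:
|
|     FieldType tv = new FieldType(TextField.DEFAULT_TYPE);
|     tv.setStored(true);
|     tv.setStoreTermVectors(true);          // was Field.TermVector.YES
|     tv.setStoreTermVectorPositions(true);  // adds WITH_POSITIONS
|     tv.setStoreTermVectorOffsets(true);    // all three: WITH_POSITIONS_OFFSETS
|     doc.add(newField("content", "aaa bbb ccc", tv));
|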
| Index: lucene/src/test/org/apache/lucene/index/TestSegmentReader.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (working copy)
|
| @@ -25,8 +25,8 @@
|
| import org.apache.lucene.util.LuceneTestCase; |
| import org.apache.lucene.util.BytesRef; |
| |
| -import org.apache.lucene.document.Document; |
| import org.apache.lucene.document.Fieldable; |
| +import org.apache.lucene.document2.Document; |
| import org.apache.lucene.store.Directory; |
| |
| public class TestSegmentReader extends LuceneTestCase { |
| @@ -61,10 +61,10 @@
|
| public void testDocument() throws IOException { |
| assertTrue(reader.numDocs() == 1); |
| assertTrue(reader.maxDoc() >= 1); |
| - Document result = reader.document(0); |
| + org.apache.lucene.document.Document result = reader.document(0); |
| assertTrue(result != null); |
| //There are 2 unstored fields on the document that are not preserved across writing |
| - assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); |
| + assertTrue(DocHelper.numFields2(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); |
| |
| List<Fieldable> fields = result.getFields(); |
| for (final Fieldable field : fields ) { |
| @@ -174,9 +174,9 @@
|
| public static void checkNorms(IndexReader reader) throws IOException { |
| // test omit norms |
| for (int i=0; i<DocHelper.fields.length; i++) { |
| - Fieldable f = DocHelper.fields[i]; |
| - if (f.isIndexed()) { |
| - assertEquals(reader.hasNorms(f.name()), !f.getOmitNorms()); |
| + IndexableField f = DocHelper.fields[i]; |
| + if (f.indexed()) { |
| + assertEquals(reader.hasNorms(f.name()), !f.omitNorms()); |
| assertEquals(reader.hasNorms(f.name()), !DocHelper.noNorms.containsKey(f.name())); |
| if (!reader.hasNorms(f.name())) { |
| // test for norms of null |
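|
| TestSegmentReader above also shows the read side of the migration: IndexReader.document(...) still returns the old org.apache.lucene.document.Document, so code that indexes with document2 classes falls back to fully qualified names when inspecting stored fields. A minimal sketch, with reader setup elided:
|
|     org.apache.lucene.document.Document stored = reader.document(0);
|     org.apache.lucene.document.Field f =
|         (org.apache.lucene.document.Field) stored.getFields().get(0);
|     String value = f.stringValue();
|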
| Index: lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java (working copy)
|
| @@ -18,7 +18,7 @@
|
| */ |
| |
| import org.apache.lucene.store.*; |
| -import org.apache.lucene.document.*; |
| +import org.apache.lucene.document2.*; |
| import org.apache.lucene.analysis.*; |
| import org.apache.lucene.index.*; |
| import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec; |
| @@ -296,7 +296,9 @@
|
| uniqueTerms.add(term); |
| fieldTerms.add(new Term(field, term)); |
| Document doc = new Document(); |
| - doc.add(newField(field, term, Field.Store.NO, Field.Index.NOT_ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| + doc.add(newField(field, term, customType)); |
| w.addDocument(doc); |
| } |
| uniqueTermCount += uniqueTerms.size(); |
| Index: lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/search/cache/TestEntryCreators.java (working copy)
|
| @@ -23,8 +23,9 @@
|
| import java.util.Set; |
| |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.search.FieldCache.*; |
| @@ -85,8 +86,9 @@
|
| for( NumberTypeTester tester : typeTests ) { |
| if (random.nextInt(20) != 17 && i > 1) { |
| tester.values[i] = 10 + random.nextInt( 20 ); // get some field overlap |
| - doc.add(newField(tester.field, String.valueOf(tester.values[i]), |
| - Field.Store.NO, Field.Index.NOT_ANALYZED )); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| + doc.add(newField(tester.field, String.valueOf(tester.values[i]), customType)); |
| } |
| } |
| writer.addDocument(doc); |
| Index: lucene/src/test/org/apache/lucene/search/function/FunctionTestSetup.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/function/FunctionTestSetup.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/search/function/FunctionTestSetup.java (working copy)
|
| @@ -19,9 +19,10 @@
|
| |
| import org.apache.lucene.analysis.Analyzer; |
| import org.apache.lucene.analysis.MockAnalyzer; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| -import org.apache.lucene.document.Fieldable; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.Field; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.index.IndexWriterConfig; |
| import org.apache.lucene.store.Directory; |
| @@ -116,23 +117,26 @@
|
| |
| private static void addDoc(RandomIndexWriter iw, int i) throws Exception { |
| Document d = new Document(); |
| - Fieldable f; |
| + Field f; |
| int scoreAndID = i + 1; |
| |
| - f = newField(ID_FIELD, id2String(scoreAndID), Field.Store.YES, Field.Index.NOT_ANALYZED); // for debug purposes |
| - f.setOmitNorms(true); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + customType.setTokenized(false); |
| + customType.setOmitNorms(true); |
| + |
| + f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes |
| d.add(f); |
| |
| - f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), Field.Store.NO, Field.Index.ANALYZED); // for regular search |
| - f.setOmitNorms(true); |
| + FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); |
| + customType2.setOmitNorms(true); |
| + f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search |
| d.add(f); |
| |
| - f = newField(INT_FIELD, "" + scoreAndID, Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring |
| - f.setOmitNorms(true); |
| + f = newField(INT_FIELD, "" + scoreAndID, customType); // for function scoring |
| d.add(f); |
| |
| - f = newField(FLOAT_FIELD, scoreAndID + ".000", Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring |
| - f.setOmitNorms(true); |
| + f = newField(FLOAT_FIELD, scoreAndID + ".000", customType); // for function scoring |
| d.add(f); |
| |
| iw.addDocument(d); |
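Note that this file reuses one pre-built FieldType across three fields:
customType (stored, untokenized, norms off) is shared by ID_FIELD, INT_FIELD
and FLOAT_FIELD, replacing four separate f.setOmitNorms(true) calls. One
subtle consequence: the old code added INT_FIELD and FLOAT_FIELD with
Field.Store.NO, so reusing the stored type quietly makes them stored as
well -- harmless for these tests, but a real semantic change. A sketch of the
two configurations this hunk sets up:

    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.TextField;

    class FunctionTestTypes {
      // Shared by ID_FIELD, INT_FIELD, FLOAT_FIELD: stored, single-token,
      // norms off. (INT_FIELD/FLOAT_FIELD were previously unstored.)
      static FieldType idLike() {
        FieldType t = new FieldType(TextField.DEFAULT_TYPE);
        t.setStored(true);
        t.setTokenized(false);
        t.setOmitNorms(true);
        return t;
      }

      // TEXT_FIELD: regular tokenized text, norms off.
      static FieldType textNoNorms() {
        FieldType t = new FieldType(TextField.DEFAULT_TYPE);
        t.setOmitNorms(true);
        return t;
      }
    }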
| Index: lucene/src/test/org/apache/lucene/search/function/TestValueSource.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (working copy)
|
| @@ -23,7 +23,7 @@
|
| import org.apache.lucene.analysis.*; |
| import org.apache.lucene.index.*; |
| import org.apache.lucene.index.IndexReader.AtomicReaderContext; |
| -import org.apache.lucene.document.*; |
| +import org.apache.lucene.document2.*; |
| |
| public class TestValueSource extends LuceneTestCase { |
| |
| @@ -32,7 +32,9 @@
|
| IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); |
| ((LogMergePolicy) w.getConfig().getMergePolicy()).setMergeFactor(10); |
| Document doc = new Document(); |
| - Field f = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setTokenized(false); |
| + Field f = newField("field", "", customType); |
| doc.add(f); |
| |
| for(int i=0;i<17;i++) { |
| Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy)
|
| @@ -24,8 +24,9 @@
|
| import org.apache.lucene.analysis.TokenFilter; |
| import org.apache.lucene.analysis.TokenStream; |
| import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.FieldInvertState; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.Payload; |
| @@ -112,9 +113,11 @@
|
| //writer.infoStream = System.out; |
| for (int i = 0; i < 1000; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("field", English.intToEnglish(i), customType)); |
| String txt = English.intToEnglish(i) +' '+English.intToEnglish(i+1); |
| - doc.add(newField("field2", txt, Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field2", txt, customType)); |
| writer.addDocument(doc); |
| } |
| reader = writer.getReader(); |
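Here the common Field.Store.YES / Field.Index.ANALYZED pair maps to the
simplest possible FieldType: the TextField defaults plus stored=true. A
sketch, again limited to calls visible in the hunk:

    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.TextField;

    class StoredTextType {
      // Old: Field.Store.YES, Field.Index.ANALYZED.
      // New: TextField defaults are already tokenized; only storage changes.
      static FieldType storedText() {
        FieldType t = new FieldType(TextField.DEFAULT_TYPE);
        t.setStored(true);
        return t;
      }
    }

One shared instance also covers "field2" in the same loop, though the type is
rebuilt on every one of the 1000 iterations here; the next file hoists the
construction out of the loop.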
| Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (working copy)
|
| @@ -43,8 +43,10 @@
|
| import org.apache.lucene.index.RandomIndexWriter; |
| import org.apache.lucene.index.Term; |
| import org.apache.lucene.store.Directory; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| +import org.apache.lucene.document2.Field; |
| |
| import java.io.Reader; |
| import java.io.IOException; |
| @@ -115,13 +117,15 @@
|
| newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()) |
| .setSimilarityProvider(similarityProvider).setMergePolicy(newLogMergePolicy())); |
| //writer.infoStream = System.out; |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| for (int i = 0; i < 1000; i++) { |
| Document doc = new Document(); |
| - Field noPayloadField = newField(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED); |
| + Field noPayloadField = newField(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), customType); |
| //noPayloadField.setBoost(0); |
| doc.add(noPayloadField); |
| - doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); |
| - doc.add(newField("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); |
| + doc.add(newField("field", English.intToEnglish(i), customType)); |
| + doc.add(newField("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), customType)); |
| writer.addDocument(doc); |
| } |
| reader = writer.getReader(); |
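Same stored-text type as the previous file, but built once before the loop
and shared by all three fields of every document -- the cleaner shape for the
identical pattern. A condensed sketch of that loop structure; newField is the
LuceneTestCase helper these tests already use, and English is assumed to be
the test utility from org.apache.lucene.util:

    import org.apache.lucene.document2.Document;
    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.TextField;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.util.English;        // package assumed
    import org.apache.lucene.util.LuceneTestCase;

    public class HoistedTypeSketch extends LuceneTestCase {
      static void addDocs(RandomIndexWriter writer, int count) throws Exception {
        FieldType customType = new FieldType(TextField.DEFAULT_TYPE);
        customType.setStored(true); // configured once, reused for every doc
        for (int i = 0; i < count; i++) {
          Document doc = new Document();
          doc.add(newField("field", English.intToEnglish(i), customType));
          writer.addDocument(doc);
        }
      }
    }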
| Index: lucene/src/test/org/apache/lucene/search/spans/TestBasics.java
|
| ===================================================================
|
| --- lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (revision 1143083)
|
| +++ lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (working copy)
|
| @@ -31,8 +31,9 @@
|
| import org.apache.lucene.analysis.TokenStream; |
| import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; |
| import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; |
| -import org.apache.lucene.document.Document; |
| -import org.apache.lucene.document.Field; |
| +import org.apache.lucene.document2.Document; |
| +import org.apache.lucene.document2.FieldType; |
| +import org.apache.lucene.document2.TextField; |
| import org.apache.lucene.index.IndexReader; |
| import org.apache.lucene.index.Payload; |
| import org.apache.lucene.index.RandomIndexWriter; |
| @@ -120,7 +121,9 @@
|
| //writer.infoStream = System.out; |
| for (int i = 0; i < 2000; i++) { |
| Document doc = new Document(); |
| - doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); |
| + FieldType customType = new FieldType(TextField.DEFAULT_TYPE); |
| + customType.setStored(true); |
| + doc.add(newField("field", English.intToEnglish(i), customType)); |
| writer.addDocument(doc); |
| } |
| reader = writer.getReader(); |
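Taken together, every hunk in this patch applies the same mechanical mapping
from the old Field.Store / Field.Index enums (plus per-field setOmitNorms
calls) to FieldType flags. A summary sketch of the correspondence, using only
calls that appear above -- the document2 API was still in flux at this
revision, so treat the names as provisional:

    import org.apache.lucene.document2.FieldType;
    import org.apache.lucene.document2.TextField;

    class Document2Mapping {
      // Field.Store.YES          -> t.setStored(true)
      // Field.Store.NO           -> TextField default (unstored)
      // Field.Index.ANALYZED     -> TextField default (tokenized)
      // Field.Index.NOT_ANALYZED -> t.setTokenized(false)
      // f.setOmitNorms(true)     -> t.setOmitNorms(true)
      static FieldType fromLegacyFlags(boolean stored, boolean analyzed, boolean omitNorms) {
        FieldType t = new FieldType(TextField.DEFAULT_TYPE);
        t.setStored(stored);
        t.setTokenized(analyzed);
        t.setOmitNorms(omitNorms);
        return t;
      }
    }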