blob: 342760a9e5a8bf8448845b285402cfb59c8b0ef1 [file] [log] [blame]
Index: CHANGES.txt
===================================================================
--- CHANGES.txt (revision 908485)
+++ CHANGES.txt (working copy)
@@ -233,6 +233,10 @@
* LUCENE-2207, LUCENE-2219: Improve BaseTokenStreamTestCase to check if
end() is implemented correctly. (Koji Sekiguchi, Robert Muir)
+* LUCENE-2248, LUCENE-2251: Refactor tests to not use Version.LUCENE_CURRENT,
+ but instead use a global static value from LuceneTestCase(J4), that
+ contains the release version. (Uwe Schindler, Simon Willnauer)
+
======================= Release 3.0.0 2009-11-25 =======================
Changes in backwards compatibility policy
Index: src/test/org/apache/lucene/analysis/TestAnalyzers.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestAnalyzers.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestAnalyzers.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
public class TestAnalyzers extends BaseTokenStreamTestCase {
@@ -35,7 +34,7 @@
}
public void testSimple() throws Exception {
- Analyzer a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -55,7 +54,7 @@
}
public void testNull() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "FOO", "BAR" });
assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -75,7 +74,7 @@
}
public void testStop() throws Exception {
- Analyzer a = new StopAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new StopAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo a bar such FOO THESE BAR",
@@ -97,11 +96,11 @@
public void testPayloadCopy() throws IOException {
String s = "how now brown cow";
TokenStream ts;
- ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
+ ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
ts = new PayloadSetter(ts);
verifyPayload(ts);
- ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
+ ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
ts = new PayloadSetter(ts);
verifyPayload(ts);
}
@@ -122,12 +121,12 @@
private static class MyStandardAnalyzer extends StandardAnalyzer {
public MyStandardAnalyzer() {
- super(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ super(TEST_VERSION_CURRENT);
}
@Override
public TokenStream tokenStream(String field, Reader reader) {
- return new WhitespaceAnalyzer(Version.LUCENE_CURRENT).tokenStream(field, reader);
+ return new WhitespaceAnalyzer(TEST_VERSION_CURRENT).tokenStream(field, reader);
}
}
@@ -144,8 +143,8 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new LowerCaseFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
+ return new LowerCaseFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -192,9 +191,9 @@
public void testLowerCaseFilterLowSurrogateLeftover() throws IOException {
// test if the limit of the termbuffer is correctly used with supplementary
// chars
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("BogustermBogusterm\udc16"));
- LowerCaseFilter filter = new LowerCaseFilter(Version.LUCENE_CURRENT,
+ LowerCaseFilter filter = new LowerCaseFilter(TEST_VERSION_CURRENT,
tokenizer);
assertTokenStreamContents(filter, new String[] {"bogustermbogusterm\udc16"});
filter.reset();
Index: src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (working copy)
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
import java.io.StringReader;
import java.util.List;
import java.util.ArrayList;
@@ -29,7 +27,7 @@
// testLatin1Accents() is a copy of TestLatin1AccentFilter.testU().
public void testLatin1Accents() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader
("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ"
+" Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij"
+" ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
@@ -1890,7 +1888,7 @@
expectedOutputTokens.add(expected.toString());
}
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(inputText.toString()));
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(inputText.toString()));
ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Iterator<String> expectedIter = expectedOutputTokens.iterator();
Index: src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (working copy)
@@ -31,14 +31,13 @@
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
public void testCaching() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
TokenStream stream = new TokenStream() {
private int index = 0;
Index: src/test/org/apache/lucene/analysis/TestCharArrayMap.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCharArrayMap.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestCharArrayMap.java (working copy)
@@ -19,13 +19,12 @@
import java.util.*;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestCharArrayMap extends LuceneTestCase {
Random r = newRandom();
public void doRandom(int iter, boolean ignoreCase) {
- CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 1, ignoreCase);
+ CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 1, ignoreCase);
HashMap<String,Integer> hmap = new HashMap<String,Integer>();
char[] key;
@@ -63,7 +62,7 @@
}
public void testMethods() {
- CharArrayMap<Integer> cm = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 2, false);
+ CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
HashMap<String,Integer> hm = new HashMap<String,Integer>();
hm.put("foo",1);
hm.put("bar",2);
@@ -131,7 +130,7 @@
}
public void testModifyOnUnmodifiable(){
- CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 2, false);
+ CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
map.put("foo",1);
map.put("bar",2);
final int size = map.size();
@@ -228,7 +227,7 @@
}
public void testToString() {
- CharArrayMap<Integer> cm = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, Collections.singletonMap("test",1), false);
+ CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
assertEquals("[test]",cm.keySet().toString());
assertEquals("[1]",cm.values().toString());
assertEquals("[test=1]",cm.entrySet().toString());
Index: src/test/org/apache/lucene/analysis/TestCharArraySet.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCharArraySet.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestCharArraySet.java (working copy)
@@ -41,7 +41,7 @@
public void testRehash() throws Exception {
- CharArraySet cas = new CharArraySet(Version.LUCENE_CURRENT, 0, true);
+ CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
for(int i=0;i<TEST_STOP_WORDS.length;i++)
cas.add(TEST_STOP_WORDS[i]);
assertEquals(TEST_STOP_WORDS.length, cas.size());
@@ -52,7 +52,7 @@
public void testNonZeroOffset() {
String[] words={"Hello","World","this","is","a","test"};
char[] findme="xthisy".toCharArray();
- CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+ CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
set.addAll(Arrays.asList(words));
assertTrue(set.contains(findme, 1, 4));
assertTrue(set.contains(new String(findme,1,4)));
@@ -64,7 +64,7 @@
}
public void testObjectContains() {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
Integer val = Integer.valueOf(1);
set.add(val);
assertTrue(set.contains(val));
@@ -80,7 +80,7 @@
}
public void testClear(){
- CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+ CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
set.clear();
@@ -94,7 +94,7 @@
}
public void testModifyOnUnmodifiable(){
- CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10, true);
+ CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10, true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
final int size = set.size();
set = CharArraySet.unmodifiableSet(set);
@@ -150,7 +150,7 @@
// current key (now a char[]) on a Set<String> would not hit any element of the CAS and therefore never call
// remove() on the iterator
try{
- set.removeAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
+ set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -158,7 +158,7 @@
}
try{
- set.retainAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(NOT_IN_SET), true));
+ set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));
fail("Modified unmodifiable set");
}catch (UnsupportedOperationException e) {
// expected
@@ -179,7 +179,7 @@
}
public void testUnmodifiableSet(){
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
set.addAll(Arrays.asList(TEST_STOP_WORDS));
set.add(Integer.valueOf(1));
final int size = set.size();
@@ -209,7 +209,7 @@
"\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
String[] lowerArr = new String[] {"abc\ud801\udc44",
"\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
- CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
}
@@ -217,7 +217,7 @@
assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), false);
+ set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
for (String upper : upperArr) {
set.add(upper);
}
@@ -235,7 +235,7 @@
String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
"\uD800efg", "\uD800\ud801\udc44b" };
- CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
.asList(TEST_STOP_WORDS), true);
for (String upper : upperArr) {
set.add(upper);
@@ -244,7 +244,7 @@
assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
}
- set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS),
+ set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
false);
for (String upper : upperArr) {
set.add(upper);
@@ -328,8 +328,8 @@
}
public void testCopyCharArraySetBWCompat() {
- CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<String>();
@@ -375,8 +375,8 @@
* Test the static #copy() function with a CharArraySet as a source
*/
public void testCopyCharArraySet() {
- CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
- CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+ CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+ CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
List<String> stopwordsUpper = new ArrayList<String>();
@@ -388,8 +388,8 @@
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
- CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, setIngoreCase);
- CharArraySet copyCaseSens = CharArraySet.copy(Version.LUCENE_CURRENT, setCaseSensitive);
+ CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
+ CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
assertEquals(setIngoreCase.size(), copy.size());
assertEquals(setCaseSensitive.size(), copy.size());
@@ -431,7 +431,7 @@
}
set.addAll(Arrays.asList(TEST_STOP_WORDS));
- CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, set);
+ CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
assertEquals(set.size(), copy.size());
assertEquals(set.size(), copy.size());
@@ -461,7 +461,7 @@
*/
public void testCopyEmptySet() {
assertSame(CharArraySet.EMPTY_SET,
- CharArraySet.copy(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET));
+ CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
}
/**
@@ -483,7 +483,7 @@
* Test for NPE
*/
public void testContainsWithNull() {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
try {
set.contains((char[]) null, 0, 10);
fail("null value must raise NPE");
@@ -506,7 +506,7 @@
assertTrue("in 3.0 version, iterator should be CharArraySetIterator",
((Iterator) CharArraySet.copy(Version.LUCENE_30, hset).iterator()) instanceof CharArraySet.CharArraySetIterator);
- CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, hset);
+ CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, hset);
assertFalse("in current version, iterator should not be CharArraySetIterator",
((Iterator) set.iterator()) instanceof CharArraySet.CharArraySetIterator);
@@ -525,7 +525,7 @@
}
public void testToString() {
- CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, Collections.singleton("test"));
+ CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
assertEquals("[test]", set.toString());
set.add("test2");
assertTrue(set.toString().contains(", "));
Index: src/test/org/apache/lucene/analysis/TestCharTokenizers.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCharTokenizers.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestCharTokenizers.java (working copy)
@@ -46,7 +46,7 @@
// internal buffer size is 1024 make sure we have a surrogate pair right at the border
builder.insert(1023, "\ud801\udc1c");
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, builder.toString().toLowerCase().split(" "));
}
@@ -64,7 +64,7 @@
}
builder.append("\ud801\udc1cabc");
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase()});
}
}
@@ -79,7 +79,7 @@
builder.append("A");
}
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
}
@@ -94,13 +94,13 @@
}
builder.append("\ud801\udc1c");
LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
- Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+ TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
}
public void testLowerCaseTokenizer() throws IOException {
StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
- LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+ LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT,
reader);
assertTokenStreamContents(tokenizer, new String[] { "tokenizer",
"\ud801\udc44test" });
@@ -115,7 +115,7 @@
public void testWhitespaceTokenizer() throws IOException {
StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
reader);
assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
"\ud801\udc1ctest" });
@@ -132,7 +132,7 @@
public void testIsTokenCharCharInSubclass() {
new TestingCharTokenizer(Version.LUCENE_30, new StringReader(""));
try {
- new TestingCharTokenizer(Version.LUCENE_CURRENT, new StringReader(""));
+ new TestingCharTokenizer(TEST_VERSION_CURRENT, new StringReader(""));
fail("version 3.1 is not permitted if char based method is implemented");
} catch (IllegalArgumentException e) {
// expected
@@ -142,7 +142,7 @@
public void testNormalizeCharInSubclass() {
new TestingCharTokenizerNormalize(Version.LUCENE_30, new StringReader(""));
try {
- new TestingCharTokenizerNormalize(Version.LUCENE_CURRENT,
+ new TestingCharTokenizerNormalize(TEST_VERSION_CURRENT,
new StringReader(""));
fail("version 3.1 is not permitted if char based method is implemented");
} catch (IllegalArgumentException e) {
@@ -154,7 +154,7 @@
new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_30,
new StringReader(""));
try {
- new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_CURRENT,
+ new TestingCharTokenizerNormalizeIsTokenChar(TEST_VERSION_CURRENT,
new StringReader(""));
fail("version 3.1 is not permitted if char based method is implemented");
} catch (IllegalArgumentException e) {
Index: src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestISOLatin1AccentFilter.java (working copy)
@@ -18,13 +18,11 @@
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
import java.io.StringReader;
public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
public void testU() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
assertTermEquals("Des", filter, termAtt);
Index: src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (working copy)
@@ -31,7 +31,6 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
@@ -43,7 +42,7 @@
super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory,
- new SimpleAnalyzer(Version.LUCENE_CURRENT),
+ new SimpleAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
@@ -57,10 +56,10 @@
}
public void testPerFieldAnalyzer() throws Exception {
- PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(TEST_VERSION_CURRENT));
analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
- QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, "description", analyzer);
+ QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, "description", analyzer);
Query query = queryParser.parse("partnum:Q36 AND SPACE");
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Index: src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestKeywordMarkerTokenFilter.java (working copy)
@@ -7,7 +7,6 @@
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
import org.junit.Test;
/**
@@ -34,21 +33,21 @@
@Test
public void testIncrementToken() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_31, 5, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 5, true);
set.add("lucenefox");
String[] output = new String[] { "the", "quick", "brown", "LuceneFox",
"jumps" };
assertTokenStreamContents(new LowerCaseFilterMock(
- new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"The quIck browN LuceneFox Jumps")), set)), output);
Set<String> jdkSet = new HashSet<String>();
jdkSet.add("LuceneFox");
assertTokenStreamContents(new LowerCaseFilterMock(
- new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"The quIck browN LuceneFox Jumps")), jdkSet)), output);
Set<?> set2 = set;
assertTokenStreamContents(new LowerCaseFilterMock(
- new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"The quIck browN LuceneFox Jumps")), set2)), output);
}
Index: src/test/org/apache/lucene/analysis/TestLengthFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestLengthFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestLengthFilter.java (working copy)
@@ -18,14 +18,12 @@
*/
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
import java.io.StringReader;
public class TestLengthFilter extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
LengthFilter filter = new LengthFilter(stream, 2, 6);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Index: src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (working copy)
@@ -19,8 +19,6 @@
import java.io.StringReader;
-import org.apache.lucene.util.Version;
-
public class TestMappingCharFilter extends BaseTokenStreamTestCase {
NormalizeCharMap normMap;
@@ -60,55 +58,55 @@
public void testNothingChange() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
- TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"x"}, new int[]{0}, new int[]{1});
}
public void test1to1() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "h" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"i"}, new int[]{0}, new int[]{1});
}
public void test1to2() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "j" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"jj"}, new int[]{0}, new int[]{1});
}
public void test1to3() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "k" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"kkk"}, new int[]{0}, new int[]{1});
}
public void test2to4() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "ll" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});
}
public void test2to1() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "aa" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"a"}, new int[]{0}, new int[]{2});
}
public void test3to1() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "bbb" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"b"}, new int[]{0}, new int[]{3});
}
public void test4to2() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "cccc" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[]{"cc"}, new int[]{0}, new int[]{4});
}
public void test5to0() throws Exception {
CharStream cs = new MappingCharFilter( normMap, new StringReader( "empty" ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts, new String[0]);
}
@@ -132,7 +130,7 @@
//
public void testTokenStream() throws Exception {
CharStream cs = new MappingCharFilter( normMap, CharReader.get( new StringReader( "h i j k ll cccc bbb aa" ) ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts,
new String[]{"i","i","jj","kkk","llll","cc","b","a"},
new int[]{0,2,4,6,8,11,16,20},
@@ -153,7 +151,7 @@
public void testChained() throws Exception {
CharStream cs = new MappingCharFilter( normMap,
new MappingCharFilter( normMap, CharReader.get( new StringReader( "aaaa ll h" ) ) ) );
- TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+ TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
assertTokenStreamContents(ts,
new String[]{"a","llllllll","i"},
new int[]{0,5,8},
Index: src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (working copy)
@@ -3,7 +3,6 @@
import java.io.StringReader;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -26,8 +25,8 @@
public void testPerField() throws Exception {
String text = "Qwerty";
PerFieldAnalyzerWrapper analyzer =
- new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
- analyzer.addAnalyzer("special", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+ analyzer.addAnalyzer("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
TokenStream tokenStream = analyzer.tokenStream("field",
new StringReader(text));
Index: src/test/org/apache/lucene/analysis/TestPorterStemFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestPorterStemFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestPorterStemFilter.java (working copy)
@@ -25,8 +25,6 @@
import java.io.StringReader;
import java.util.zip.ZipFile;
-import org.apache.lucene.util.Version;
-
/**
* Test the PorterStemFilter with Martin Porter's test data.
*/
@@ -60,9 +58,9 @@
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("yourselves");
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("yourselves yours"));
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("yourselves yours"));
TokenStream filter = new PorterStemFilter(new KeywordMarkerTokenFilter(tokenizer, set));
assertTokenStreamContents(filter, new String[] {"yourselves", "your"});
}
Index: src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (working copy)
@@ -23,16 +23,16 @@
public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
- private Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ private Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
public void testMaxTermLength() throws Exception {
- StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
sa.setMaxTokenLength(5);
assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"});
}
public void testMaxTermLength2() throws Exception {
- StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "toolong", "xy", "z"});
sa.setMaxTokenLength(5);
@@ -96,7 +96,7 @@
public void testLucene1140() throws Exception {
try {
- StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
} catch (NullPointerException e) {
fail("Should not throw an NPE and it did");
@@ -106,7 +106,7 @@
public void testDomainNames() throws Exception {
// Current lucene should not show the bug
- StandardAnalyzer a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer a2 = new StandardAnalyzer(TEST_VERSION_CURRENT);
// domain names
assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"});
Index: src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (working copy)
@@ -29,7 +29,7 @@
public class TestStopAnalyzer extends BaseTokenStreamTestCase {
- private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
+ private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
private Set<Object> inValidTokens = new HashSet<Object>();
public TestStopAnalyzer(String s) {
@@ -82,7 +82,7 @@
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");
- StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_CURRENT, stopWordsSet);
+ StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
TokenStream stream = newStop.tokenStream("test", reader);
Index: src/test/org/apache/lucene/analysis/TestStopFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStopFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestStopFilter.java (working copy)
@@ -38,7 +38,7 @@
public void testExactCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
- TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, false);
+ TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, false);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("Now", termAtt.term());
@@ -50,7 +50,7 @@
public void testIgnoreCase() throws IOException {
StringReader reader = new StringReader("Now is The Time");
Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
- TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, true);
+ TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, true);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("Now", termAtt.term());
@@ -60,8 +60,8 @@
public void testStopFilt() throws IOException {
StringReader reader = new StringReader("Now is The Time");
String[] stopWords = new String[] { "is", "the", "Time" };
- Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
- TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+ Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
+ TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
assertTrue(stream.incrementToken());
assertEquals("Now", termAtt.term());
@@ -84,14 +84,14 @@
log(sb.toString());
String stopWords[] = a.toArray(new String[0]);
for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
- Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
+ Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
// with increments
StringReader reader = new StringReader(sb.toString());
- StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+ StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
doTestStopPositons(stpf,true);
// without increments
reader = new StringReader(sb.toString());
- stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+ stpf = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
doTestStopPositons(stpf,false);
// with increments, concatenating two stop filters
ArrayList<String> a0 = new ArrayList<String>();
@@ -107,12 +107,12 @@
for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
String stopWords1[] = a1.toArray(new String[0]);
for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
- Set<Object> stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
- Set<Object> stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
+ Set<Object> stopSet0 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords0);
+ Set<Object> stopSet1 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords1);
reader = new StringReader(sb.toString());
- StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet0); // first part of the set
+ StopFilter stpf0 = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet0); // first part of the set
stpf0.setEnablePositionIncrements(true);
- StopFilter stpf01 = new StopFilter(Version.LUCENE_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
+ StopFilter stpf01 = new StopFilter(TEST_VERSION_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
doTestStopPositons(stpf01,true);
}
Index: src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (revision 908485)
+++ src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (working copy)
@@ -22,8 +22,6 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.English;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.io.StringReader;
@@ -76,7 +74,7 @@
public void testGeneral() throws IOException {
- final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
+ final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString())));
final TokenStream sink1 = source.newSinkTokenStream();
final TokenStream sink2 = source.newSinkTokenStream(theFilter);
@@ -90,7 +88,7 @@
}
public void testMultipleSources() throws Exception {
- final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
+ final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString())));
final TeeSinkTokenFilter.SinkTokenStream dogDetector = tee1.newSinkTokenStream(dogFilter);
final TeeSinkTokenFilter.SinkTokenStream theDetector = tee1.newSinkTokenStream(theFilter);
final TokenStream source1 = new CachingTokenFilter(tee1);
@@ -99,7 +97,7 @@
dogDetector.addAttribute(CheckClearAttributesAttribute.class);
theDetector.addAttribute(CheckClearAttributesAttribute.class);
- final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer2.toString())));
+ final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer2.toString())));
tee2.addSinkTokenStream(dogDetector);
tee2.addSinkTokenStream(theDetector);
final TokenStream source2 = tee2;
@@ -111,7 +109,7 @@
assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"});
source1.reset();
- TokenStream lowerCasing = new LowerCaseFilter(Version.LUCENE_CURRENT, source1);
+ TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1);
String[] lowerCaseTokens = new String[tokens1.length];
for (int i = 0; i < tokens1.length; i++)
lowerCaseTokens[i] = tokens1[i].toLowerCase();
@@ -133,10 +131,10 @@
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');
}
//make sure we produce the same tokens
- TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
+ TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
teeStream.consumeAllTokens();
- TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), 100);
+ TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), 100);
TermAttribute tfTok = stream.addAttribute(TermAttribute.class);
TermAttribute sinkTok = sink.addAttribute(TermAttribute.class);
for (int i=0; stream.incrementToken(); i++) {
@@ -149,12 +147,12 @@
int tfPos = 0;
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- stream = new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString())));
+ stream = new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())));
PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
}
- stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
+ stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
@@ -166,7 +164,7 @@
//simulate one field with one sink
start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
+ teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class);
while (teeStream.incrementToken()) {
Index: src/test/org/apache/lucene/collation/CollationTestBase.java
===================================================================
--- src/test/org/apache/lucene/collation/CollationTestBase.java (revision 908485)
+++ src/test/org/apache/lucene/collation/CollationTestBase.java (working copy)
@@ -18,7 +18,6 @@
*/
-import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -38,14 +37,14 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Document;
import org.apache.lucene.util.IndexableBinaryStringTools;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.nio.CharBuffer;
import java.nio.ByteBuffer;
-public class CollationTestBase extends TestCase {
+public class CollationTestBase extends LuceneTestCase {
protected String firstRangeBeginningOriginal = "\u062F";
protected String firstRangeEndOriginal = "\u0698";
@@ -179,7 +178,7 @@
String usResult) throws Exception {
RAMDirectory indexStore = new RAMDirectory();
PerFieldAnalyzerWrapper analyzer
- = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.addAnalyzer("US", usAnalyzer);
analyzer.addAnalyzer("France", franceAnalyzer);
analyzer.addAnalyzer("Sweden", swedenAnalyzer);
Index: src/test/org/apache/lucene/document/TestBinaryDocument.java
===================================================================
--- src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 908485)
+++ src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy)
@@ -59,7 +59,7 @@
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
@@ -97,7 +97,7 @@
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
Index: src/test/org/apache/lucene/document/TestDocument.java
===================================================================
--- src/test/org/apache/lucene/document/TestDocument.java (revision 908485)
+++ src/test/org/apache/lucene/document/TestDocument.java (working copy)
@@ -154,7 +154,7 @@
public void testGetValuesForIndexedDocument() throws Exception
{
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(makeDocumentWithFields());
writer.close();
@@ -225,7 +225,7 @@
doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
field.setValue("id2");
writer.addDocument(doc);
Index: src/test/org/apache/lucene/index/DocHelper.java
===================================================================
--- src/test/org/apache/lucene/index/DocHelper.java (revision 908485)
+++ src/test/org/apache/lucene/index/DocHelper.java (working copy)
@@ -29,7 +29,7 @@
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;
class DocHelper {
public static final String FIELD_1_TEXT = "field one text";
@@ -219,7 +219,7 @@
*/
public static SegmentInfo writeDoc(Directory dir, Document doc) throws IOException
{
- return writeDoc(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), Similarity.getDefault(), doc);
+ return writeDoc(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), Similarity.getDefault(), doc);
}
/**
Index: src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -429,7 +427,7 @@
private IndexWriter newWriter(Directory dir, boolean create)
throws IOException {
- final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
+ final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMergePolicy(new LogDocMergePolicy(writer));
return writer;
}
@@ -503,7 +501,7 @@
public void testHangOnClose() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
writer.setMaxBufferedDocs(5);
writer.setUseCompoundFile(false);
@@ -529,7 +527,7 @@
writer.close();
Directory dir2 = new MockRAMDirectory();
- writer = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
lmp.setMinMergeMB(0.0001);
writer.setMergePolicy(lmp);
Index: src/test/org/apache/lucene/index/TestAtomicUpdate.java
===================================================================
--- src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy)
@@ -26,7 +26,7 @@
import java.io.IOException;
public class TestAtomicUpdate extends LuceneTestCase {
- private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private Random RANDOM;
public class MockIndexWriter extends IndexWriter {
Index: src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
@@ -45,7 +45,6 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
/*
@@ -218,7 +217,7 @@
hasTested29++;
}
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();
@@ -273,7 +272,7 @@
}
public void searchIndex(String dirName, String oldName) throws IOException {
- //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ //QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
//Query query = parser.parse("handle:1");
dirName = fullDir(dirName);
@@ -358,7 +357,7 @@
Directory dir = FSDirectory.open(new File(dirName));
// open writer
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
// add 10 docs
for(int i=0;i<10;i++) {
@@ -402,7 +401,7 @@
searcher.close();
// optimize
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.optimize();
writer.close();
@@ -452,7 +451,7 @@
searcher.close();
// optimize
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.optimize();
writer.close();
@@ -474,7 +473,7 @@
dirName = fullDir(dirName);
Directory dir = FSDirectory.open(new File(dirName));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
@@ -485,7 +484,7 @@
writer.close();
// open fresh writer so we get no prx file in the added segment
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
addNoProxDoc(writer);
@@ -512,7 +511,7 @@
try {
Directory dir = FSDirectory.open(new File(fullDir(outputDir)));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setRAMBufferSizeMB(16.0);
for(int i=0;i<35;i++) {
addDoc(writer, i);
Index: src/test/org/apache/lucene/index/TestCheckIndex.java
===================================================================
--- src/test/org/apache/lucene/index/TestCheckIndex.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestCheckIndex.java (working copy)
@@ -24,7 +24,6 @@
import java.util.ArrayList;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -35,7 +34,7 @@
public void testDeletedDocs() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
Index: src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
===================================================================
--- src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy)
@@ -25,13 +25,11 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
public class TestConcurrentMergeScheduler extends LuceneTestCase {
- private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail;
Index: src/test/org/apache/lucene/index/TestCrash.java
===================================================================
--- src/test/org/apache/lucene/index/TestCrash.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestCrash.java (working copy)
@@ -20,7 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.NoLockFactory;
@@ -36,7 +35,7 @@
private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
dir.setLockFactory(NoLockFactory.getNoLockFactory());
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
//writer.setMaxBufferedDocs(2);
writer.setMaxBufferedDocs(10);
((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
Index: src/test/org/apache/lucene/index/TestDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy)
@@ -34,7 +34,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/*
Verify we can read the pre-2.1 file format, do searches
@@ -202,7 +201,7 @@
Directory dir = new RAMDirectory();
ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.close();
@@ -211,7 +210,7 @@
// Record last time when writer performed deletes of
// past commits
lastDeleteTime = System.currentTimeMillis();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
addDoc(writer);
@@ -272,7 +271,7 @@
Directory dir = new RAMDirectory();
policy.dir = dir;
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
writer.setMergeScheduler(new SerialMergeScheduler());
@@ -281,7 +280,7 @@
}
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
writer.close();
@@ -319,7 +318,7 @@
// Open & close a writer and assert that it
// actually removed something:
int preCount = dir.listAll().length;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
int postCount = dir.listAll().length;
assertTrue(postCount < preCount);
@@ -341,7 +340,7 @@
Directory dir = new MockRAMDirectory();
policy.dir = dir;
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for(int i=0;i<10;i++) {
addDoc(writer);
@@ -360,7 +359,7 @@
assertTrue(lastCommit != null);
// Now add 1 doc and optimize
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer);
assertEquals(11, writer.numDocs());
writer.optimize();
@@ -369,7 +368,7 @@
assertEquals(7, IndexReader.listCommits(dir).size());
// Now open writer on the commit just before optimize:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
assertEquals(10, writer.numDocs());
// Should undo our rollback:
@@ -381,7 +380,7 @@
assertEquals(11, r.numDocs());
r.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
assertEquals(10, writer.numDocs());
// Commits the rollback:
writer.close();
@@ -397,7 +396,7 @@
r.close();
// Reoptimize
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
@@ -408,7 +407,7 @@
// Now open writer on the commit just before optimize,
// but this time keeping only the last commit:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit);
assertEquals(10, writer.numDocs());
// Reader still sees optimized index, because writer
@@ -444,7 +443,7 @@
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int i=0;i<107;i++) {
@@ -452,7 +451,7 @@
}
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
writer.close();
@@ -487,7 +486,7 @@
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
for(int j=0;j<N+1;j++) {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int i=0;i<17;i++) {
@@ -542,14 +541,14 @@
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.close();
Term searchTerm = new Term("content", "aaa");
Query query = new TermQuery(searchTerm);
for(int i=0;i<N+1;i++) {
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
addDoc(writer);
@@ -566,7 +565,7 @@
reader.close();
searcher.close();
}
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
// this is a commit
@@ -637,7 +636,7 @@
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
writer.close();
@@ -646,7 +645,7 @@
for(int i=0;i<N+1;i++) {
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
@@ -664,7 +663,7 @@
reader.close();
searcher.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
// This will not commit: there are no changes
// pending because we opened for "create":
writer.close();
Index: src/test/org/apache/lucene/index/TestDirectoryReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy)
@@ -194,7 +194,7 @@
}
private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException {
- IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
iw.addDocument(doc);
Index: src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- src/test/org/apache/lucene/index/TestDoc.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestDoc.java (working copy)
@@ -35,7 +35,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/** JUnit adaptation of an older test case DocTest. */
@@ -110,7 +109,7 @@
PrintWriter out = new PrintWriter(sw, true);
Directory directory = FSDirectory.open(indexDir);
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
SegmentInfo si1 = indexDoc(writer, "test.txt");
printSegment(out, si1);
@@ -138,7 +137,7 @@
out = new PrintWriter(sw, true);
directory = FSDirectory.open(indexDir);
- writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
si1 = indexDoc(writer, "test.txt");
printSegment(out, si1);
Index: src/test/org/apache/lucene/index/TestDocumentWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy)
@@ -39,7 +39,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
public class TestDocumentWriter extends LuceneTestCase {
@@ -62,7 +61,7 @@
public void testAddDocument() throws Exception {
Document testDoc = new Document();
DocHelper.setupDoc(testDoc);
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(testDoc);
writer.commit();
@@ -111,7 +110,7 @@
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
}
@Override
@@ -144,7 +143,7 @@
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new TokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader)) {
+ return new TokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader)) {
boolean first=true;
AttributeSource.State state;
@@ -208,7 +207,7 @@
public void testPreAnalyzedField() throws IOException {
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("preanalyzed", new TokenStream() {
@@ -267,7 +266,7 @@
doc.add(new Field("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
@@ -300,7 +299,7 @@
doc.add(f);
doc.add(new Field("f2", "v2", Store.YES, Index.NO));
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.optimize(); // be sure to have a single segment
writer.close();
Index: src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestFieldsReader.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.store.FSDirectory;
@@ -51,7 +50,7 @@
fieldInfos = new FieldInfos();
DocHelper.setupDoc(testDoc);
fieldInfos.add(testDoc);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.addDocument(testDoc);
writer.close();
@@ -212,7 +211,7 @@
FSDirectory tmpDir = FSDirectory.open(file);
assertTrue(tmpDir != null);
- IndexWriter writer = new IndexWriter(tmpDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(tmpDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.addDocument(testDoc);
writer.close();
@@ -393,7 +392,7 @@
try {
Directory dir = new FaultyFSDirectory(indexDir);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<2;i++)
writer.addDocument(testDoc);
writer.optimize();
Index: src/test/org/apache/lucene/index/TestFilterIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestFilterIndexReader.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestFilterIndexReader.java (working copy)
@@ -19,8 +19,6 @@
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -99,7 +97,7 @@
*/
public void testFilterIndexReader() throws Exception {
RAMDirectory directory = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
Index: src/test/org/apache/lucene/index/TestIndexFileDeleter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexFileDeleter.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexFileDeleter.java (working copy)
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
@@ -41,7 +39,7 @@
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
int i;
for(i=0;i<35;i++) {
@@ -146,7 +144,7 @@
// Open & close a writer: it should delete the above 4
// files and nothing more:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
String[] files2 = dir.listAll();
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
@@ -54,7 +54,6 @@
import org.apache.lucene.store.NoSuchDirectoryException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
public class TestIndexReader extends LuceneTestCase
@@ -79,7 +78,7 @@
commitUserData.put("foo", "fighters");
// set up writer
- IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for(int i=0;i<27;i++)
addDocumentWithFields(writer);
@@ -101,7 +100,7 @@
assertTrue(c.equals(r.getIndexCommit()));
// Change the index
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for(int i=0;i<7;i++)
addDocumentWithFields(writer);
@@ -112,7 +111,7 @@
assertFalse(r2.getIndexCommit().isOptimized());
r3.close();
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
@@ -126,19 +125,19 @@
public void testIsCurrent() throws Exception
{
RAMDirectory d = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
// set up reader:
IndexReader reader = IndexReader.open(d, false);
assertTrue(reader.isCurrent());
// modify index by adding another document:
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
assertFalse(reader.isCurrent());
// re-create index:
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
assertFalse(reader.isCurrent());
@@ -154,7 +153,7 @@
{
RAMDirectory d = new MockRAMDirectory();
// set up writer
- IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
// set up reader
@@ -166,7 +165,7 @@
assertTrue(fieldNames.contains("unstored"));
reader.close();
// add more documents
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
// want to get some more segments here
for (int i = 0; i < 5*writer.getMergeFactor(); i++)
{
@@ -246,7 +245,7 @@
public void testTermVectors() throws Exception {
RAMDirectory d = new MockRAMDirectory();
// set up writer
- IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// want to get some more segments here
// new termvector fields
for (int i = 0; i < 5 * writer.getMergeFactor(); i++) {
@@ -314,7 +313,7 @@
Term searchTerm = new Term("content", "aaa");
// add 100 documents with term : aaa
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm.text());
@@ -356,7 +355,7 @@
Directory dir = new RAMDirectory();
byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 10; i++) {
addDoc(writer, "document number " + (i + 1));
@@ -365,7 +364,7 @@
addDocumentWithTermVectorFields(writer);
}
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("bin1", bin, Field.Store.YES));
doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
@@ -402,7 +401,7 @@
// force optimize
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
reader = IndexReader.open(dir, false);
@@ -431,7 +430,7 @@
Term searchTerm = new Term("content", "aaa");
// add 11 documents with term : aaa
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 11; i++)
{
addDoc(writer, searchTerm.text());
@@ -476,7 +475,7 @@
Term searchTerm = new Term("content", "aaa");
// add 11 documents with term : aaa
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 11; i++)
{
addDoc(writer, searchTerm.text());
@@ -525,7 +524,7 @@
Term searchTerm = new Term("content", "aaa");
// add 1 documents with term : aaa
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer, searchTerm.text());
writer.close();
@@ -570,7 +569,7 @@
Term searchTerm = new Term("content", "aaa");
// add 1 documents with term : aaa
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
addDoc(writer, searchTerm.text());
writer.close();
@@ -624,7 +623,7 @@
Term searchTerm2 = new Term("content", "bbb");
// add 100 documents with term : aaa
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm.text());
@@ -640,7 +639,7 @@
assertTermDocsCount("first reader", reader, searchTerm2, 0);
// add 100 documents with term : bbb
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm2.text());
@@ -707,7 +706,7 @@
// Create initial data set
File dirFile = new File(System.getProperty("tempDir"), "testIndex");
Directory dir = getDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer, "test");
writer.close();
dir.close();
@@ -717,7 +716,7 @@
dir = getDirectory();
// Now create the data set again, just as before
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer, "test");
writer.close();
dir.close();
@@ -743,7 +742,7 @@
else
dir = getDirectory();
assertFalse(IndexReader.indexExists(dir));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
assertTrue(IndexWriter.isLocked(dir)); // writer open, so dir is locked
writer.close();
@@ -760,7 +759,7 @@
// incremented:
Thread.sleep(1000);
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
reader = IndexReader.open(dir, false);
@@ -777,7 +776,7 @@
public void testVersion() throws IOException {
Directory dir = new MockRAMDirectory();
assertFalse(IndexReader.indexExists(dir));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
assertTrue(IndexWriter.isLocked(dir)); // writer open, so dir is locked
writer.close();
@@ -788,7 +787,7 @@
reader.close();
// modify index and check version has been
// incremented:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
reader = IndexReader.open(dir, false);
@@ -799,10 +798,10 @@
public void testLock() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
IndexReader reader = IndexReader.open(dir, false);
try {
reader.deleteDocument(0);
@@ -819,7 +818,7 @@
public void testUndeleteAll() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
@@ -836,7 +835,7 @@
public void testUndeleteAllAfterClose() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
@@ -853,7 +852,7 @@
public void testUndeleteAllAfterCloseThenReopen() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
addDocumentWithFields(writer);
writer.close();
@@ -891,7 +890,7 @@
// First build up a starting index:
RAMDirectory startDir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<157;i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
@@ -1081,7 +1080,7 @@
public void testDocsOutOfOrderJIRA140() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<11;i++) {
addDoc(writer, "aaa");
}
@@ -1099,7 +1098,7 @@
}
reader.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
// We must add more docs to get a new segment written
for(int i=0;i<11;i++) {
@@ -1121,7 +1120,7 @@
public void testExceptionReleaseWriteLockJIRA768() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer, "aaa");
writer.close();
@@ -1197,7 +1196,7 @@
// add 100 documents with term : aaa
// add 100 documents with term : bbb
// add 100 documents with term : ccc
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm1.text());
@@ -1421,7 +1420,7 @@
RAMDirectory d = new MockRAMDirectory();
// set up writer
- IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for(int i=0;i<27;i++)
addDocumentWithFields(writer);
@@ -1437,7 +1436,7 @@
assertTrue(c.equals(r.getIndexCommit()));
// Change the index
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for(int i=0;i<7;i++)
addDocumentWithFields(writer);
@@ -1448,7 +1447,7 @@
assertFalse(r2.getIndexCommit().isOptimized());
r2.close();
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
@@ -1462,7 +1461,7 @@
public void testReadOnly() throws Throwable {
RAMDirectory d = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.commit();
addDocumentWithFields(writer);
@@ -1476,7 +1475,7 @@
// expected
}
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
addDocumentWithFields(writer);
writer.close();
@@ -1493,7 +1492,7 @@
// expected
}
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
@@ -1511,7 +1510,7 @@
}
// Make sure write lock isn't held
- writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
r3.close();
@@ -1521,7 +1520,7 @@
// LUCENE-1474
public void testIndexReader() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.UNLIMITED);
writer.addDocument(createDocument("a"));
writer.addDocument(createDocument("b"));
@@ -1539,7 +1538,7 @@
public void testIndexReaderUnDeleteAll() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
dir.setPreventDoubleWrite(false);
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.UNLIMITED);
writer.addDocument(createDocument("a"));
writer.addDocument(createDocument("b"));
@@ -1581,7 +1580,7 @@
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
@@ -1607,7 +1606,7 @@
// reuse the doc values arrays in FieldCache
public void testFieldCacheReuseAfterClone() throws Exception {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
@@ -1638,7 +1637,7 @@
// FieldCache
public void testFieldCacheReuseAfterReopen() throws Exception {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
@@ -1670,7 +1669,7 @@
// reopen switches readOnly
public void testReopenChangeReadonly() throws Exception {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
@@ -1711,7 +1710,7 @@
// LUCENE-1586: getUniqueTermCount
public void testUniqueTermCount() throws Exception {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
@@ -1744,7 +1743,7 @@
// LUCENE-1609: don't load terms index
public void testNoTermsIndex() throws Throwable {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
doc.add(new Field("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
@@ -1762,7 +1761,7 @@
assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());
assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.addDocument(doc);
writer.close();
@@ -1781,7 +1780,7 @@
// LUCENE-2046
public void testPrepareCommitIsCurrent() throws Throwable {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
writer.addDocument(doc);
IndexReader r = IndexReader.open(dir, true);
Index: src/test/org/apache/lucene/index/TestIndexReaderClone.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderClone.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexReaderClone.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests cloning multiple types of readers, modifying the deletedDocs and norms
@@ -198,7 +197,7 @@
TestIndexReaderReopen.createIndex(dir1, true);
IndexReader reader1 = IndexReader.open(dir1, false);
- IndexWriter w = new IndexWriter(dir1, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir1, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();
IndexReader reader2 = reader1.clone(true);
@@ -485,7 +484,7 @@
public void testCloseStoredFields() throws Exception {
final Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
w.setUseCompoundFile(false);
Document doc = new Document();
doc.add(new Field("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy)
@@ -72,7 +72,7 @@
protected void setUp() throws Exception {
super.setUp();
similarityOne = new SimilarityOne();
- anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ anlzr = new StandardAnalyzer(TEST_VERSION_CURRENT);
}
/**
Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy)
@@ -47,7 +47,6 @@
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.BitVector;
-import org.apache.lucene.util.Version;
public class TestIndexReaderReopen extends LuceneTestCase {
@@ -703,7 +702,7 @@
final Directory dir = new MockRAMDirectory();
final int n = 30;
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < n; i++) {
writer.addDocument(createDocument(i, 3));
}
@@ -722,7 +721,7 @@
modifier.deleteDocument(i % modifier.maxDoc());
modifier.close();
} else {
- IndexWriter modifier = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter modifier = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
modifier.addDocument(createDocument(n + i, 6));
modifier.close();
}
@@ -947,7 +946,7 @@
public static void createIndex(Directory dir, boolean multiSegment) throws IOException {
IndexWriter.unlock(dir);
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.setMergePolicy(new LogDocMergePolicy(w));
@@ -992,7 +991,7 @@
static void modifyIndex(int i, Directory dir) throws IOException {
switch (i) {
case 0: {
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.deleteDocuments(new Term("field2", "a11"));
w.deleteDocuments(new Term("field2", "b30"));
w.close();
@@ -1007,13 +1006,13 @@
break;
}
case 2: {
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();
break;
}
case 3: {
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.addDocument(createDocument(101, 4));
w.optimize();
w.addDocument(createDocument(102, 4));
@@ -1029,7 +1028,7 @@
break;
}
case 5: {
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.addDocument(createDocument(101, 4));
w.close();
break;
@@ -1193,7 +1192,7 @@
public void testReopenOnCommit() throws Throwable {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new KeepAllCommits(), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new KeepAllCommits(), IndexWriter.MaxFieldLength.UNLIMITED);
for(int i=0;i<4;i++) {
Document doc = new Document();
doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -67,7 +67,6 @@
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util.ThreadInterruptedException;
public class TestIndexWriter extends LuceneTestCase {
@@ -86,7 +85,7 @@
IndexWriter.setDefaultWriteLockTimeout(2000);
assertEquals(2000, IndexWriter.getDefaultWriteLockTimeout());
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
IndexWriter.setDefaultWriteLockTimeout(1000);
@@ -105,7 +104,7 @@
reader.close();
// test doc count before segments are merged/index is optimized
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
assertEquals(100, writer.maxDoc());
writer.close();
@@ -115,7 +114,7 @@
reader.close();
// optimize the index and check that the new doc count is correct
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
assertEquals(100, writer.maxDoc());
assertEquals(60, writer.numDocs());
writer.optimize();
@@ -131,7 +130,7 @@
// make sure opening a new index for create over
// this existing one works correctly:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
assertEquals(0, writer.maxDoc());
assertEquals(0, writer.numDocs());
writer.close();
@@ -174,7 +173,7 @@
long inputDiskUsage = 0;
for(int i=0;i<NUM_DIR;i++) {
dirs[i] = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dirs[i], new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dirs[i], new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int j=0;j<25;j++) {
addDocWithIndex(writer, 25*i+j);
}
@@ -188,7 +187,7 @@
// Now, build a starting index that has START_COUNT docs. We
// will then try to addIndexesNoOptimize into a copy of this:
RAMDirectory startDir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int j=0;j<START_COUNT;j++) {
addDocWithIndex(writer, j);
}
@@ -251,7 +250,7 @@
// Make a new dir that will enforce disk usage:
MockRAMDirectory dir = new MockRAMDirectory(startDir);
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
IOException err = null;
MergeScheduler ms = writer.getMergeScheduler();
@@ -463,7 +462,7 @@
System.out.println("TEST: cycle: diskFree=" + diskFree);
MockRAMDirectory dir = new MockRAMDirectory();
dir.setMaxSizeInBytes(diskFree);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
MergeScheduler ms = writer.getMergeScheduler();
if (ms instanceof ConcurrentMergeScheduler)
@@ -541,7 +540,7 @@
*/
public void testWickedLongTerm() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
char[] chars = new char[DocumentsWriter.CHAR_BLOCK_SIZE-1];
Arrays.fill(chars, 'x');
@@ -585,7 +584,7 @@
// maximum length term, and search on that term:
doc = new Document();
doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
- StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
sa.setMaxTokenLength(100000);
writer = new IndexWriter(dir, sa, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
@@ -605,7 +604,7 @@
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
LogDocMergePolicy ldmp = new LogDocMergePolicy(writer);
ldmp.setMinMergeDocs(1);
writer.setMergePolicy(ldmp);
@@ -619,7 +618,7 @@
sis.read(dir);
final int segCount = sis.size();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMergePolicy(ldmp);
writer.setMergeFactor(5);
writer.optimize(3);
@@ -642,7 +641,7 @@
final Document doc = new Document();
doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
LogDocMergePolicy ldmp = new LogDocMergePolicy(writer);
ldmp.setMinMergeDocs(1);
writer.setMergePolicy(ldmp);
@@ -684,7 +683,7 @@
public void testOptimizeTempSpaceUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int j=0;j<500;j++) {
addDocWithIndex(writer, j);
}
@@ -697,7 +696,7 @@
}
dir.resetMaxUsedSizeInBytes();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
long maxDiskUsage = dir.getMaxUsedSizeInBytes();
@@ -728,7 +727,7 @@
Directory dir = FSDirectory.open(indexDir);
// add one document & close writer
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer);
writer.close();
@@ -737,7 +736,7 @@
assertEquals("should be one document", reader.numDocs(), 1);
// now open index for create:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
assertEquals("should be zero documents", writer.maxDoc(), 0);
addDoc(writer);
writer.close();
@@ -761,7 +760,7 @@
IndexWriter writer = null;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// add 100 documents
for (int i = 0; i < 100; i++) {
@@ -799,7 +798,7 @@
reader.close();
try {
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
} catch (Exception e) {
fail("writer failed to open on a crashed index");
}
@@ -821,7 +820,7 @@
IndexWriter writer = null;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// add 100 documents
for (int i = 0; i < 100; i++) {
@@ -864,7 +863,7 @@
IndexWriter writer = null;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer);
// close
@@ -886,7 +885,7 @@
IndexWriter writer = null;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// add 100 documents
for (int i = 0; i < 100; i++) {
@@ -925,7 +924,7 @@
*/
public void testCommitOnClose() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 14; i++) {
addDoc(writer);
}
@@ -939,7 +938,7 @@
IndexReader reader = IndexReader.open(dir, true);
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<3;i++) {
for(int j=0;j<11;j++) {
addDoc(writer);
@@ -971,7 +970,7 @@
*/
public void testCommitOnCloseAbort() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
for (int i = 0; i < 14; i++) {
addDoc(writer);
@@ -984,7 +983,7 @@
assertEquals("first number of hits", 14, hits.length);
searcher.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
for(int j=0;j<17;j++) {
addDoc(writer);
@@ -1009,7 +1008,7 @@
// Now make sure we can re-open the index, add docs,
// and all is good:
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
// On abort, writer in fact may write to the same
@@ -1044,7 +1043,7 @@
*/
public void testCommitOnCloseDiskUsage() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int j=0;j<30;j++) {
addDocWithIndex(writer, j);
}
@@ -1052,7 +1051,7 @@
dir.resetMaxUsedSizeInBytes();
long startDiskUsage = dir.getMaxUsedSizeInBytes();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeScheduler(new SerialMergeScheduler());
for(int j=0;j<1470;j++) {
@@ -1087,14 +1086,14 @@
*/
public void testCommitOnCloseOptimize() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
for(int j=0;j<17;j++) {
addDocWithIndex(writer, j);
}
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
// Open a reader before closing (commiting) the writer:
@@ -1116,7 +1115,7 @@
assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
reader.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
assertNoUnreferencedFiles(dir, "aborted writer after optimize");
@@ -1131,7 +1130,7 @@
public void testIndexNoDocuments() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.commit();
writer.close();
@@ -1140,7 +1139,7 @@
assertEquals(0, reader.numDocs());
reader.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.commit();
writer.close();
@@ -1152,7 +1151,7 @@
public void testManyFields() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
for(int j=0;j<100;j++) {
Document doc = new Document();
@@ -1183,7 +1182,7 @@
public void testSmallRAMBuffer() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setRAMBufferSizeMB(0.000001);
int lastNumFile = dir.listAll().length;
for(int j=0;j<9;j++) {
@@ -1204,7 +1203,7 @@
// maxBufferedDocs in a write session
public void testChangingRAMBuffer() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -1258,7 +1257,7 @@
public void testChangingRAMBuffer2() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMaxBufferedDeleteTerms(10);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -1318,7 +1317,7 @@
public void testDiverseDocs() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setRAMBufferSizeMB(0.5);
Random rand = newRandom();
for(int i=0;i<3;i++) {
@@ -1367,7 +1366,7 @@
public void testEnablingNorms() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
@@ -1388,7 +1387,7 @@
assertEquals(10, hits.length);
searcher.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
@@ -1414,7 +1413,7 @@
public void testHighFreqTerm() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, new IndexWriter.MaxFieldLength(100000000));
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, new IndexWriter.MaxFieldLength(100000000));
writer.setRAMBufferSizeMB(0.01);
// Massive doc that has 128 K a's
StringBuilder b = new StringBuilder(1024*1024);
@@ -1461,7 +1460,7 @@
}
Directory dir = new MyRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 100; i++) {
addDoc(writer);
}
@@ -1472,7 +1471,7 @@
assertEquals("did not get right number of hits", 100, hits.length);
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
dir.close();
@@ -1480,7 +1479,7 @@
public void testFlushWithNoMerging() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -1499,7 +1498,7 @@
// empty doc (no norms) and flush
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
@@ -1518,7 +1517,7 @@
Directory dir = new MockRAMDirectory();
for(int pass=0;pass<2;pass++) {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeScheduler(new ConcurrentMergeScheduler());
Document doc = new Document();
doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -1572,7 +1571,7 @@
*/
public void testBadSegment() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.ANALYZED,
@@ -1585,7 +1584,7 @@
// LUCENE-1008
public void testNoTermVectorAfterTermVector() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
@@ -1611,7 +1610,7 @@
// LUCENE-1010
public void testNoTermVectorAfterTermVectorMerge() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
@@ -1643,7 +1642,7 @@
int pri = Thread.currentThread().getPriority();
try {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
@@ -1683,7 +1682,7 @@
// LUCENE-1013
public void testSetMaxMergeDocs() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
iw.setMergeScheduler(new MyMergeScheduler());
iw.setMaxMergeDocs(20);
iw.setMaxBufferedDocs(2);
@@ -1703,7 +1702,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new TokenFilter(new StandardTokenizer(Version.LUCENE_CURRENT, reader)) {
+ return new TokenFilter(new StandardTokenizer(TEST_VERSION_CURRENT, reader)) {
private int count = 0;
@Override
@@ -1801,7 +1800,7 @@
failure.setDoFail();
dir.failOn(failure);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
@@ -1851,7 +1850,7 @@
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new CrashingFilter(fieldName, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
+ return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
}
};
@@ -1934,7 +1933,7 @@
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new CrashingFilter(fieldName, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
+ return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
}
};
@@ -2048,7 +2047,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
int delID = 0;
for(int i=0;i<20;i++) {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(2);
writer.setUseCompoundFile(false);
@@ -2084,7 +2083,7 @@
reader.close();
if (0 == i % 4) {
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.optimize();
writer.close();
@@ -2101,7 +2100,7 @@
for(int pass=0;pass<2;pass++) {
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
//System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
for(int iter=0;iter<10;iter++) {
@@ -2173,7 +2172,7 @@
reader.close();
// Reopen
- writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
}
writer.close();
}
@@ -2253,7 +2252,7 @@
for(int iter=0;iter<7;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// We expect AlreadyClosedException
@@ -2312,7 +2311,7 @@
// OK:
public void testImmediateDiskFull() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
dir.setMaxSizeInBytes(dir.getRecomputedActualSizeInBytes());
writer.setMaxBufferedDocs(2);
final Document doc = new Document();
@@ -2350,7 +2349,7 @@
for(int iter=0;iter<10;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// We expect disk full exceptions in the merge threads
cms.setSuppressExceptions();
@@ -2411,7 +2410,7 @@
public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(2);
final Document doc = new Document();
doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -2441,7 +2440,7 @@
for(int iter=0;iter<2;iter++) {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
// We expect disk full exceptions in the merge threads
cms.setSuppressExceptions();
@@ -2601,7 +2600,7 @@
public void testUnlimitedMaxFieldLength() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
StringBuilder b = new StringBuilder();
@@ -2625,7 +2624,7 @@
IndexWriter writer = null;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// add 100 documents
for (int i = 0; i < 100; i++) {
@@ -2661,7 +2660,7 @@
public void testForceCommit() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(5);
@@ -2715,7 +2714,7 @@
FailOnlyInSync failure = new FailOnlyInSync();
dir.failOn(failure);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
failure.setDoFail();
ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2751,7 +2750,7 @@
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2784,7 +2783,7 @@
reader.close();
writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2803,7 +2802,7 @@
Directory dir = new MockRAMDirectory();
for(int iter=0;iter<2;iter++) {
IndexWriter writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2840,7 +2839,7 @@
public void testTermVectorCorruption3() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -2862,7 +2861,7 @@
writer.close();
writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -2887,7 +2886,7 @@
public void testUserSpecifiedMaxFieldLength() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new IndexWriter.MaxFieldLength(100000));
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new IndexWriter.MaxFieldLength(100000));
Document doc = new Document();
StringBuilder b = new StringBuilder();
@@ -2910,7 +2909,7 @@
public void testExpungeDeletes() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -2938,7 +2937,7 @@
ir.close();
writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
assertEquals(8, writer.numDocs());
assertEquals(10, writer.maxDoc());
@@ -2956,7 +2955,7 @@
public void testExpungeDeletes2() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(50);
@@ -2985,7 +2984,7 @@
ir.close();
writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeFactor(3);
assertEquals(49, writer.numDocs());
@@ -3003,7 +3002,7 @@
public void testExpungeDeletes3() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(50);
@@ -3032,7 +3031,7 @@
ir.close();
writer = new IndexWriter(dir,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
// Force many merges to happen
writer.setMergeFactor(3);
@@ -3048,7 +3047,7 @@
// LUCENE-1179
public void testEmptyFieldName() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
@@ -3074,7 +3073,7 @@
public void testExceptionDocumentsWriterInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
@@ -3094,7 +3093,7 @@
// LUCENE-1208
public void testExceptionJustBeforeFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
@@ -3104,7 +3103,7 @@
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new CrashingFilter(fieldName, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
+ return new CrashingFilter(fieldName, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
}
};
@@ -3144,7 +3143,7 @@
// LUCENE-1210
public void testExceptionOnMergeInit() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter2 w = new MockIndexWriter2(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ MockIndexWriter2 w = new MockIndexWriter2(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setMaxBufferedDocs(2);
w.setMergeFactor(2);
w.doFail = true;
@@ -3182,7 +3181,7 @@
// LUCENE-1222
public void testDoAfterFlush() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter3 w = new MockIndexWriter3(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ MockIndexWriter3 w = new MockIndexWriter3(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
@@ -3235,7 +3234,7 @@
public void testExceptionsDuringCommit() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
FailOnlyInCommit failure = new FailOnlyInCommit();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a field", Field.Store.YES,
Field.Index.ANALYZED));
@@ -3283,7 +3282,7 @@
// LUCENE-510
public void testInvalidUTF16() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
final int count = utf8Data.length/2;
@@ -3496,7 +3495,7 @@
};
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", tokens));
w.addDocument(doc);
@@ -3528,7 +3527,7 @@
public void testPrepareCommit() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(5);
@@ -3580,7 +3579,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
dir.setPreventDoubleWrite(false);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(5);
@@ -3605,7 +3604,7 @@
reader.close();
reader2.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 17; i++)
addDoc(writer);
@@ -3633,7 +3632,7 @@
public void testPrepareCommitNoChanges() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.prepareCommit();
writer.commit();
writer.close();
@@ -3660,14 +3659,14 @@
public RunAddIndexesThreads(int numCopy) throws Throwable {
NUM_COPY = numCopy;
dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for (int i = 0; i < NUM_INIT_DOCS; i++)
addDoc(writer);
writer.close();
dir2 = new MockRAMDirectory();
- writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
cms = (ConcurrentMergeScheduler) writer2.getMergeScheduler();
readers = new IndexReader[NUM_COPY];
@@ -3932,7 +3931,7 @@
// LUCENE-1347
public void testRollbackExceptionHang() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter4 w = new MockIndexWriter4(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ MockIndexWriter4 w = new MockIndexWriter4(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(w);
w.doFail = true;
@@ -3951,7 +3950,7 @@
// LUCENE-1219
public void testBinaryFieldOffsetLength() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
@@ -3981,7 +3980,7 @@
// LUCENE-1382
public void testCommitUserData() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.setMaxBufferedDocs(2);
for(int j=0;j<17;j++)
addDoc(w);
@@ -3994,7 +3993,7 @@
assertEquals(0, r.getCommitUserData().size());
r.close();
- w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.setMaxBufferedDocs(2);
for(int j=0;j<17;j++)
addDoc(w);
@@ -4009,7 +4008,7 @@
assertEquals("test1", r.getCommitUserData().get("label"));
r.close();
- w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();
@@ -4020,7 +4019,7 @@
public void testOptimizeExceptions() throws IOException {
RAMDirectory startDir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(startDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(startDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
w.setMaxBufferedDocs(2);
w.setMergeFactor(100);
for(int i=0;i<27;i++)
@@ -4029,7 +4028,7 @@
for(int i=0;i<200;i++) {
MockRAMDirectory dir = new MockRAMDirectory(startDir);
- w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
dir.setRandomIOExceptionRate(0.5, 100);
try {
@@ -4048,7 +4047,7 @@
final List<Throwable> thrown = new ArrayList<Throwable>();
- final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) {
+ final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) {
@Override
public void message(final String message) {
if (message.startsWith("now flush at close") && 0 == thrown.size()) {
@@ -4073,7 +4072,7 @@
// LUCENE-1442
public void testDoubleOffsetCounting() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
@@ -4108,7 +4107,7 @@
// LUCENE-1442
public void testDoubleOffsetCounting2() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
@@ -4130,7 +4129,7 @@
// LUCENE-1448
public void testEndOffsetPositionCharAnalyzer() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd ", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
@@ -4152,7 +4151,7 @@
// LUCENE-1448
public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
TokenStream stream = new CachingTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
@@ -4176,7 +4175,7 @@
// LUCENE-1448
public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter w = new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
@@ -4202,7 +4201,7 @@
// LUCENE-1448
public void testEndOffsetPositionStopFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new StopAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new StopAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
@@ -4224,7 +4223,7 @@
// LUCENE-1448
public void testEndOffsetPositionStandard() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd the ", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
@@ -4254,7 +4253,7 @@
// LUCENE-1448
public void testEndOffsetPositionStandardEmptyField() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "", Field.Store.NO,
Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
@@ -4281,7 +4280,7 @@
// LUCENE-1448
public void testEndOffsetPositionStandardEmptyField2() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd", Field.Store.NO,
@@ -4323,7 +4322,7 @@
out.writeByte((byte) 42);
out.close();
- new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED).close();
+ new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED).close();
assertTrue(dir.fileExists("myrandomfile"));
@@ -4339,7 +4338,7 @@
public void testDeadlock() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
@@ -4351,7 +4350,7 @@
// index has 2 segments
MockRAMDirectory dir2 = new MockRAMDirectory();
- IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer2.addDocument(doc);
writer2.close();
@@ -4389,7 +4388,7 @@
if (w != null) {
w.close();
}
- w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
//((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions();
if (!first && !allowInterrupt) {
@@ -4498,30 +4497,30 @@
public void testIndexStoreCombos() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
byte[] b = new byte[50];
for(int i=0;i<50;i++)
b[i] = (byte) (i+77);
Document doc = new Document();
Field f = new Field("binary", b, 10, 17, Field.Store.YES);
- f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc1field1")));
+ f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1")));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
- f2.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc1field2")));
+ f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2")));
doc.add(f);
doc.add(f2);
w.addDocument(doc);
// add 2 docs to test in-memory merging
- f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc2field1")));
- f2.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc2field2")));
+ f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field1")));
+ f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field2")));
w.addDocument(doc);
// force segment flush so we can force a segment merge with doc3 later.
w.commit();
- f.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc3field1")));
- f2.setTokenStream(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("doc3field2")));
+ f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field1")));
+ f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field2")));
w.addDocument(doc);
w.commit();
@@ -4560,7 +4559,7 @@
// LUCENE-1727: make sure doc fields are stored in order
public void testStoredFieldsOrder() throws Throwable {
Directory d = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("zzz", "a b c", Field.Store.YES, Field.Index.NO));
doc.add(new Field("aaa", "a b c", Field.Store.YES, Field.Index.NO));
@@ -4592,7 +4591,7 @@
public void testEmbeddedFFFF() throws Throwable {
Directory d = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a a\uffffb", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
@@ -4607,7 +4606,7 @@
public void testNoDocsIndex() throws Throwable {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(false);
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
writer.setInfoStream(new PrintStream(bos));
@@ -4625,7 +4624,7 @@
final int NUM_THREADS = 5;
final double RUN_SEC = 0.5;
final Directory dir = new MockRAMDirectory();
- final IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ final IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
w.commit();
final AtomicBoolean failed = new AtomicBoolean();
Thread[] threads = new Thread[NUM_THREADS];
Index: src/test/org/apache/lucene/index/TestIndexWriterDelete.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriterDelete.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestIndexWriterDelete extends LuceneTestCase {
@@ -43,7 +42,7 @@
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setUseCompoundFile(true);
modifier.setMaxBufferedDeleteTerms(1);
@@ -80,7 +79,7 @@
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(2);
modifier.setMaxBufferedDeleteTerms(2);
@@ -115,7 +114,7 @@
public void testMaxBufferedDeletes() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDeleteTerms(1);
writer.deleteDocuments(new Term("foobar", "1"));
writer.deleteDocuments(new Term("foobar", "1"));
@@ -130,7 +129,7 @@
for(int t=0;t<2;t++) {
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(4);
modifier.setMaxBufferedDeleteTerms(4);
@@ -172,7 +171,7 @@
public void testBothDeletes() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(100);
modifier.setMaxBufferedDeleteTerms(100);
@@ -205,7 +204,7 @@
public void testBatchDeletes() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(2);
modifier.setMaxBufferedDeleteTerms(2);
@@ -249,7 +248,7 @@
public void testDeleteAll() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(2);
modifier.setMaxBufferedDeleteTerms(2);
@@ -296,7 +295,7 @@
public void testDeleteAllRollback() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(2);
modifier.setMaxBufferedDeleteTerms(2);
@@ -334,7 +333,7 @@
public void testDeleteAllNRT() throws IOException {
Directory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(2);
modifier.setMaxBufferedDeleteTerms(2);
@@ -426,7 +425,7 @@
// First build up a starting index:
MockRAMDirectory startDir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(startDir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 157; i++) {
Document d = new Document();
d.add(new Field("id", Integer.toString(i), Field.Store.YES,
@@ -449,7 +448,7 @@
MockRAMDirectory dir = new MockRAMDirectory(startDir);
dir.setPreventDoubleWrite(false);
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setMaxBufferedDocs(1000); // use flush or close
modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
@@ -655,7 +654,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
modifier.setUseCompoundFile(true);
modifier.setMaxBufferedDeleteTerms(2);
@@ -764,7 +763,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter modifier = new IndexWriter(dir,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
dir.failOn(failure.reset());
Index: src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy)
@@ -21,7 +21,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
@@ -135,7 +134,7 @@
public void testRandomExceptions() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
//writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(0.1);
@@ -173,7 +172,7 @@
public void testRandomExceptionsThreads() throws Throwable {
MockRAMDirectory dir = new MockRAMDirectory();
- MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
//writer.setMaxBufferedDocs(10);
writer.setRAMBufferSizeMB(0.2);
Index: src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java (working copy)
@@ -75,10 +75,10 @@
IndexWriter im;
FSDirectory dir = FSDirectory.open(this.__test_dir);
try {
- im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
} catch (FileNotFoundException e) {
try {
- im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
} catch (FileNotFoundException e1) {
}
} finally {
Index: src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.LuceneTestCase;
@@ -35,7 +34,7 @@
public void testNormalCase() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(10);
writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -52,7 +51,7 @@
public void testNoOverMerge() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(10);
writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -74,7 +73,7 @@
public void testForceFlush() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(10);
LogDocMergePolicy mp = new LogDocMergePolicy(writer);
@@ -85,7 +84,7 @@
addDoc(writer);
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergePolicy(mp);
mp.setMinMergeDocs(100);
@@ -100,7 +99,7 @@
public void testMergeFactorChange() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(100);
writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -126,7 +125,7 @@
public void testMaxBufferedDocsChange() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(101);
writer.setMergeFactor(101);
writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -140,7 +139,7 @@
}
writer.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(101);
writer.setMergeFactor(101);
writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -171,7 +170,7 @@
public void testMergeDocCount0() throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMergePolicy(new LogDocMergePolicy(writer));
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(100);
@@ -186,7 +185,7 @@
reader.deleteDocuments(new Term("content", "aaa"));
reader.close();
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMergePolicy(new LogDocMergePolicy(writer));
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(5);
Index: src/test/org/apache/lucene/index/TestIndexWriterMerging.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterMerging.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriterMerging.java (working copy)
@@ -56,7 +56,7 @@
Directory merged = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeFactor(2);
writer.addIndexesNoOptimize(new Directory[]{indexA, indexB});
@@ -93,7 +93,7 @@
private void fillIndex(Directory dir, int start, int numDocs) throws IOException
{
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeFactor(2);
writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy)
@@ -37,7 +37,6 @@
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ThreadInterruptedException;
@@ -77,7 +76,7 @@
boolean optimize = true;
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
// create the index
@@ -112,7 +111,7 @@
assertEquals(0, count(new Term("id", id10), r3));
assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));
- writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
@@ -140,7 +139,7 @@
boolean optimize = false;
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
// create the index
@@ -149,7 +148,7 @@
// create a 2nd index
Directory dir2 = new MockRAMDirectory();
- IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer2.setInfoStream(infoStream);
createIndexNoClose(!optimize, "index2", writer2);
@@ -187,13 +186,13 @@
boolean optimize = false;
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
// create a 2nd index
Directory dir2 = new MockRAMDirectory();
- IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer2.setInfoStream(infoStream);
createIndexNoClose(!optimize, "index2", writer2);
@@ -222,7 +221,7 @@
boolean optimize = true;
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
// create the index
@@ -261,7 +260,7 @@
writer.close();
// reopen the writer to verify the delete made it to the directory
- writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
IndexReader w2r1 = writer.getReader();
@@ -276,7 +275,7 @@
int numDirs = 3;
Directory mainDir = new MockRAMDirectory();
- IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
mainWriter.setInfoStream(infoStream);
AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
@@ -384,7 +383,7 @@
this.numDirs = numDirs;
this.mainWriter = mainWriter;
addDir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for (int i = 0; i < NUM_INIT_DOCS; i++) {
@@ -492,7 +491,7 @@
*/
public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
IndexReader r1 = writer.getReader();
@@ -530,7 +529,7 @@
writer.close();
// test whether the changes made it to the directory
- writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
IndexReader w2r1 = writer.getReader();
// insure the deletes were actually flushed to the directory
@@ -571,7 +570,7 @@
*/
public static void createIndex(Directory dir1, String indexName,
boolean multiSegment) throws IOException {
- IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
w.setMergePolicy(new LogDocMergePolicy(w));
for (int i = 0; i < 100; i++) {
@@ -606,7 +605,7 @@
public void testMergeWarmer() throws Exception {
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
@@ -641,7 +640,7 @@
public void testAfterCommit() throws Exception {
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
@@ -674,7 +673,7 @@
// Make sure reader remains usable even if IndexWriter closes
public void testAfterClose() throws Exception {
Directory dir1 = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
@@ -704,7 +703,7 @@
// Stress test reopen during addIndexes
public void testDuringAddIndexes() throws Exception {
Directory dir1 = new MockRAMDirectory();
- final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
writer.setMergeFactor(2);
@@ -782,7 +781,7 @@
// Stress test reopen during add/delete
public void testDuringAddDelete() throws Exception {
Directory dir1 = new MockRAMDirectory();
- final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setInfoStream(infoStream);
writer.setMergeFactor(2);
@@ -863,7 +862,7 @@
public void testExpungeDeletes() throws Throwable {
Directory dir = new MockRAMDirectory();
- final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
@@ -888,7 +887,7 @@
public void testDeletesNumDocs() throws Throwable {
Directory dir = new MockRAMDirectory();
- final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/index/TestLazyBug.java
===================================================================
--- src/test/org/apache/lucene/index/TestLazyBug.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestLazyBug.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.*;
@@ -64,7 +63,7 @@
Directory dir = new RAMDirectory();
try {
Random r = newRandom();
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
Index: src/test/org/apache/lucene/index/TestLazyProxSkipping.java
===================================================================
--- src/test/org/apache/lucene/index/TestLazyProxSkipping.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestLazyProxSkipping.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests lazy skipping on the proximity file.
@@ -61,7 +60,7 @@
int numDocs = 500;
Directory directory = new SeekCountingDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.setMaxBufferedDocs(10);
for (int i = 0; i < numDocs; i++) {
@@ -119,7 +118,7 @@
public void testSeek() throws IOException {
Directory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 10; i++) {
Document doc = new Document();
doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
===================================================================
--- src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (working copy)
@@ -32,7 +32,6 @@
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* This testcase tests whether multi-level skipping is being used
@@ -92,7 +91,7 @@
private static class PayloadAnalyzer extends Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new PayloadFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new PayloadFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
Index: src/test/org/apache/lucene/index/TestNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestNorms.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestNorms.java (working copy)
@@ -65,7 +65,7 @@
protected void setUp() throws Exception {
super.setUp();
similarityOne = new SimilarityOne();
- anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ anlzr = new StandardAnalyzer(TEST_VERSION_CURRENT);
}
/**
Index: src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
===================================================================
--- src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestNRTReaderWithThreads extends LuceneTestCase {
Random random = new Random();
@@ -33,7 +32,7 @@
public void testIndexing() throws Exception {
Directory mainDir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
IndexReader reader = writer.getReader(); // start pooling readers
Index: src/test/org/apache/lucene/index/TestOmitTf.java
===================================================================
--- src/test/org/apache/lucene/index/TestOmitTf.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestOmitTf.java (working copy)
@@ -66,7 +66,7 @@
// omitTermFreqAndPositions bit in the FieldInfo
public void testOmitTermFreqAndPositions() throws Exception {
Directory ram = new MockRAMDirectory();
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document d = new Document();
@@ -112,7 +112,7 @@
// omitTermFreqAndPositions for the same field works
public void testMixedMerge() throws Exception {
Directory ram = new MockRAMDirectory();
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(3);
writer.setMergeFactor(2);
@@ -165,7 +165,7 @@
// field,
public void testMixedRAM() throws Exception {
Directory ram = new MockRAMDirectory();
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
writer.setMergeFactor(2);
@@ -213,7 +213,7 @@
// Verifies no *.prx exists when all fields omit term freq:
public void testNoPrxFile() throws Throwable {
Directory ram = new MockRAMDirectory();
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(3);
writer.setMergeFactor(2);
@@ -244,7 +244,7 @@
// Test scores with one field with Term Freqs and one without, otherwise with equal content
public void testBasic() throws Exception {
Directory dir = new MockRAMDirectory();
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeFactor(2);
writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestParallelReader.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestParallelReader.java (working copy)
@@ -106,7 +106,7 @@
// one document only:
Directory dir2 = new MockRAMDirectory();
- IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document d3 = new Document();
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
w2.addDocument(d3);
@@ -151,13 +151,13 @@
Directory dir2 = getDir2();
// add another document to ensure that the indexes are not optimized
- IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document d = new Document();
d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
modifier.addDocument(d);
modifier.close();
- modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
d = new Document();
d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
modifier.addDocument(d);
@@ -170,7 +170,7 @@
assertFalse(pr.isOptimized());
pr.close();
- modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
modifier.optimize();
modifier.close();
@@ -182,7 +182,7 @@
pr.close();
- modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
modifier.optimize();
modifier.close();
@@ -233,7 +233,7 @@
// Fields 1-4 indexed together:
private Searcher single() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -263,7 +263,7 @@
private Directory getDir1() throws IOException {
Directory dir1 = new MockRAMDirectory();
- IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -278,7 +278,7 @@
private Directory getDir2() throws IOException {
Directory dir2 = new RAMDirectory();
- IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document d3 = new Document();
d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
===================================================================
--- src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (working copy)
@@ -20,7 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -48,7 +47,7 @@
*/
public void testEmptyIndex() throws IOException {
RAMDirectory rd1 = new MockRAMDirectory();
- IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.UNLIMITED);
iw.close();
@@ -56,7 +55,7 @@
RAMDirectory rdOut = new MockRAMDirectory();
- IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.UNLIMITED);
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(rd1,true));
@@ -81,7 +80,7 @@
public void testEmptyIndexWithVectors() throws IOException {
RAMDirectory rd1 = new MockRAMDirectory();
{
- IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("test", "", Store.NO, Index.ANALYZED,
@@ -96,7 +95,7 @@
ir.deleteDocument(0);
ir.close();
- iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), false,
+ iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), false,
MaxFieldLength.UNLIMITED);
iw.optimize();
iw.close();
@@ -104,7 +103,7 @@
RAMDirectory rd2 = new MockRAMDirectory();
{
- IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.UNLIMITED);
Document doc = new Document();
iw.addDocument(doc);
@@ -113,7 +112,7 @@
RAMDirectory rdOut = new MockRAMDirectory();
- IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.UNLIMITED);
ParallelReader pr = new ParallelReader();
pr.add(IndexReader.open(rd1,true));
Index: src/test/org/apache/lucene/index/TestParallelTermEnum.java
===================================================================
--- src/test/org/apache/lucene/index/TestParallelTermEnum.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestParallelTermEnum.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -39,7 +37,7 @@
Document doc;
RAMDirectory rd1 = new RAMDirectory();
- IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
doc = new Document();
doc.add(new Field("field1", "the quick brown fox jumps", Store.YES,
@@ -51,7 +49,7 @@
iw1.close();
RAMDirectory rd2 = new RAMDirectory();
- IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
doc = new Document();
doc.add(new Field("field0", "", Store.NO, Index.ANALYZED));
Index: src/test/org/apache/lucene/index/TestPayloads.java
===================================================================
--- src/test/org/apache/lucene/index/TestPayloads.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestPayloads.java (working copy)
@@ -41,7 +41,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
@@ -396,7 +395,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
PayloadData payload = fieldToData.get(fieldName);
- TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
if (payload != null) {
if (payload.numFieldInstancesToSkip == 0) {
ts = new PayloadFilter(ts, payload.data, payload.offset, payload.length);
@@ -469,7 +468,7 @@
final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
Directory dir = new RAMDirectory();
- final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
final String field = "test";
Thread[] ingesters = new Thread[numThreads];
Index: src/test/org/apache/lucene/index/TestSegmentTermDocs.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
@@ -103,7 +102,7 @@
public void testSkipTo(int indexDivisor) throws IOException {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Term ta = new Term("content","aaa");
Index: src/test/org/apache/lucene/index/TestSegmentTermEnum.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentTermEnum.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestSegmentTermEnum.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -38,7 +36,7 @@
{
IndexWriter writer = null;
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// ADD 100 documents with term : aaa
// add 100 documents with terms: aaa bbb
@@ -54,7 +52,7 @@
verifyDocFreq();
// merge segments by optimizing the index
- writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();
@@ -65,7 +63,7 @@
public void testPrevTermAtEnd() throws IOException
{
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer, "aaa bbb");
writer.close();
SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
Index: src/test/org/apache/lucene/index/TestStressIndexing.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestStressIndexing.java (working copy)
@@ -26,7 +26,7 @@
import java.io.File;
public class TestStressIndexing extends LuceneTestCase {
- private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private Random RANDOM;
private static abstract class TimedThread extends Thread {
Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
@@ -19,7 +19,6 @@
import org.apache.lucene.analysis.*;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.search.TermQuery;
@@ -124,7 +123,7 @@
public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
Map<String,Document> docs = new HashMap<String,Document>();
- IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setUseCompoundFile(false);
/***
@@ -176,7 +175,7 @@
public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
Map<String,Document> docs = new HashMap<String,Document>();
for(int iter=0;iter<3;iter++) {
- IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
w.setUseCompoundFile(false);
// force many merges
@@ -219,7 +218,7 @@
public static void indexSerial(Map<String,Document> docs, Directory dir) throws IOException {
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
// index all docs in a single thread
Iterator<Document> iter = docs.values().iterator();
Index: src/test/org/apache/lucene/index/TestThreadedOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestThreadedOptimize.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestThreadedOptimize.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.English;
@@ -35,7 +34,7 @@
public class TestThreadedOptimize extends LuceneTestCase {
- private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private final static int NUM_THREADS = 3;
//private final static int NUM_THREADS = 5;
Index: src/test/org/apache/lucene/index/TestTransactionRollback.java
===================================================================
--- src/test/org/apache/lucene/index/TestTransactionRollback.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestTransactionRollback.java (working copy)
@@ -27,8 +27,6 @@
import java.util.HashMap;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -67,7 +65,7 @@
if (last==null)
throw new RuntimeException("Couldn't find commit point "+id);
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
new RollbackDeletionPolicy(id), MaxFieldLength.UNLIMITED, last);
Map<String,String> data = new HashMap<String,String>();
data.put("index", "Rolled back to 1-"+id);
@@ -129,7 +127,7 @@
//Build index, of records 1 to 100, committing after each batch of 10
IndexDeletionPolicy sdp=new KeepAllDeletionPolicy();
- IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),sdp,MaxFieldLength.UNLIMITED);
+ IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),sdp,MaxFieldLength.UNLIMITED);
for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
Document doc=new Document();
doc.add(new Field(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
@@ -197,7 +195,7 @@
for(int i=0;i<2;i++) {
// Unless you specify a prior commit point, rollback
// should not work:
- new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
new DeleteLastCommitPolicy(),
MaxFieldLength.UNLIMITED).close();
IndexReader r = IndexReader.open(dir, true);
Index: src/test/org/apache/lucene/index/TestTransactions.java
===================================================================
--- src/test/org/apache/lucene/index/TestTransactions.java (revision 908485)
+++ src/test/org/apache/lucene/index/TestTransactions.java (working copy)
@@ -88,12 +88,12 @@
@Override
public void doWork() throws Throwable {
- IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer1.setMaxBufferedDocs(3);
writer1.setMergeFactor(2);
((ConcurrentMergeScheduler) writer1.getMergeScheduler()).setSuppressExceptions();
- IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
// Intentionally use different params so flush/merge
// happen @ different times
writer2.setMaxBufferedDocs(2);
@@ -178,7 +178,7 @@
}
public void initIndex(Directory dir) throws Throwable {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
for(int j=0; j<7; j++) {
Document d = new Document();
int n = RANDOM.nextInt();
Index: src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (revision 908485)
+++ src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (working copy)
@@ -44,7 +44,7 @@
public void testMultiAnalyzer() throws ParseException {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "", new MultiAnalyzer());
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "", new MultiAnalyzer());
// trivial, no multiple tokens:
assertEquals("foo", qp.parse("foo").toString());
@@ -135,9 +135,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
@@ -203,9 +203,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestPosIncrementFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
@@ -242,7 +242,7 @@
private final static class DumbQueryParser extends QueryParser {
public DumbQueryParser(String f, Analyzer a) {
- super(Version.LUCENE_CURRENT, f, a);
+ super(TEST_VERSION_CURRENT, f, a);
}
/** expose super's version */
Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (revision 908485)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (working copy)
@@ -36,7 +36,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests QueryParser.
@@ -60,18 +59,18 @@
String[] fields = {"b", "t"};
Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer();
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, a);
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, a);
Query q = mfqp.parse(qtxt);
assertEquals(expectedRes, q.toString());
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, qtxt, fields, occur, a);
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, qtxt, fields, occur, a);
assertEquals(expectedRes, q.toString());
}
public void testSimple() throws Exception {
String[] fields = {"b", "t"};
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = mfqp.parse("one");
assertEquals("b:one t:one", q.toString());
@@ -134,7 +133,7 @@
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = {"b", "t"};
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts);
//Check for simple
@@ -160,24 +159,24 @@
public void testStaticMethod1() throws ParseException {
String[] fields = {"b", "t"};
String[] queries = {"one", "two"};
- Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = {"+one", "+two"};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = {"one", "+two"};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = {"one +more", "+two"};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = {"blah"};
try {
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -187,11 +186,11 @@
TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer();
String[] queries6 = {"((+stop))", "+((stop))"};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries6, fields, stopA);
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries6, fields, stopA);
assertEquals("", q.toString());
String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries7, fields, stopA);
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries7, fields, stopA);
assertEquals("(b:one +b:more) (+t:two)", q.toString());
}
@@ -199,15 +198,15 @@
public void testStaticMethod2() throws ParseException {
String[] fields = {"b", "t"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:one", q.toString());
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -219,15 +218,15 @@
//int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
+ Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));//, fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -239,12 +238,12 @@
String[] fields = {"f1", "f2", "f3"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD};
- Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -255,12 +254,12 @@
String[] queries = {"one", "two"};
String[] fields = {"b", "t"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -269,7 +268,7 @@
public void testAnalyzerReturningNull() throws ParseException {
String[] fields = new String[] { "f1", "f2", "f3" };
- MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new AnalyzerReturningNull());
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new AnalyzerReturningNull());
Query q = parser.parse("bla AND blo");
assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString());
// the following queries are not affected as their terms are not analyzed anyway:
@@ -282,7 +281,7 @@
}
public void testStopWordSearching() throws Exception {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
@@ -291,7 +290,7 @@
iw.close();
MultiFieldQueryParser mfqp =
- new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[] {"body"}, analyzer);
+ new MultiFieldQueryParser(TEST_VERSION_CURRENT, new String[] {"body"}, analyzer);
mfqp.setDefaultOperator(QueryParser.Operator.AND);
Query q = mfqp.parse("the footest");
IndexSearcher is = new IndexSearcher(ramDir, true);
@@ -304,7 +303,7 @@
* Return empty tokens for field "f1".
*/
private static class AnalyzerReturningNull extends Analyzer {
- StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
public AnalyzerReturningNull() {
}
Index: src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestQueryParser.java (revision 908485)
+++ src/test/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -64,7 +64,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests QueryParser.
@@ -128,13 +127,13 @@
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
public static class QPTestParser extends QueryParser {
public QPTestParser(String f, Analyzer a) {
- super(Version.LUCENE_CURRENT, f, a);
+ super(TEST_VERSION_CURRENT, f, a);
}
@Override
@@ -158,8 +157,8 @@
public QueryParser getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
qp.setDefaultOperator(QueryParser.OR_OPERATOR);
return qp;
}
@@ -228,8 +227,8 @@
public Query getQueryDOA(String query, Analyzer a)
throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
qp.setDefaultOperator(QueryParser.AND_OPERATOR);
return qp.parse(query);
}
@@ -253,8 +252,8 @@
public void testSimple() throws Exception {
assertQueryEquals("term term term", null, "term term term");
- assertQueryEquals("türm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "türm term term");
- assertQueryEquals("ümlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "ümlaut");
+ assertQueryEquals("türm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "türm term term");
+ assertQueryEquals("ümlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "ümlaut");
assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
@@ -301,7 +300,7 @@
assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
"+(title:dog title:cat) -author:\"bob dole\"");
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT));
// make sure OR is the default:
assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(QueryParser.AND_OPERATOR);
@@ -311,7 +310,7 @@
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -331,7 +330,7 @@
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -456,7 +455,7 @@
assertQueryEquals("[ a TO z]", null, "[a TO z]");
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -473,7 +472,7 @@
public void testFarsiRangeCollating() throws Exception {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content","\u0633\u0627\u0628",
@@ -482,7 +481,7 @@
iw.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@@ -580,7 +579,7 @@
final String defaultField = "default";
final String monthField = "month";
final String hourField = "hour";
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
// Don't set any date resolution and verify if DateField is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
@@ -621,7 +620,7 @@
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*assertQueryEquals("\\[brackets", a, "\\[brackets");
assertQueryEquals("\\[brackets", null, "brackets");
@@ -715,7 +714,7 @@
}
public void testQueryStringEscaping() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@@ -802,8 +801,8 @@
throws Exception {
Set<Object> stopWords = new HashSet<Object>(1);
stopWords.add("on");
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", oneStopAnalyzer);
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
q = qp.parse("\"hello\"^2.0");
@@ -815,7 +814,7 @@
q = qp.parse("\"on\"^1.0");
assertNotNull(q);
- QueryParser qp2 = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT));
q = qp2.parse("the^3");
// "the" is a stop word so the result is an empty query:
assertNotNull(q);
@@ -844,7 +843,7 @@
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
fail("Wildcard queries should not be allowed");
} catch (ParseException expected) {
// expected exception
@@ -853,7 +852,7 @@
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
fail("Fuzzy queries should not be allowed");
} catch (ParseException expected) {
// expected exception
@@ -863,7 +862,7 @@
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
@@ -875,7 +874,7 @@
* This test differs from TestPrecedenceQueryParser
*/
public void testPrecedence() throws Exception {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = qp.parse("A AND B OR C AND D");
Query query2 = qp.parse("+A +B +C +D");
assertEquals(query1, query2);
@@ -883,7 +882,7 @@
public void testLocalDateFormat() throws IOException, ParseException {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
iw.close();
@@ -899,7 +898,7 @@
public void testStarParsing() throws Exception {
final int[] type = new int[1];
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)) {
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)) {
@Override
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
// override error checking of superclass
@@ -958,7 +957,7 @@
}
public void testStopwords() throws Exception {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo")));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo")));
Query result = qp.parse("a:the OR a:foo");
assertNotNull("result is null and it shouldn't be", result);
assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -974,7 +973,7 @@
}
public void testPositionIncrement() throws Exception {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this")));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this")));
qp.setEnablePositionIncrements(true);
String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
// 0 2 5 7 8
@@ -991,7 +990,7 @@
}
public void testMatchAllDocs() throws Exception {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*");
@@ -1000,7 +999,7 @@
}
private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "date", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
@@ -1028,7 +1027,7 @@
// "match"
public void testPositionIncrements() throws Exception {
Directory dir = new MockRAMDirectory();
- Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter w = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
@@ -1036,7 +1035,7 @@
IndexReader r = w.getReader();
w.close();
IndexSearcher s = new IndexSearcher(r);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "f", a);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a);
Query q = qp.parse("\"wizard of ozzy\"");
assertEquals(1, s.search(q, 1).totalHits);
r.close();
Index: src/test/org/apache/lucene/search/BaseTestRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/BaseTestRangeFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/BaseTestRangeFilter.java (working copy)
@@ -20,8 +20,6 @@
import java.util.Random;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -98,7 +96,7 @@
try {
/* build an index */
- IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+ IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
IndexWriter.MaxFieldLength.LIMITED);
for (int d = minId; d <= maxId; d++) {
Index: src/test/org/apache/lucene/search/function/FunctionTestSetup.java
===================================================================
--- src/test/org/apache/lucene/search/function/FunctionTestSetup.java (revision 908485)
+++ src/test/org/apache/lucene/search/function/FunctionTestSetup.java (working copy)
@@ -96,7 +96,7 @@
// prepare a small index with just a few documents.
super.setUp();
dir = new RAMDirectory();
- anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ anlzr = new StandardAnalyzer(TEST_VERSION_CURRENT);
IndexWriter iw = new IndexWriter(dir, anlzr,
IndexWriter.MaxFieldLength.LIMITED);
// add docs not exactly in natural ID order, to verify we do check the order of docs by scores
Index: src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
===================================================================
--- src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.*;
-import org.apache.lucene.util.Version;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -185,7 +184,7 @@
}
public void testCustomExternalQuery() throws Exception {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, TEXT_FIELD,anlzr);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD,anlzr);
String qtxt = "first aid text"; // from the doc texts in FunctionQuerySetup.
Query q1 = qp.parse(qtxt);
@@ -208,7 +207,7 @@
float boost = (float) dboost;
IndexSearcher s = new IndexSearcher(dir, true);
FieldScoreQuery qValSrc = new FieldScoreQuery(field, tp); // a query that would score by the field
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, TEXT_FIELD, anlzr);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD, anlzr);
String qtxt = "first aid text"; // from the doc texts in FunctionQuerySetup.
// regular (boolean) query.
Index: src/test/org/apache/lucene/search/payloads/PayloadHelper.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 908485)
+++ src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy)
@@ -25,9 +25,9 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.English;
-import org.apache.lucene.util.Version;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Similarity;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;
import java.io.Reader;
import java.io.IOException;
@@ -51,7 +51,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
result = new PayloadFilter(result, fieldName);
return result;
}
Index: src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy)
@@ -40,7 +40,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.search.Explanation.IDFExplanation;
@@ -57,7 +56,7 @@
private class PayloadAnalyzer extends Analyzer {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
result = new PayloadFilter(result, fieldName);
return result;
}
Index: src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (working copy)
@@ -18,7 +18,6 @@
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.English;
-import org.apache.lucene.util.Version;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.TopDocs;
@@ -67,7 +66,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
result = new PayloadFilter(result, fieldName);
return result;
}
Index: src/test/org/apache/lucene/search/QueryUtils.java
===================================================================
--- src/test/org/apache/lucene/search/QueryUtils.java (revision 908485)
+++ src/test/org/apache/lucene/search/QueryUtils.java (working copy)
@@ -15,7 +15,7 @@
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;
/**
* Copyright 2005 Apache Software Foundation
@@ -200,7 +200,7 @@
private static RAMDirectory makeEmptyIndex(final int numDeletedDocs)
throws IOException {
RAMDirectory d = new RAMDirectory();
- IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.LIMITED);
for (int i = 0; i < numDeletedDocs; i++) {
w.addDocument(new Document());
Index: src/test/org/apache/lucene/search/spans/TestBasics.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestBasics.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestBasics.java (working copy)
@@ -35,7 +35,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests basic search capabilities.
@@ -56,7 +55,7 @@
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Index: src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (working copy)
@@ -32,7 +32,6 @@
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestFieldMaskingSpanQuery extends LuceneTestCase {
@@ -55,7 +54,7 @@
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc(new Field[] { field("id", "0")
Index: src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy)
@@ -30,14 +30,13 @@
import org.apache.lucene.search.Scorer;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestNearSpansOrdered extends LuceneTestCase {
protected IndexSearcher searcher;
public static final String FIELD = "field";
public static final QueryParser qp =
- new QueryParser(Version.LUCENE_CURRENT, FIELD, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
@Override
public void tearDown() throws Exception {
@@ -49,7 +48,7 @@
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy)
@@ -47,7 +47,6 @@
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestPayloadSpans extends LuceneTestCase {
private final static boolean DEBUG = true;
@@ -469,7 +468,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
result = new PayloadFilter(result, fieldName);
return result;
}
@@ -521,7 +520,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
result = new PayloadFilter(result, fieldName);
return result;
}
Index: src/test/org/apache/lucene/search/spans/TestSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpans.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestSpans.java (working copy)
@@ -37,8 +37,6 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.util.Collections;
@@ -51,7 +49,7 @@
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.YES, Field.Index.ANALYZED));
@@ -453,7 +451,7 @@
// LUCENE-1404
public void testNPESpanQuery() throws Throwable {
final Directory dir = new MockRAMDirectory();
- final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet()), IndexWriter.MaxFieldLength.LIMITED);
+ final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()), IndexWriter.MaxFieldLength.LIMITED);
// Add documents
addDoc(writer, "1", "the big dogs went running to the market");
Index: src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (working copy)
@@ -56,7 +56,7 @@
// create test index
mDirectory = new RAMDirectory();
- final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDocument(writer, "1", "I think it should work.");
addDocument(writer, "2", "I think it should work.");
addDocument(writer, "3", "I think it should work.");
Index: src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java (revision 908485)
+++ src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java (working copy)
@@ -40,7 +40,7 @@
super.setUp();
// create test index
- final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
addDocument(writer, "A", "Should we, could we, would we?");
addDocument(writer, "B", "It should. Should it?");
addDocument(writer, "C", "It shouldn't.");
Index: src/test/org/apache/lucene/search/TestBoolean2.java
===================================================================
--- src/test/org/apache/lucene/search/TestBoolean2.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestBoolean2.java (working copy)
@@ -32,7 +32,6 @@
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/** Test BooleanQuery2 against BooleanQuery by overriding the standard query parser.
* This also tests the scoring order of BooleanQuery.
@@ -51,7 +50,7 @@
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
@@ -68,14 +67,14 @@
int docCount = 0;
do {
final Directory copy = new RAMDirectory(dir2);
- IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
w.addIndexesNoOptimize(new Directory[] {copy});
docCount = w.maxDoc();
w.close();
mulFactor *= 2;
} while(docCount < 3000);
- IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
@@ -107,7 +106,7 @@
};
public Query makeQuery(String queryText) throws ParseException {
- Query q = (new QueryParser(Version.LUCENE_CURRENT, field, new WhitespaceAnalyzer(Version.LUCENE_CURRENT))).parse(queryText);
+ Query q = (new QueryParser(TEST_VERSION_CURRENT, field, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))).parse(queryText);
return q;
}
Index: src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (working copy)
@@ -20,7 +20,6 @@
import junit.framework.TestCase;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -60,7 +59,7 @@
index = new RAMDirectory();
IndexWriter writer = new IndexWriter(index,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < data.length; i++) {
Index: src/test/org/apache/lucene/search/TestBooleanOr.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanOr.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestBooleanOr.java (working copy)
@@ -135,7 +135,7 @@
RAMDirectory rd = new RAMDirectory();
//
- IndexWriter writer = new IndexWriter(rd, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(rd, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
//
Document d = new Document();
Index: src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java (working copy)
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import junit.framework.Test;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -81,7 +79,7 @@
Query rw2 = null;
IndexReader reader = null;
try {
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
Index: src/test/org/apache/lucene/search/TestBooleanQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestBooleanQuery.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.index.Term;
public class TestBooleanQuery extends LuceneTestCase {
@@ -61,7 +60,7 @@
// LUCENE-1630
public void testNullOrSubScorer() throws Throwable {
Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
Index: src/test/org/apache/lucene/search/TestBooleanScorer.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanScorer.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestBooleanScorer.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestBooleanScorer extends LuceneTestCase
{
@@ -45,7 +44,7 @@
String[] values = new String[] { "1", "2", "3", "4" };
try {
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < values.length; i++) {
Document doc = new Document();
doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
Index: src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (working copy)
@@ -32,7 +32,7 @@
public class TestCachingWrapperFilter extends LuceneTestCase {
public void testCachingWorks() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
@@ -71,7 +71,7 @@
public void testIsCacheAble() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Index: src/test/org/apache/lucene/search/TestCustomSearcherSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestCustomSearcherSort.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestCustomSearcherSort.java (working copy)
@@ -70,7 +70,7 @@
private Directory getIndex()
throws IOException {
RAMDirectory indexStore = new RAMDirectory ();
- IndexWriter writer = new IndexWriter (indexStore, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter (indexStore, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
RandomGen random = new RandomGen(newRandom());
for (int i=0; i<INDEX_SIZE; ++i) { // don't decrease; if to low the problem doesn't show up
Document doc = new Document();
Index: src/test/org/apache/lucene/search/TestDateFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestDateFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestDateFilter.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
@@ -51,7 +50,7 @@
{
// create an index
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
long now = System.currentTimeMillis();
@@ -112,7 +111,7 @@
{
// create an index
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
long now = System.currentTimeMillis();
Index: src/test/org/apache/lucene/search/TestDateSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestDateSort.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestDateSort.java (working copy)
@@ -33,7 +33,6 @@
import org.apache.lucene.search.SortField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
/**
* Test date sorting, i.e. auto-sorting of fields with type "long".
@@ -51,7 +50,7 @@
super.setUp();
// Create an index writer.
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
// oldest doc:
@@ -76,7 +75,7 @@
Sort sort = new Sort(new SortField(DATE_TIME_FIELD, SortField.STRING, true));
- QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, TEXT_FIELD, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query = queryParser.parse("Document");
// Execute the search and process the search results.
Index: src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (working copy)
@@ -19,7 +19,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -80,7 +79,7 @@
index = new RAMDirectory();
IndexWriter writer = new IndexWriter(index,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
writer.setSimilarity(sim);
Index: src/test/org/apache/lucene/search/TestDocBoost.java
===================================================================
--- src/test/org/apache/lucene/search/TestDocBoost.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestDocBoost.java (working copy)
@@ -20,7 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
@@ -40,7 +39,7 @@
public void testDocBoost() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Index: src/test/org/apache/lucene/search/TestDocIdSet.java
===================================================================
--- src/test/org/apache/lucene/search/TestDocIdSet.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestDocIdSet.java (working copy)
@@ -35,7 +35,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
public class TestDocIdSet extends LuceneTestCase {
@@ -106,7 +105,7 @@
// Tests that if a Filter produces a null DocIdSet, which is given to
// IndexSearcher, everything works fine. This came up in LUCENE-1754.
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
writer.addDocument(doc);
Index: src/test/org/apache/lucene/search/TestElevationComparator.java
===================================================================
--- src/test/org/apache/lucene/search/TestElevationComparator.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestElevationComparator.java (working copy)
@@ -23,8 +23,6 @@
import org.apache.lucene.index.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -36,7 +34,7 @@
//@Test
public void testSorting() throws Throwable {
Directory directory = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(1000);
writer.addDocument(adoc(new String[] {"id", "a", "title", "ipod", "str_s", "a"}));
Index: src/test/org/apache/lucene/search/TestExplanations.java
===================================================================
--- src/test/org/apache/lucene/search/TestExplanations.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestExplanations.java (working copy)
@@ -32,7 +32,6 @@
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests primitive queries (ie: that rewrite to themselves) to
@@ -52,7 +51,7 @@
public static final String KEY = "KEY";
public static final String FIELD = "field";
public static final QueryParser qp =
- new QueryParser(Version.LUCENE_CURRENT, FIELD, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
@Override
public void tearDown() throws Exception {
@@ -64,7 +63,7 @@
public void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
Index: src/test/org/apache/lucene/search/TestFieldCache.java
===================================================================
--- src/test/org/apache/lucene/search/TestFieldCache.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestFieldCache.java (working copy)
@@ -23,8 +23,6 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
@@ -41,7 +39,7 @@
protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
long theLong = Long.MAX_VALUE;
double theDouble = Double.MAX_VALUE;
byte theByte = Byte.MAX_VALUE;
Index: src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (working copy)
@@ -27,7 +27,6 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
/**
* A basic 'positive' Unit test class for the FieldCacheRangeFilter class.
@@ -532,7 +531,7 @@
// test using a sparse index (with deleted docs). The DocIdSet should be not cacheable, as it uses TermDocs if the range contains 0
public void testSparseIndex() throws IOException {
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), T, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), T, IndexWriter.MaxFieldLength.LIMITED);
for (int d = -20; d <= 20; d++) {
Document doc = new Document();
Index: src/test/org/apache/lucene/search/TestFilteredQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestFilteredQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestFilteredQuery.java (working copy)
@@ -27,8 +27,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.DocIdBitSet;
-import org.apache.lucene.util.Version;
-
import java.util.BitSet;
/**
@@ -50,7 +48,7 @@
public void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add (new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/search/TestFilteredSearch.java
===================================================================
--- src/test/org/apache/lucene/search/TestFilteredSearch.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestFilteredSearch.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -51,13 +49,13 @@
RAMDirectory directory = new RAMDirectory();
int[] filterBits = {1, 36};
SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits);
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
searchFiltered(writer, directory, filter, enforceSingleSegment);
// run the test on more than one segment
enforceSingleSegment = false;
// reset - it is stateful
filter.reset();
- writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// we index 60 docs - this will create 6 segments
writer.setMaxBufferedDocs(10);
searchFiltered(writer, directory, filter, enforceSingleSegment);
Index: src/test/org/apache/lucene/search/TestFuzzyQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy)
@@ -33,7 +33,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.util.Version;
/**
* Tests {@link FuzzyQuery}.
@@ -43,7 +42,7 @@
public void testFuzziness() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc("aaaaa", writer);
addDoc("aaaab", writer);
addDoc("aaabb", writer);
@@ -200,7 +199,7 @@
public void testFuzzinessLong() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
addDoc("aaaaaaa", writer);
addDoc("segment", writer);
writer.optimize();
@@ -288,7 +287,7 @@
public void testTokenLengthOpt() throws IOException {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
addDoc("12345678911", writer);
addDoc("segment", writer);
@@ -320,7 +319,7 @@
public void testGiga() throws Exception {
- StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
Directory index = new MockRAMDirectory();
IndexWriter w = new IndexWriter(index, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
@@ -345,7 +344,7 @@
IndexReader r = w.getReader();
w.close();
- Query q = new QueryParser(Version.LUCENE_CURRENT, "field", analyzer).parse( "giga~0.9" );
+ Query q = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer).parse( "giga~0.9" );
// 3. search
IndexSearcher searcher = new IndexSearcher(r);
Index: src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy)
@@ -29,14 +29,13 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Tests MatchAllDocsQuery.
*
*/
public class TestMatchAllDocsQuery extends LuceneTestCase {
- private Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ private Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
public void testQuery() throws Exception {
@@ -100,7 +99,7 @@
assertEquals(2, hits.length);
// test parsable toString()
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "key", analyzer);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", analyzer);
hits = is.search(qp.parse(new MatchAllDocsQuery().toString()), null, 1000).scoreDocs;
assertEquals(2, hits.length);
Index: src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy)
@@ -28,8 +28,6 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.util.LinkedList;
import java.util.Collections;
@@ -47,7 +45,7 @@
public void testPhrasePrefix() throws IOException {
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
add("blueberry pie", writer);
add("blueberry strudel", writer);
add("blueberry pizza", writer);
@@ -141,7 +139,7 @@
// The contained PhraseMultiQuery must contain exactly one term array.
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
add("blueberry pie", writer);
add("blueberry chewing gum", writer);
add("blue raspberry pie", writer);
@@ -169,7 +167,7 @@
public void testPhrasePrefixWithBooleanQuery() throws IOException {
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.LIMITED);
add("This is a test", "object", writer);
add("a note", "note", writer);
writer.close();
Index: src/test/org/apache/lucene/search/TestMultiSearcher.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiSearcher.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMultiSearcher.java (working copy)
@@ -30,8 +30,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
@@ -84,9 +82,9 @@
lDoc3.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating an index writer for the first index
- IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// creating an index writer for the second index, but writing nothing
- IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
//--------------------------------------------------------------------
// scenario 1
@@ -103,7 +101,7 @@
writerB.close();
// creating the query
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new StandardAnalyzer(TEST_VERSION_CURRENT));
Query query = parser.parse("handle:1");
// building the searchables
@@ -130,7 +128,7 @@
//--------------------------------------------------------------------
// adding one document to the empty index
- writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writerB.addDocument(lDoc);
writerB.optimize();
writerB.close();
@@ -176,7 +174,7 @@
readerB.close();
// optimizing the index with the writer
- writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writerB.optimize();
writerB.close();
Index: src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiSearcherRanking.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMultiSearcherRanking.java (working copy)
@@ -26,8 +26,6 @@
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
/**
@@ -88,7 +86,7 @@
private void checkQuery(String queryStr) throws IOException, ParseException {
// check result hit ranking
if(verbose) System.out.println("Query: " + queryStr);
- QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION_CURRENT));
Query query = queryParser.parse(queryStr);
ScoreDoc[] multiSearcherHits = multiSearcher.search(query, null, 1000).scoreDocs;
ScoreDoc[] singleSearcherHits = singleSearcher.search(query, null, 1000).scoreDocs;
@@ -115,12 +113,12 @@
super.setUp();
// create MultiSearcher from two seperate searchers
Directory d1 = new RAMDirectory();
- IndexWriter iw1 = new IndexWriter(d1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+ IndexWriter iw1 = new IndexWriter(d1, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addCollection1(iw1);
iw1.close();
Directory d2 = new RAMDirectory();
- IndexWriter iw2 = new IndexWriter(d2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+ IndexWriter iw2 = new IndexWriter(d2, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addCollection2(iw2);
iw2.close();
@@ -132,7 +130,7 @@
// create IndexSearcher which contains all documents
Directory d = new RAMDirectory();
- IndexWriter iw = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addCollection1(iw);
addCollection2(iw);
Index: src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (working copy)
@@ -26,8 +26,6 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
@@ -66,7 +64,7 @@
"X 4 5 6" };
small = new RAMDirectory();
- IndexWriter writer = new IndexWriter(small, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(small, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < data.length; i++) {
@@ -617,7 +615,7 @@
/* build an index */
RAMDirectory farsiIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(farsiIndex, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+ IndexWriter writer = new IndexWriter(farsiIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
@@ -657,7 +655,7 @@
/* build an index */
RAMDirectory danishIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(danishIndex, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+ IndexWriter writer = new IndexWriter(danishIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
IndexWriter.MaxFieldLength.LIMITED);
// Danish collation orders the words below in the given order
Index: src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
@@ -42,7 +41,7 @@
public void setUp() throws Exception {
super.setUp();
IndexWriter writer
- = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
//writer.setUseCompoundFile(false);
//writer.infoStream = System.out;
for (int i = 0; i < numDocs; i++) {
Index: src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
@@ -44,7 +43,7 @@
final Random rnd = newRandom();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
Index: src/test/org/apache/lucene/search/TestNot.java
===================================================================
--- src/test/org/apache/lucene/search/TestNot.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestNot.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.util.Version;
/** Similarity unit test.
*
@@ -39,7 +38,7 @@
public void testNot() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.ANALYZED));
@@ -49,7 +48,7 @@
writer.close();
Searcher searcher = new IndexSearcher(store, true);
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
Query query = parser.parse("a NOT b");
//System.out.println(query);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Index: src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
===================================================================
--- src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.Version;
public class TestNumericRangeQuery32 extends LuceneTestCase {
// distance of entries
@@ -47,7 +46,7 @@
BooleanQuery.setMaxClauseCount(3*255*2 + 255);
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
true, MaxFieldLength.UNLIMITED);
NumericField
Index: src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
===================================================================
--- src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.Version;
public class TestNumericRangeQuery64 extends LuceneTestCase {
// distance of entries
@@ -46,7 +45,7 @@
BooleanQuery.setMaxClauseCount(7*255*2 + 255);
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
true, MaxFieldLength.UNLIMITED);
NumericField
Index: src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -49,7 +48,7 @@
throws IOException
{
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Index: src/test/org/apache/lucene/search/TestPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy)
@@ -51,7 +51,7 @@
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
}
@Override
@@ -239,7 +239,7 @@
public void testPhraseQueryInConjunctionScorer() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
@@ -275,7 +275,7 @@
searcher.close();
- writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
doc = new Document();
doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED));
@@ -325,7 +325,7 @@
public void testSlopScoring() throws IOException {
Directory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
@@ -362,8 +362,8 @@
}
public void testToString() throws Exception {
- StopAnalyzer analyzer = new StopAnalyzer(Version.LUCENE_CURRENT);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", analyzer);
+ StopAnalyzer analyzer = new StopAnalyzer(TEST_VERSION_CURRENT);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer);
qp.setEnablePositionIncrements(true);
PhraseQuery q = (PhraseQuery)qp.parse("\"this hi this is a test is\"");
assertEquals("field:\"? hi ? ? ? test\"", q.toString());
Index: src/test/org/apache/lucene/search/TestPositionIncrement.java
===================================================================
--- src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy)
@@ -191,7 +191,7 @@
assertEquals(0, hits.length);
// should not find "1 2" because there is a gap of 1 in the index
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field",
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field",
new StopWhitespaceAnalyzer(false));
q = (PhraseQuery) qp.parse("\"1 2\"");
hits = searcher.search(q, null, 1000).scoreDocs;
@@ -215,7 +215,7 @@
assertEquals(0, hits.length);
// when both qp qnd stopFilter propagate increments, we should find the doc.
- qp = new QueryParser(Version.LUCENE_CURRENT, "field",
+ qp = new QueryParser(TEST_VERSION_CURRENT, "field",
new StopWhitespaceAnalyzer(true));
qp.setEnablePositionIncrements(true);
q = (PhraseQuery) qp.parse("\"1 stop 2\"");
@@ -225,15 +225,15 @@
private static class StopWhitespaceAnalyzer extends Analyzer {
boolean enablePositionIncrements;
- final WhitespaceAnalyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ final WhitespaceAnalyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
public StopWhitespaceAnalyzer(boolean enablePositionIncrements) {
this.enablePositionIncrements = enablePositionIncrements;
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream ts = a.tokenStream(fieldName,reader);
- return new StopFilter(enablePositionIncrements?Version.LUCENE_CURRENT:Version.LUCENE_24, ts,
- new CharArraySet(Version.LUCENE_CURRENT, Collections.singleton("stop"), true));
+ return new StopFilter(enablePositionIncrements?TEST_VERSION_CURRENT:Version.LUCENE_24, ts,
+ new CharArraySet(TEST_VERSION_CURRENT, Collections.singleton("stop"), true));
}
}
@@ -318,7 +318,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new LowerCaseTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, reader);
return new PayloadFilter(result, fieldName);
}
}
Index: src/test/org/apache/lucene/search/TestPrefixFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestPrefixFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestPrefixFilter.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
@@ -38,7 +37,7 @@
"/Computers/Mac/One",
"/Computers/Mac/Two",
"/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
Index: src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (working copy)
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -47,7 +45,7 @@
super.setUp();
IndexWriter writer = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 5137; ++i) {
Index: src/test/org/apache/lucene/search/TestPrefixQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPrefixQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestPrefixQuery.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
@@ -37,7 +36,7 @@
String[] categories = new String[] {"/Computers",
"/Computers/Mac",
"/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
Index: src/test/org/apache/lucene/search/TestQueryTermVector.java
===================================================================
--- src/test/org/apache/lucene/search/TestQueryTermVector.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestQueryTermVector.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
public class TestQueryTermVector extends LuceneTestCase {
@@ -43,7 +42,7 @@
result = new QueryTermVector(null);
assertTrue(result.getTerms().length == 0);
- result = new QueryTermVector("foo bar foo again foo bar go go go", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ result = new QueryTermVector("foo bar foo again foo bar go go go", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
assertTrue(result != null);
terms = result.getTerms();
assertTrue(terms.length == 4);
Index: src/test/org/apache/lucene/search/TestQueryWrapperFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (working copy)
@@ -33,7 +33,7 @@
public void testBasic() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "value", Store.NO, Index.ANALYZED));
Index: src/test/org/apache/lucene/search/TestScorerPerf.java
===================================================================
--- src/test/org/apache/lucene/search/TestScorerPerf.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestScorerPerf.java (working copy)
@@ -2,8 +2,6 @@
import org.apache.lucene.util.DocIdBitSet;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.util.Random;
import java.util.BitSet;
import java.io.IOException;
@@ -46,7 +44,7 @@
// Create a dummy index with nothing in it.
// This could possibly fail if Lucene starts checking for docid ranges...
RAMDirectory rd = new RAMDirectory();
- IndexWriter iw = new IndexWriter(rd,new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(rd,new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
iw.addDocument(new Document());
iw.close();
s = new IndexSearcher(rd, true);
@@ -61,7 +59,7 @@
terms[i] = new Term("f",Character.toString((char)('A'+i)));
}
- IndexWriter iw = new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i=0; i<nDocs; i++) {
Document d = new Document();
for (int j=0; j<nTerms; j++) {
Index: src/test/org/apache/lucene/search/TestSetNorm.java
===================================================================
--- src/test/org/apache/lucene/search/TestSetNorm.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestSetNorm.java (working copy)
@@ -20,7 +20,6 @@
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
@@ -40,7 +39,7 @@
public void testSetNorm() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// add the same document four times
Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Index: src/test/org/apache/lucene/search/TestSimilarity.java
===================================================================
--- src/test/org/apache/lucene/search/TestSimilarity.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestSimilarity.java (working copy)
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.util.Collection;
@@ -65,7 +63,7 @@
public void testSimilarity() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
writer.setSimilarity(new SimpleSimilarity());
Index: src/test/org/apache/lucene/search/TestSimpleExplanations.java
===================================================================
--- src/test/org/apache/lucene/search/TestSimpleExplanations.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestSimpleExplanations.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.util.Version;
/**
@@ -317,8 +316,8 @@
Document lDoc3 = new Document();
lDoc3.add(new Field("handle", "1 2", Field.Store.YES, Field.Index.ANALYZED));
- IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
- IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writerA.addDocument(lDoc);
writerA.addDocument(lDoc2);
@@ -328,7 +327,7 @@
writerB.addDocument(lDoc3);
writerB.close();
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new StandardAnalyzer(TEST_VERSION_CURRENT));
Query query = parser.parse("handle:1");
Searcher[] searchers = new Searcher[2];
Index: src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (working copy)
@@ -18,8 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -118,7 +116,7 @@
query.setSlop(slop);
RAMDirectory ramDir = new RAMDirectory();
- WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(ramDir, analyzer, MaxFieldLength.UNLIMITED);
writer.addDocument(doc);
writer.close();
Index: src/test/org/apache/lucene/search/TestSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestSort.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestSort.java (working copy)
@@ -41,7 +41,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.DocIdBitSet;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* Unit tests for sorting code.
@@ -104,7 +103,7 @@
private Searcher getIndex (boolean even, boolean odd)
throws IOException {
RAMDirectory indexStore = new RAMDirectory ();
- IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(1000);
for (int i=0; i<data.length; ++i) {
@@ -140,7 +139,7 @@
private IndexSearcher getFullStrings() throws CorruptIndexException, LockObtainFailedException, IOException {
RAMDirectory indexStore = new RAMDirectory ();
- IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(4);
writer.setMergeFactor(97);
for (int i=0; i<NUM_STRINGS; i++) {
Index: src/test/org/apache/lucene/search/TestSpanQueryFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestSpanQueryFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestSpanQueryFilter.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestSpanQueryFilter extends LuceneTestCase {
@@ -40,7 +39,7 @@
public void testFilterWorks() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < 500; i++) {
Document document = new Document();
Index: src/test/org/apache/lucene/search/TestTermRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermRangeFilter.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestTermRangeFilter.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
/**
* A basic 'positive' Unit test class for the TermRangeFilter class.
@@ -340,7 +339,7 @@
/* build an index */
RAMDirectory farsiIndex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(farsiIndex, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+ IndexWriter writer = new IndexWriter(farsiIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content","\u0633\u0627\u0628",
@@ -380,7 +379,7 @@
/* build an index */
RAMDirectory danishIndex = new RAMDirectory();
IndexWriter writer = new IndexWriter
- (danishIndex, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+ (danishIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
IndexWriter.MaxFieldLength.LIMITED);
// Danish collation orders the words below in the given order
// (example taken from TestSort.testInternationalSort() ).
Index: src/test/org/apache/lucene/search/TestTermRangeQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermRangeQuery.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestTermRangeQuery.java (working copy)
@@ -28,8 +28,6 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;
import java.io.Reader;
import java.util.Locale;
@@ -311,7 +309,7 @@
}
private void initializeIndex(String[] values) throws IOException {
- initializeIndex(values, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ initializeIndex(values, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
}
private void initializeIndex(String[] values, Analyzer analyzer) throws IOException {
@@ -323,7 +321,7 @@
}
private void addDoc(String content) throws IOException {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
insertDoc(writer, content);
writer.close();
}
Index: src/test/org/apache/lucene/search/TestTermScorer.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermScorer.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestTermScorer.java (working copy)
@@ -22,7 +22,6 @@
import java.util.List;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -52,7 +51,7 @@
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < values.length; i++)
{
Document doc = new Document();
Index: src/test/org/apache/lucene/search/TestTermVectors.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermVectors.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestTermVectors.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -42,7 +41,7 @@
@Override
public void setUp() throws Exception {
super.setUp();
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
//writer.setUseCompoundFile(true);
//writer.infoStream = System.out;
@@ -94,7 +93,7 @@
public void testTermVectorsFieldOrder() throws IOException {
Directory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -232,7 +231,7 @@
Directory dir = new MockRAMDirectory();
try {
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
assertTrue(writer != null);
writer.addDocument(testDoc1);
@@ -348,7 +347,7 @@
// Test only a few docs having vectors
public void testRareVectors() throws IOException {
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<100;i++) {
Document doc = new Document();
@@ -380,7 +379,7 @@
// In a single doc, for the same field, mix the term
// vectors up
public void testMixedVectrosVectors() throws IOException {
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "one",
Index: src/test/org/apache/lucene/search/TestThreadSafe.java
===================================================================
--- src/test/org/apache/lucene/search/TestThreadSafe.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestThreadSafe.java (working copy)
@@ -18,7 +18,6 @@
import junit.framework.TestCase;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexReader;
@@ -106,7 +105,7 @@
String[] words = "now is the time for all good men to come to the aid of their country".split(" ");
void buildDir(Directory dir, int nDocs, int maxFields, int maxFieldLen) throws IOException {
- IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
iw.setMaxBufferedDocs(10);
for (int j=0; j<nDocs; j++) {
Document d = new Document();
Index: src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
===================================================================
--- src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (working copy)
@@ -31,7 +31,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util.ThreadInterruptedException;
/**
@@ -76,7 +75,7 @@
"blueberry pizza",
};
Directory directory = new RAMDirectory();
- IndexWriter iw = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int i=0; i<N_DOCS; i++) {
add(docText[i%docText.length], iw);
@@ -89,7 +88,7 @@
for (int i = 1; i < docText.length; i++) {
qtxt += ' ' + docText[i]; // large query so that search will be longer
}
- QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
query = queryParser.parse(qtxt);
// warm the searcher
Index: src/test/org/apache/lucene/search/TestWildcard.java
===================================================================
--- src/test/org/apache/lucene/search/TestWildcard.java (revision 908485)
+++ src/test/org/apache/lucene/search/TestWildcard.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
import java.io.IOException;
@@ -212,7 +211,7 @@
private RAMDirectory getIndexStore(String field, String[] contents)
throws IOException {
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < contents.length; ++i) {
Document doc = new Document();
doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
@@ -240,7 +239,7 @@
public void testParsingAndSearching() throws Exception {
String field = "content";
boolean dbg = false;
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, field, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.setAllowLeadingWildcard(true);
String docs[] = {
"\\ abcdefg1",
@@ -270,7 +269,7 @@
// prepare the index
RAMDirectory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docs.length; i++) {
Document doc = new Document();
doc.add(new Field(field,docs[i],Store.NO,Index.ANALYZED));
Index: src/test/org/apache/lucene/store/TestBufferedIndexInput.java
===================================================================
--- src/test/org/apache/lucene/store/TestBufferedIndexInput.java (revision 908485)
+++ src/test/org/apache/lucene/store/TestBufferedIndexInput.java (working copy)
@@ -37,7 +37,6 @@
import org.apache.lucene.store.NIOFSDirectory.NIOFSIndexInput;
import org.apache.lucene.store.SimpleFSDirectory.SimpleFSIndexInput;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.ArrayUtil;
@@ -243,7 +242,7 @@
File indexDir = new File(System.getProperty("tempDir"), "testSetBufferSize");
MockFSDirectory dir = new MockFSDirectory(indexDir, newRandom());
try {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
for(int i=0;i<37;i++) {
Document doc = new Document();
Index: src/test/org/apache/lucene/store/TestFileSwitchDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/TestFileSwitchDirectory.java (revision 908485)
+++ src/test/org/apache/lucene/store/TestFileSwitchDirectory.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.TestIndexWriterReader;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
public class TestFileSwitchDirectory extends LuceneTestCase {
/**
@@ -42,7 +41,7 @@
RAMDirectory secondaryDir = new MockRAMDirectory();
FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true);
- IndexWriter writer = new IndexWriter(fsd, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(fsd, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
TestIndexWriterReader.createIndexNoClose(true, "ram", writer);
Index: src/test/org/apache/lucene/store/TestLockFactory.java
===================================================================
--- src/test/org/apache/lucene/store/TestLockFactory.java (revision 908485)
+++ src/test/org/apache/lucene/store/TestLockFactory.java (working copy)
@@ -33,7 +33,6 @@
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
public class TestLockFactory extends LuceneTestCase {
@@ -49,7 +48,7 @@
// Lock prefix should have been set:
assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
// add 100 documents (so that commit lock is used)
@@ -82,14 +81,14 @@
assertTrue("RAMDirectory.setLockFactory did not take",
NoLockFactory.class.isInstance(dir.getLockFactory()));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
// Create a 2nd IndexWriter. This is normally not allowed but it should run through since we're not
// using any locks:
IndexWriter writer2 = null;
try {
- writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false,
+ writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false,
IndexWriter.MaxFieldLength.LIMITED);
} catch (Exception e) {
e.printStackTrace(System.out);
@@ -110,13 +109,13 @@
assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.getLockFactory(),
SingleInstanceLockFactory.class.isInstance(dir.getLockFactory()));
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
// Create a 2nd IndexWriter. This should fail:
IndexWriter writer2 = null;
try {
- writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false,
+ writer2 = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false,
IndexWriter.MaxFieldLength.LIMITED);
fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory");
} catch (IOException e) {
@@ -153,7 +152,7 @@
FSDirectory fs1 = FSDirectory.open(indexDir, lockFactory);
// First create a 1 doc index:
- IndexWriter w = new IndexWriter(fs1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter w = new IndexWriter(fs1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addDoc(w);
w.close();
@@ -263,7 +262,7 @@
}
@Override
public void run() {
- WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = null;
for(int i=0;i<this.numIteration;i++) {
try {
Index: src/test/org/apache/lucene/store/TestRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/TestRAMDirectory.java (revision 908485)
+++ src/test/org/apache/lucene/store/TestRAMDirectory.java (working copy)
@@ -25,8 +25,6 @@
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -56,7 +54,7 @@
indexDir = new File(tempDir, "RAMDirIndex");
Directory dir = FSDirectory.open(indexDir);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
// add some documents
Document doc = null;
for (int i = 0; i < docsToAdd; i++) {
@@ -107,7 +105,7 @@
final MockRAMDirectory ramDir = new MockRAMDirectory(dir);
dir.close();
- final IndexWriter writer = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ final IndexWriter writer = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
Index: src/test/org/apache/lucene/store/TestWindowsMMap.java
===================================================================
--- src/test/org/apache/lucene/store/TestWindowsMMap.java (revision 908485)
+++ src/test/org/apache/lucene/store/TestWindowsMMap.java (working copy)
@@ -69,7 +69,7 @@
// plan to add a set of useful stopwords, consider changing some of the
// interior filters.
- StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet());
+ StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet());
// TODO: something about lock timeouts and leftover locks.
IndexWriter writer = new IndexWriter(storeDirectory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
IndexSearcher searcher = new IndexSearcher(storeDirectory, true);
Index: src/test/org/apache/lucene/TestDemo.java
===================================================================
--- src/test/org/apache/lucene/TestDemo.java (revision 908485)
+++ src/test/org/apache/lucene/TestDemo.java (working copy)
@@ -32,7 +32,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* A very simple demo used in the API documentation (src/java/overview.html).
@@ -44,7 +43,7 @@
public void testDemo() throws IOException, ParseException {
- Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
// Store the index in memory:
Directory directory = new RAMDirectory();
@@ -62,7 +61,7 @@
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
// Parse a simple query that searches for "text":
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fieldname", analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
Query query = parser.parse("text");
ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Index: src/test/org/apache/lucene/TestMergeSchedulerExternal.java
===================================================================
--- src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 908485)
+++ src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy)
@@ -18,7 +18,6 @@
*/
import java.io.IOException;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.Directory;
@@ -96,7 +95,7 @@
Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
MyMergeScheduler ms = new MyMergeScheduler();
writer.setMergeScheduler(ms);
writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/TestSearch.java
===================================================================
--- src/test/org/apache/lucene/TestSearch.java (revision 908485)
+++ src/test/org/apache/lucene/TestSearch.java (working copy)
@@ -22,7 +22,6 @@
import java.io.StringWriter;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -74,7 +73,7 @@
throws Exception
{
Directory directory = new RAMDirectory();
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -108,7 +107,7 @@
};
ScoreDoc[] hits = null;
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
parser.setPhraseSlop(4);
for (int j = 0; j < queries.length; j++) {
Query query = parser.parse(queries[j]);
Index: src/test/org/apache/lucene/TestSearchForDuplicates.java
===================================================================
--- src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 908485)
+++ src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy)
@@ -27,8 +27,6 @@
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.util.LuceneTestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -79,7 +77,7 @@
private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
Directory directory = new RAMDirectory();
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(directory, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -98,7 +96,7 @@
// try a search without OR
Searcher searcher = new IndexSearcher(directory, true);
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
Query query = parser.parse(HIGH_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
@@ -113,7 +111,7 @@
searcher = new IndexSearcher(directory, true);
hits = null;
- parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+ parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
Index: src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (revision 908485)
+++ src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (working copy)
@@ -67,7 +67,7 @@
Directory dir = new MockRAMDirectory();
SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
- IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
// Force frequent flushes
writer.setMaxBufferedDocs(2);
Document doc = new Document();
@@ -83,7 +83,7 @@
writer.close();
copyFiles(dir, cp);
- writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
copyFiles(dir, cp);
for(int i=0;i<7;i++) {
writer.addDocument(doc);
@@ -95,7 +95,7 @@
writer.close();
copyFiles(dir, cp);
dp.release();
- writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
writer.close();
try {
copyFiles(dir, cp);
@@ -111,7 +111,7 @@
final long stopTime = System.currentTimeMillis() + 1000;
SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
- final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+ final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
// Force frequent flushes
writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCase.java (revision 908485)
+++ src/test/org/apache/lucene/util/LuceneTestCase.java (working copy)
@@ -53,6 +53,8 @@
@Deprecated
public abstract class LuceneTestCase extends TestCase {
+ public static final Version TEST_VERSION_CURRENT = LuceneTestCaseJ4.TEST_VERSION_CURRENT;
+
public LuceneTestCase() {
super();
}
Index: src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (revision 908485)
+++ src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (working copy)
@@ -73,6 +73,9 @@
//@RunWith(RunBareWrapper.class)
public class LuceneTestCaseJ4 extends TestWatchman {
+ /** Change this when development starts for new Lucene version: */
+ public static final Version TEST_VERSION_CURRENT = Version.LUCENE_31;
+
// This is how we get control when errors occur.
// Think of this as start/end/success/failed
// events.
@@ -214,7 +217,7 @@
*/
public Random newRandom() {
if (seed != null) {
- throw new IllegalStateException("please call LuceneTestCase.newRandom only once per test");
+ throw new IllegalStateException("please call LuceneTestCaseJ4.newRandom only once per test");
}
return newRandom(seedRnd.nextLong());
}
@@ -226,7 +229,7 @@
*/
public Random newRandom(long seed) {
if (this.seed != null) {
- throw new IllegalStateException("please call LuceneTestCase.newRandom only once per test");
+ throw new IllegalStateException("please call LuceneTestCaseJ4.newRandom only once per test");
}
this.seed = Long.valueOf(seed);
return new Random(seed);
Index: src/test/org/apache/lucene/util/TestCharacterUtils.java
===================================================================
--- src/test/org/apache/lucene/util/TestCharacterUtils.java (revision 908485)
+++ src/test/org/apache/lucene/util/TestCharacterUtils.java (working copy)
@@ -18,6 +18,7 @@
*/
import static org.junit.Assert.*;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;
import java.io.IOException;
import java.io.Reader;
@@ -45,7 +46,7 @@
} catch (ArrayIndexOutOfBoundsException e) {
}
- CharacterUtils java5 = CharacterUtils.getInstance(Version.LUCENE_31);
+ CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3));
@@ -71,7 +72,7 @@
} catch (StringIndexOutOfBoundsException e) {
}
- CharacterUtils java5 = CharacterUtils.getInstance(Version.LUCENE_31);
+ CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3));
@@ -93,7 +94,7 @@
assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3, 5));
assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3, 4));
- CharacterUtils java5 = CharacterUtils.getInstance(Version.LUCENE_31);
+ CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
assertEquals((int) 'A', java5.codePointAt(cpAt3, 0, 2));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3, 5));
@@ -122,7 +123,7 @@
@Test
public void testFillNoHighSurrogate() throws IOException {
- Version[] versions = new Version[] { Version.LUCENE_30, Version.LUCENE_31 };
+ Version[] versions = new Version[] { Version.LUCENE_30, TEST_VERSION_CURRENT };
for (Version version : versions) {
CharacterUtils instance = CharacterUtils.getInstance(version);
Reader reader = new StringReader("helloworld");
@@ -144,7 +145,7 @@
@Test
public void testFillJava15() throws IOException {
String input = "1234\ud801\udc1c789123\ud801\ud801\udc1c\ud801";
- CharacterUtils instance = CharacterUtils.getInstance(Version.LUCENE_31);
+ CharacterUtils instance = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
Reader reader = new StringReader(input);
CharacterBuffer buffer = CharacterUtils.newCharacterBuffer(5);
assertTrue(instance.fill(buffer, reader));
Index: src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
===================================================================
--- src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (revision 908485)
+++ src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (working copy)
@@ -45,9 +45,9 @@
RAMDirectory dirA = new RAMDirectory();
RAMDirectory dirB = new RAMDirectory();
- IndexWriter wA = new IndexWriter(dirA, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter wA = new IndexWriter(dirA, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
- IndexWriter wB = new IndexWriter(dirB, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter wB = new IndexWriter(dirB, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
long theLong = Long.MAX_VALUE;