Java Code Examples for org.apache.lucene.util.TestUtil#randomSimpleString()
The following examples show how to use
org.apache.lucene.util.TestUtil#randomSimpleString().
Each example notes its original project, source file, and license directly above the code.
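For orientation, here is a minimal sketch (not taken from any of the projects below) of the three overloads that appear throughout the examples: randomSimpleString(Random), randomSimpleString(Random, maxLength), and randomSimpleString(Random, minLength, maxLength). TestUtil ships with Lucene's test framework; inside a LuceneTestCase the examples use the repeatable random() helper, while this sketch substitutes a plain java.util.Random, and the class name and concrete lengths are illustrative assumptions.

import java.util.Random;
import org.apache.lucene.util.TestUtil;

public class RandomSimpleStringSketch {
  public static void main(String[] args) {
    Random r = new Random(42); // stand-in for LuceneTestCase.random()

    // No length arguments: a short string of simple (typically lowercase ASCII) characters.
    String any = TestUtil.randomSimpleString(r);

    // One length argument: an upper bound only, so the result may be empty.
    String capped = TestUtil.randomSimpleString(r, 20);

    // Two length arguments: minimum and maximum lengths,
    // e.g. the (1, 300) and (30000, 30000) calls in the examples below.
    String bounded = TestUtil.randomSimpleString(r, 5, 50);

    System.out.println(any + " | " + capped + " | " + bounded);
  }
}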
Example 1
Source File: FileDictionaryTest.java from lucene-solr with Apache License 2.0
private Map.Entry<List<String>, String> generateFileEntry(String fieldDelimiter, boolean hasWeight, boolean hasPayload) {
  List<String> entryValues = new ArrayList<>();
  StringBuilder sb = new StringBuilder();
  String term = TestUtil.randomSimpleString(random(), 1, 300);
  sb.append(term);
  entryValues.add(term);
  if (hasWeight) {
    sb.append(fieldDelimiter);
    long weight = TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE);
    sb.append(weight);
    entryValues.add(String.valueOf(weight));
  }
  if (hasPayload) {
    sb.append(fieldDelimiter);
    String payload = TestUtil.randomSimpleString(random(), 1, 300);
    sb.append(payload);
    entryValues.add(payload);
  }
  sb.append("\n");
  return new SimpleEntry<>(entryValues, sb.toString());
}
Example 2
Source File: TestExportWriter.java from lucene-solr with Apache License 2.0
private void createLargeIndex() throws Exception {
  int BATCH_SIZE = 1000;
  int NUM_BATCHES = 100;
  SolrInputDocument[] docs = new SolrInputDocument[BATCH_SIZE];
  for (int i = 0; i < NUM_BATCHES; i++) {
    for (int j = 0; j < BATCH_SIZE; j++) {
      docs[j] = new SolrInputDocument(
          "id", String.valueOf(i * BATCH_SIZE + j),
          "batch_i_p", String.valueOf(i),
          "random_i_p", String.valueOf(random().nextInt(BATCH_SIZE)),
          "sortabledv", TestUtil.randomSimpleString(random(), 2, 3),
          "sortabledv_udvas", String.valueOf(random().nextInt(100)),
          "small_i_p", String.valueOf((i + j) % 7)
      );
    }
    updateJ(jsonAdd(docs), null);
  }
  assertU(commit());
}
Example 3
Source File: AnalyzingSuggesterTest.java from lucene-solr with Apache License 2.0
public void testTooLongSuggestion() throws Exception {
  Analyzer a = new MockAnalyzer(random());
  Directory tempDir = getDirectory();
  AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a);
  String bigString = TestUtil.randomSimpleString(random(), 30000, 30000);
  IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> {
    suggester.build(new InputArrayIterator(new Input[] { new Input(bigString, 7) }));
  });
  assertTrue(ex.getMessage().contains("input automaton is too large"));
  IOUtils.close(a, tempDir);
}
Example 4
Source File: BufferStoreTest.java from lucene-solr with Apache License 2.0
@Before
public void setup() {
  metrics = new Metrics();
  SolrMetricManager metricManager = new SolrMetricManager();
  String registry = TestUtil.randomSimpleString(random(), 2, 10);
  String scope = TestUtil.randomSimpleString(random(), 2, 10);
  SolrMetricsContext solrMetricsContext = new SolrMetricsContext(metricManager, registry, "foo");
  metrics.initializeMetrics(solrMetricsContext, scope);
  metricsMap = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager.registry(registry)
      .getMetrics().get("CACHE." + scope + ".hdfsBlockCache")).getGauge();
  BufferStore.initNewBuffer(blockSize, blockSize, metrics);
  store = BufferStore.instance(blockSize);
}
Example 5
Source File: SolrMetricManagerTest.java from lucene-solr with Apache License 2.0
@Test
public void testRegistryName() throws Exception {
  Random r = random();
  String name = TestUtil.randomSimpleString(r, 1, 10);

  String result = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, name, "collection1");
  assertEquals("solr.core." + name + ".collection1", result);

  // try it with already prefixed name - group will be ignored
  result = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, result);
  assertEquals("solr.core." + name + ".collection1", result);

  // try it with already prefixed name but with additional segments
  result = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, result, "shard1", "replica1");
  assertEquals("solr.core." + name + ".collection1.shard1.replica1", result);
}
Example 6
Source File: TestSolrFieldCacheBean.java from lucene-solr with Apache License 2.0
private void assertEntryListNotIncluded(boolean checkJmx) {
  SolrFieldCacheBean mbean = new SolrFieldCacheBean();
  Random r = random();
  String registryName = TestUtil.randomSimpleString(r, 1, 10);
  SolrMetricManager metricManager = h.getCoreContainer().getMetricManager();
  SolrMetricsContext solrMetricsContext = new SolrMetricsContext(metricManager, registryName, "foo");
  mbean.initializeMetrics(solrMetricsContext, null);
  MetricsMap metricsMap = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager
      .registry(registryName).getMetrics().get("CACHE.fieldCache")).getGauge();
  Map<String, Object> metrics = checkJmx ? metricsMap.getValue(true) : metricsMap.getValue();
  assertTrue(((Number) metrics.get("entries_count")).longValue() > 0);
  assertNull(metrics.get("total_size"));
  assertNull(metrics.get("entry#0"));
}
Example 7
Source File: BaseMergePolicyTestCase.java from lucene-solr with Apache License 2.0
public void testFindForcedDeletesMerges() throws IOException {
  MergePolicy mp = mergePolicy();
  if (mp instanceof FilterMergePolicy) {
    assumeFalse("test doesn't work with MockRandomMP",
        ((FilterMergePolicy) mp).in instanceof MockRandomMergePolicy);
  }
  SegmentInfos infos = new SegmentInfos(Version.LATEST.major);
  try (Directory directory = newDirectory()) {
    MergePolicy.MergeContext context = new MockMergeContext(s -> 0);
    int numSegs = random().nextInt(10);
    for (int i = 0; i < numSegs; i++) {
      SegmentInfo info = new SegmentInfo(
          directory, // dir
          Version.LATEST, // version
          Version.LATEST, // min version
          TestUtil.randomSimpleString(random()), // name
          random().nextInt(Integer.MAX_VALUE), // maxDoc
          random().nextBoolean(), // isCompoundFile
          null, // codec
          Collections.emptyMap(), // diagnostics
          TestUtil.randomSimpleString( // id
              random(), StringHelper.ID_LENGTH, StringHelper.ID_LENGTH)
              .getBytes(StandardCharsets.US_ASCII),
          Collections.emptyMap(), // attributes
          null /* indexSort */);
      info.setFiles(Collections.emptyList());
      infos.add(new SegmentCommitInfo(info, random().nextInt(1), 0, -1, -1, -1, StringHelper.randomId()));
    }
    MergePolicy.MergeSpecification forcedDeletesMerges = mp.findForcedDeletesMerges(infos, context);
    if (forcedDeletesMerges != null) {
      assertEquals(0, forcedDeletesMerges.merges.size());
    }
  }
}
Example 8
Source File: TestCollationDocValuesField.java from lucene-solr with Apache License 2.0
public void testRanges() throws Exception { Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir); Document doc = new Document(); Field field = newField("field", "", StringField.TYPE_STORED); Collator collator = Collator.getInstance(Locale.getDefault()); // uses -Dtests.locale if (random().nextBoolean()) { collator.setStrength(Collator.PRIMARY); } CollationDocValuesField collationField = new CollationDocValuesField("collated", collator); doc.add(field); doc.add(collationField); int numDocs = atLeast(100); for (int i = 0; i < numDocs; i++) { String value = TestUtil.randomSimpleString(random()); field.setStringValue(value); collationField.setStringValue(value); iw.addDocument(doc); } IndexReader ir = iw.getReader(); iw.close(); IndexSearcher is = newSearcher(ir); int numChecks = atLeast(20); try { for (int i = 0; i < numChecks; i++) { String start = TestUtil.randomSimpleString(random()); String end = TestUtil.randomSimpleString(random()); BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray()); BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray()); doTestRanges(is, start, end, lowerVal, upperVal, collator); } } finally { ir.close(); dir.close(); } }
Example 9
Source File: SolrJmxReporterTest.java from lucene-solr with Apache License 2.0
@Before
public void beforeTest() throws Exception {
  initCore("solrconfig-basic.xml", "schema.xml");

  final SolrCore core = h.getCore();
  domain = core.getName();
  rootName = PREFIX + TestUtil.randomSimpleString(random(), 5, 10);

  coreMetricManager = core.getCoreMetricManager();
  metricManager = core.getCoreContainer().getMetricManager();
  PluginInfo pluginInfo = createReporterPluginInfo(rootName, true);
  metricManager.loadReporter(coreMetricManager.getRegistryName(), coreMetricManager.getCore(),
      pluginInfo, coreMetricManager.getTag());

  Map<String, SolrMetricReporter> reporters = metricManager.getReporters(coreMetricManager.getRegistryName());
  assertTrue("reporters.size should be > 0, but was " + reporters.size(), reporters.size() > 0);
  String reporterName = pluginInfo.name;
  String taggedName = reporterName + "@" + coreMetricManager.getTag();
  assertNotNull("reporter " + taggedName + " not present among " + reporters, reporters.get(taggedName));
  assertTrue("wrong reporter class: " + reporters.get(taggedName), reporters.get(taggedName) instanceof SolrJmxReporter);

  SolrJmxReporter reporter = (SolrJmxReporter) reporters.get(taggedName);
  assertNotNull("MBean server not found on reporter", reporter.getMBeanServer());
  assertEquals("Wrong MBeanServer found on reporter", TEST_MBEAN_SERVER, reporter.getMBeanServer());
}
Example 10
Source File: TestICUCollationDocValuesField.java from lucene-solr with Apache License 2.0
public void testRanges() throws Exception { Directory dir = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), dir); Document doc = new Document(); Field field = newField("field", "", StringField.TYPE_STORED); Collator collator = Collator.getInstance(); // uses -Dtests.locale if (random().nextBoolean()) { collator.setStrength(Collator.PRIMARY); } ICUCollationDocValuesField collationField = new ICUCollationDocValuesField("collated", collator); doc.add(field); doc.add(collationField); int numDocs = atLeast(500); for (int i = 0; i < numDocs; i++) { String value = TestUtil.randomSimpleString(random()); field.setStringValue(value); collationField.setStringValue(value); iw.addDocument(doc); } IndexReader ir = iw.getReader(); iw.close(); IndexSearcher is = newSearcher(ir); int numChecks = atLeast(100); for (int i = 0; i < numChecks; i++) { String start = TestUtil.randomSimpleString(random()); String end = TestUtil.randomSimpleString(random()); BytesRef lowerVal = new BytesRef(collator.getCollationKey(start).toByteArray()); BytesRef upperVal = new BytesRef(collator.getCollationKey(end).toByteArray()); doTestRanges(is, start, end, lowerVal, upperVal, collator); } ir.close(); dir.close(); }
Example 11
Source File: SolrJmxReporterTest.java from lucene-solr with Apache License 2.0
private PluginInfo createReporterPluginInfo(String rootName, boolean enabled) {
  Random random = random();
  String className = SolrJmxReporter.class.getName();
  String reporterName = PREFIX + TestUtil.randomSimpleString(random, 5, 10);

  Map<String, Object> attrs = new HashMap<>();
  attrs.put(FieldType.CLASS_NAME, className);
  attrs.put(CoreAdminParams.NAME, reporterName);
  attrs.put("rootName", rootName);
  attrs.put("enabled", enabled);

  try {
    String agentId = (String) TEST_MBEAN_SERVER.getAttribute(
        new ObjectName("JMImplementation:type=MBeanServerDelegate"), "MBeanServerId");
    attrs.put("agentId", agentId);
  } catch (Exception e) {
    throw new RuntimeException("Unable to determine agentId of MBeanServer: " + e.getMessage(), e);
  }

  boolean shouldOverrideDomain = random.nextBoolean();
  if (shouldOverrideDomain) {
    domain = PREFIX + TestUtil.randomSimpleString(random);
    attrs.put("domain", domain);
  }

  return new PluginInfo(TestUtil.randomUnicodeString(random), attrs);
}
Example 12
Source File: AnalyzingSuggesterTest.java from lucene-solr with Apache License 2.0
/**
 * Adds 50 random keys, that all analyze to the same thing (dog), with the same cost,
 * and checks that they come back in surface-form order.
 */
public void testTieBreakOnSurfaceForm() throws Exception {
  Analyzer a = new MultiCannedAnalyzer(new CannedTokenStream(token("dog", 1, 1)));

  Directory tempDir = getDirectory();
  AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, -1, true);

  // make 50 inputs all with the same cost of 1, random strings
  Input[] inputs = new Input[100];
  for (int i = 0; i < inputs.length; i++) {
    inputs[i] = new Input(TestUtil.randomSimpleString(random()), 1);
  }

  suggester.build(new InputArrayIterator(inputs));

  // Try to save/load:
  Path tmpDir = createTempDir("AnalyzingSuggesterTest");
  Path path = tmpDir.resolve("suggester");

  OutputStream os = Files.newOutputStream(path);
  suggester.store(os);
  os.close();

  InputStream is = Files.newInputStream(path);
  suggester.load(is);
  is.close();

  // now suggest everything, and check that stuff comes back in order
  List<LookupResult> results = suggester.lookup("", false, 50);
  assertEquals(50, results.size());
  for (int i = 1; i < 50; i++) {
    String previous = results.get(i - 1).toString();
    String current = results.get(i).toString();
    assertTrue("surface forms out of order: previous=" + previous + ",current=" + current,
        current.compareTo(previous) >= 0);
  }

  IOUtils.close(a, tempDir);
}
Example 13
Source File: TestExceedMaxTermLength.java from lucene-solr with Apache License 2.0
@Test
public void testExceededMaxTermLengthWithLimitingFilter() {
  // problematic field
  final String longFieldName = "cat_length";
  final String longFieldValue = TestUtil.randomSimpleString(random(), minTestTermLength, maxTestTermLength);

  final String okayFieldName = TestUtil.randomSimpleString(random(), 1, 50) + "_sS"; // Dynamic field
  final String okayFieldValue = TestUtil.randomSimpleString(random(), minTestTermLength, maxTestTermLength);

  boolean includeOkayFields = random().nextBoolean();

  if (random().nextBoolean()) {
    // Use XML
    String doc;
    if (includeOkayFields) {
      doc = adoc("id", "1", longFieldName, longFieldValue, okayFieldName, okayFieldValue);
    } else {
      doc = adoc("id", "1", longFieldName, longFieldValue);
    }
    assertU(doc);
  } else {
    // Use JSON
    String jsonStr = null;
    try {
      if (includeOkayFields) {
        jsonStr = "[{'id':'1','%s':'%s', '%s': '%s'}]";
        jsonStr = String.format(Locale.ROOT, jsonStr, longFieldName, longFieldValue, okayFieldName, okayFieldValue);
        updateJ(json(jsonStr), null);
      } else {
        jsonStr = "[{'id':'1','%s':'%s'}]";
        jsonStr = String.format(Locale.ROOT, jsonStr, longFieldName, longFieldValue);
        updateJ(json(jsonStr), null);
      }
    } catch (Exception e) {
      fail("Should not have failed adding doc " + jsonStr);
      String msg = e.getCause().getMessage();
      assertTrue(msg.contains("one immense term in field=\"cat\""));
    }
  }

  assertU(commit());

  assertQ(req("q", "*:*"), "//*[@numFound='1']");
}
Example 14
Source File: TestIDVersionPostingsFormat.java from lucene-solr with Apache License 2.0
private IDSource getRandomIDs() {
  IDSource ids;
  switch (random().nextInt(6)) {
  case 0:
    // random simple
    if (VERBOSE) {
      System.out.println("TEST: use random simple ids");
    }
    ids = new IDSource() {
        @Override
        public String next() {
          return TestUtil.randomSimpleString(random());
        }
      };
    break;
  case 1:
    // random realistic unicode
    if (VERBOSE) {
      System.out.println("TEST: use random realistic unicode ids");
    }
    ids = new IDSource() {
        @Override
        public String next() {
          return TestUtil.randomRealisticUnicodeString(random());
        }
      };
    break;
  case 2:
    // sequential
    if (VERBOSE) {
      System.out.println("TEST: use sequential ids");
    }
    ids = new IDSource() {
        int upto;

        @Override
        public String next() {
          return Integer.toString(upto++);
        }
      };
    break;
  case 3:
    // zero-pad sequential
    if (VERBOSE) {
      System.out.println("TEST: use zero-pad sequential ids");
    }
    ids = new IDSource() {
        final int radix = TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX);
        final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random(), 5, 20) + "d", 0);
        int upto;

        @Override
        public String next() {
          String s = Integer.toString(upto++);
          return zeroPad.substring(zeroPad.length() - s.length()) + s;
        }
      };
    break;
  case 4:
    // random long
    if (VERBOSE) {
      System.out.println("TEST: use random long ids");
    }
    ids = new IDSource() {
        final int radix = TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX);
        int upto;

        @Override
        public String next() {
          return Long.toString(random().nextLong() & 0x3ffffffffffffffL, radix);
        }
      };
    break;
  case 5:
    // zero-pad random long
    if (VERBOSE) {
      System.out.println("TEST: use zero-pad random long ids");
    }
    ids = new IDSource() {
        final int radix = TestUtil.nextInt(random(), Character.MIN_RADIX, Character.MAX_RADIX);
        final String zeroPad = String.format(Locale.ROOT, "%015d", 0);
        int upto;

        @Override
        public String next() {
          return Long.toString(random().nextLong() & 0x3ffffffffffffffL, radix);
        }
      };
    break;
  default:
    throw new AssertionError();
  }

  return ids;
}
Example 15
Source File: CollationTestBase.java from lucene-solr with Apache License 2.0
public void assertThreadSafe(final Analyzer analyzer) throws Exception {
  int numTestPoints = 100;
  int numThreads = TestUtil.nextInt(random(), 3, 5);
  final HashMap<String,BytesRef> map = new HashMap<>();

  // create a map<String,SortKey> up front.
  // then with multiple threads, generate sort keys for all the keys in the map
  // and ensure they are the same as the ones we produced in serial fashion.

  for (int i = 0; i < numTestPoints; i++) {
    String term = TestUtil.randomSimpleString(random());
    try (TokenStream ts = analyzer.tokenStream("fake", term)) {
      TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
      ts.reset();
      assertTrue(ts.incrementToken());
      // ensure we make a copy of the actual bytes too
      map.put(term, BytesRef.deepCopyOf(termAtt.getBytesRef()));
      assertFalse(ts.incrementToken());
      ts.end();
    }
  }

  Thread threads[] = new Thread[numThreads];
  for (int i = 0; i < numThreads; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        try {
          for (Map.Entry<String,BytesRef> mapping : map.entrySet()) {
            String term = mapping.getKey();
            BytesRef expected = mapping.getValue();
            try (TokenStream ts = analyzer.tokenStream("fake", term)) {
              TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);
              ts.reset();
              assertTrue(ts.incrementToken());
              assertEquals(expected, termAtt.getBytesRef());
              assertFalse(ts.incrementToken());
              ts.end();
            }
          }
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    };
  }
  for (int i = 0; i < numThreads; i++) {
    threads[i].start();
  }
  for (int i = 0; i < numThreads; i++) {
    threads[i].join();
  }
}
Example 16
Source File: TestLucene80DocValuesFormat.java from lucene-solr with Apache License 2.0
@Nightly
public void testSortedSetAroundBlockSize() throws IOException {
  final int frontier = 1 << Lucene80DocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT;
  for (int maxDoc = frontier - 1; maxDoc <= frontier + 1; ++maxDoc) {
    final Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(newLogMergePolicy()));
    ByteBuffersDataOutput out = new ByteBuffersDataOutput();
    Document doc = new Document();
    SortedSetDocValuesField field1 = new SortedSetDocValuesField("sset", new BytesRef());
    doc.add(field1);
    SortedSetDocValuesField field2 = new SortedSetDocValuesField("sset", new BytesRef());
    doc.add(field2);
    for (int i = 0; i < maxDoc; ++i) {
      BytesRef s1 = new BytesRef(TestUtil.randomSimpleString(random(), 2));
      BytesRef s2 = new BytesRef(TestUtil.randomSimpleString(random(), 2));
      field1.setBytesValue(s1);
      field2.setBytesValue(s2);
      w.addDocument(doc);
      Set<BytesRef> set = new TreeSet<>(Arrays.asList(s1, s2));
      out.writeVInt(set.size());
      for (BytesRef ref : set) {
        out.writeVInt(ref.length);
        out.writeBytes(ref.bytes, ref.offset, ref.length);
      }
    }

    w.forceMerge(1);
    DirectoryReader r = DirectoryReader.open(w);
    w.close();
    LeafReader sr = getOnlyLeafReader(r);
    assertEquals(maxDoc, sr.maxDoc());
    SortedSetDocValues values = sr.getSortedSetDocValues("sset");
    assertNotNull(values);
    ByteBuffersDataInput in = out.toDataInput();
    BytesRefBuilder b = new BytesRefBuilder();
    for (int i = 0; i < maxDoc; ++i) {
      assertEquals(i, values.nextDoc());
      final int numValues = in.readVInt();

      for (int j = 0; j < numValues; ++j) {
        b.setLength(in.readVInt());
        b.grow(b.length());
        in.readBytes(b.bytes(), 0, b.length());
        assertEquals(b.get(), values.lookupOrd(values.nextOrd()));
      }

      assertEquals(SortedSetDocValues.NO_MORE_ORDS, values.nextOrd());
    }
    r.close();
    dir.close();
  }
}
Example 17
Source File: TestExceedMaxTermLength.java from lucene-solr with Apache License 2.0
public void test() throws Exception {
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(random(), new MockAnalyzer(random())));
  try {
    final FieldType ft = new FieldType();
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    ft.setStored(random().nextBoolean());
    ft.freeze();

    final Document doc = new Document();
    if (random().nextBoolean()) {
      // totally ok short field value
      doc.add(new Field(TestUtil.randomSimpleString(random(), 1, 10),
                        TestUtil.randomSimpleString(random(), 1, 10),
                        ft));
    }
    // problematic field
    final String name = TestUtil.randomSimpleString(random(), 1, 50);
    final String value = TestUtil.randomSimpleString(random(), minTestTermLength, maxTestTermLegnth);
    final Field f = new Field(name, value, ft);
    if (random().nextBoolean()) {
      // totally ok short field value
      doc.add(new Field(TestUtil.randomSimpleString(random(), 1, 10),
                        TestUtil.randomSimpleString(random(), 1, 10),
                        ft));
    }
    doc.add(f);

    IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
      w.addDocument(doc);
    });
    String maxLengthMsg = String.valueOf(IndexWriter.MAX_TERM_LENGTH);
    String msg = expected.getMessage();
    assertTrue("IllegalArgumentException didn't mention 'immense term': " + msg,
               msg.contains("immense term"));
    assertTrue("IllegalArgumentException didn't mention max length (" + maxLengthMsg + "): " + msg,
               msg.contains(maxLengthMsg));
    assertTrue("IllegalArgumentException didn't mention field name (" + name + "): " + msg,
               msg.contains(name));
    assertTrue("IllegalArgumentException didn't mention original message: " + msg,
               msg.contains("bytes can be at most") && msg.contains("in length; got"));
  } finally {
    w.close();
  }
}
Example 18
Source File: TestContextQuery.java from lucene-solr with Apache License 2.0
@Test
public void testRandomContextQueryScoring() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  try (RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"))) {
    int numSuggestions = atLeast(20);
    int numContexts = atLeast(5);
    Set<Integer> seenWeights = new HashSet<>();
    List<Entry> expectedEntries = new ArrayList<>();
    List<CharSequence> contexts = new ArrayList<>();
    for (int i = 1; i <= numContexts; i++) {
      CharSequence context = TestUtil.randomSimpleString(random(), 10) + i;
      contexts.add(context);
      for (int j = 1; j <= numSuggestions; j++) {
        String suggestion = "sugg_" + TestUtil.randomSimpleString(random(), 10) + j;
        int weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
        while (seenWeights.contains(weight)) {
          weight = TestUtil.nextInt(random(), 1, 1000 * numContexts * numSuggestions);
        }
        seenWeights.add(weight);
        Document document = new Document();
        document.add(new ContextSuggestField("suggest_field", suggestion, weight, context));
        iw.addDocument(document);
        expectedEntries.add(new Entry(suggestion, context.toString(), i * weight));
      }
      if (rarely()) {
        iw.commit();
      }
    }
    Entry[] expectedResults = expectedEntries.toArray(new Entry[expectedEntries.size()]);

    ArrayUtil.introSort(expectedResults, new Comparator<Entry>() {
      @Override
      public int compare(Entry o1, Entry o2) {
        int cmp = Float.compare(o2.value, o1.value);
        if (cmp != 0) {
          return cmp;
        } else {
          return o1.output.compareTo(o2.output);
        }
      }
    });

    try (DirectoryReader reader = iw.getReader()) {
      SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
      ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
      for (int i = 0; i < contexts.size(); i++) {
        query.addContext(contexts.get(i), i + 1);
      }
      TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 4, false);
      assertSuggestions(suggest, ArrayUtil.copyOfSubArray(expectedResults, 0, 4));
    }
  }
}
Example 19
Source File: TestMultiDocValues.java from lucene-solr with Apache License 2.0
public void testSortedWithLotsOfDups() throws Exception {
  Directory dir = newDirectory();
  Document doc = new Document();
  Field field = new SortedDocValuesField("bytes", new BytesRef());
  doc.add(field);

  IndexWriterConfig iwc = newIndexWriterConfig(random(), null);
  iwc.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
  int numDocs = TEST_NIGHTLY ? atLeast(500) : atLeast(50);
  for (int i = 0; i < numDocs; i++) {
    BytesRef ref = new BytesRef(TestUtil.randomSimpleString(random(), 2));
    field.setBytesValue(ref);
    iw.addDocument(doc);
    if (random().nextInt(17) == 0) {
      iw.commit();
    }
  }
  DirectoryReader ir = iw.getReader();
  iw.forceMerge(1);
  DirectoryReader ir2 = iw.getReader();
  LeafReader merged = getOnlyLeafReader(ir2);
  iw.close();

  SortedDocValues multi = MultiDocValues.getSortedValues(ir, "bytes");
  SortedDocValues single = merged.getSortedDocValues("bytes");
  assertEquals(single.getValueCount(), multi.getValueCount());
  for (int i = 0; i < numDocs; i++) {
    assertEquals(i, multi.nextDoc());
    assertEquals(i, single.nextDoc());
    // check ord
    assertEquals(single.ordValue(), multi.ordValue());
    // check ord value
    final BytesRef expected = BytesRef.deepCopyOf(single.binaryValue());
    final BytesRef actual = multi.binaryValue();
    assertEquals(expected, actual);
  }
  testRandomAdvance(merged.getSortedDocValues("bytes"), MultiDocValues.getSortedValues(ir, "bytes"));
  testRandomAdvanceExact(merged.getSortedDocValues("bytes"), MultiDocValues.getSortedValues(ir, "bytes"), merged.maxDoc());

  ir.close();
  ir2.close();
  dir.close();
}
Example 20
Source File: SolrInfoBeanTest.java from lucene-solr with Apache License 2.0
/**
 * Gets a list of everything we can find in the classpath and makes sure it has
 * a name, description, etc...
 */
@SuppressWarnings({"unchecked"})
public void testCallMBeanInfo() throws Exception {
  @SuppressWarnings({"rawtypes"})
  List<Class> classes = new ArrayList<>();
  classes.addAll(getClassesForPackage(SearchHandler.class.getPackage().getName()));
  classes.addAll(getClassesForPackage(SearchComponent.class.getPackage().getName()));
  classes.addAll(getClassesForPackage(LukeRequestHandler.class.getPackage().getName()));
  classes.addAll(getClassesForPackage(DefaultSolrHighlighter.class.getPackage().getName()));
  classes.addAll(getClassesForPackage(CaffeineCache.class.getPackage().getName()));
  // System.out.println(classes);

  int checked = 0;
  SolrMetricManager metricManager = h.getCoreContainer().getMetricManager();
  String registry = h.getCore().getCoreMetricManager().getRegistryName();
  SolrMetricsContext solrMetricsContext = new SolrMetricsContext(metricManager, registry, "foo");
  String scope = TestUtil.randomSimpleString(random(), 2, 10);
  for (@SuppressWarnings({"rawtypes"}) Class clazz : classes) {
    if (SolrInfoBean.class.isAssignableFrom(clazz)) {
      try {
        SolrInfoBean info = (SolrInfoBean) clazz.getConstructor().newInstance();
        if (info instanceof SolrMetricProducer) {
          ((SolrMetricProducer) info).initializeMetrics(solrMetricsContext, scope);
        }

        //System.out.println( info.getClass() );
        assertNotNull(info.getClass().getCanonicalName(), info.getName());
        assertNotNull(info.getClass().getCanonicalName(), info.getDescription());
        assertNotNull(info.getClass().getCanonicalName(), info.getCategory());

        if (info instanceof CaffeineCache) {
          continue;
        }

        assertNotNull(info.toString());
        checked++;
      } catch (ReflectiveOperationException ex) {
        // expected...
        //System.out.println( "unable to initialize: " + clazz );
      }
    }
  }
  assertTrue("there are at least 10 SolrInfoBean that should be found in the classpath, found " + checked,
      checked > 10);
}