Java Code Examples for org.apache.solr.client.solrj.request.UpdateRequest#add()
The following examples show how to use org.apache.solr.client.solrj.request.UpdateRequest#add().
The examples are drawn from open-source projects; the source file, project, and license are noted above each example.
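As a quick orientation before the project examples, here is a minimal, self-contained sketch of the add() overloads that recur below (single document, document collection, and varargs field/value pairs). The base URL and the collection name "mycollection" are placeholder assumptions, not taken from any project on this page.

import java.util.Arrays;

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;

public class UpdateRequestAddSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder base URL and collection name -- adjust for your own cluster.
    try (HttpSolrClient client = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      UpdateRequest req = new UpdateRequest();

      // add(SolrInputDocument): queue a single document.
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "1");
      req.add(doc);

      // add(Collection<SolrInputDocument>): queue several documents at once.
      SolrInputDocument doc2 = new SolrInputDocument();
      doc2.addField("id", "2");
      req.add(Arrays.asList(doc2));

      // add(String... fields): build a document from field/value pairs.
      req.add("id", "3", "title_s", "hello");

      // One round trip sends everything queued so far.
      UpdateResponse rsp = req.process(client, "mycollection");
      System.out.println("status: " + rsp.getStatus());
    }
  }
}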
Example 1
Source File: CdcrTestsUtil.java From lucene-solr with Apache License 2.0
public static void indexRandomDocs(Integer start, Integer count, CloudSolrClient solrClient) throws Exception {
  // ADD operation on cluster 1
  int docs = 0;
  if (count == 0) {
    docs = (TEST_NIGHTLY ? 100 : 10);
  } else {
    docs = count;
  }
  for (int k = start; k < docs; k++) {
    UpdateRequest req = new UpdateRequest();
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", k);
    req.add(doc);
    req.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
    req.process(solrClient);
  }
}
Example 2
Source File: SolrIndex.java From titan1withtp3.1 with Apache License 2.0
private void deleteIndividualFieldsFromIndex(String collectionName, String keyIdField, String docId,
                                             HashSet<IndexEntry> fieldDeletions) throws SolrServerException, IOException {
  if (fieldDeletions.isEmpty()) return;

  Map<String, String> fieldDeletes = new HashMap<String, String>(1) {{ put("set", null); }};

  SolrInputDocument doc = new SolrInputDocument();
  doc.addField(keyIdField, docId);
  StringBuilder sb = new StringBuilder();
  for (IndexEntry fieldToDelete : fieldDeletions) {
    doc.addField(fieldToDelete.field, fieldDeletes);
    sb.append(fieldToDelete).append(",");
  }
  if (logger.isTraceEnabled())
    logger.trace("Deleting individual fields [{}] for document {}", sb.toString(), docId);

  UpdateRequest singleDocument = newUpdateRequest();
  singleDocument.add(doc);
  solrClient.request(singleDocument, collectionName);
}
Example 3
Source File: StreamExpressionTest.java From lucene-solr with Apache License 2.0
@Test
public void testEvalStream() throws Exception {
  UpdateRequest updateRequest = new UpdateRequest();
  updateRequest.add(id, "hello", "test_t", "l b c d c");
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  String expr = "eval(select(echo(\"search(" + COLLECTIONORALIAS + ", q=\\\"*:*\\\", fl=id, sort=\\\"id desc\\\")\"), echo as expr_s))";

  ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
  paramsLoc.set("expr", expr);
  paramsLoc.set("qt", "/stream");

  String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString() + "/" + COLLECTIONORALIAS;
  TupleStream solrStream = new SolrStream(url, paramsLoc);

  StreamContext context = new StreamContext();
  solrStream.setStreamContext(context);
  List<Tuple> tuples = getTuples(solrStream);
  assertTrue(tuples.size() == 1);
  String s = (String) tuples.get(0).get("id");
  assertTrue(s.equals("hello"));
}
Example 4
Source File: SolrCmdDistributor.java From lucene-solr with Apache License 2.0
public void distribAdd(AddUpdateCommand cmd, List<Node> nodes, ModifiableSolrParams params, boolean synchronous,
                       RollupRequestReplicationTracker rollupTracker,
                       LeaderRequestReplicationTracker leaderTracker) throws IOException {
  for (Node node : nodes) {
    UpdateRequest uReq = new UpdateRequest();
    if (cmd.isLastDocInBatch)
      uReq.lastDocInBatch();
    uReq.setParams(params);
    uReq.add(cmd.solrDoc, cmd.commitWithin, cmd.overwrite);
    if (cmd.isInPlaceUpdate()) {
      params.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, String.valueOf(cmd.prevVersion));
    }
    submit(new Req(cmd, node, uReq, synchronous, rollupTracker, leaderTracker), false);
  }
}
Example 5
Source File: CloudAuthStreamTest.java From lucene-solr with Apache License 2.0
/** A simple "Delete by Query" example */ public void testSimpleDeleteStreamByQuery() throws Exception { { // Put some "real" docs directly to both X... final UpdateRequest update = setBasicAuthCredentials(new UpdateRequest(), WRITE_X_USER); for (int i = 1; i <= 42; i++) { update.add(sdoc("id",i+"x","foo_i",""+i)); } assertEquals("initial docs in X", 0, update.commit(cluster.getSolrClient(), COLLECTION_X).getStatus()); } assertEquals(42L, commitAndCountDocsInCollection(COLLECTION_X, WRITE_X_USER)); { // WRITE_X user should be able to delete X via a query from X final String expr = "delete("+COLLECTION_X+", batchSize=5, " // note batch size + " search("+COLLECTION_X+", " + " q=\"foo_i:[* TO 10]\", " // 10 matches = 2 batches + " rows=100, " + " fl=\"id,foo_i,_version_\", " // foo_i should be ignored... + " sort=\"foo_i desc\")) " // version constraint should be ok ; final SolrStream solrStream = new SolrStream(solrUrl + "/" + COLLECTION_X, params("qt", "/stream", "expr", expr)); solrStream.setCredentials(WRITE_X_USER, WRITE_X_USER); final List<Tuple> tuples = getTuples(solrStream); assertEquals(2, tuples.size()); assertEquals(5L, tuples.get(0).get("totalIndexed")); assertEquals(10L, tuples.get(1).get("totalIndexed")); } assertEquals(42L - 10L, commitAndCountDocsInCollection(COLLECTION_X, WRITE_X_USER)); }
Example 6
Source File: CloudHttp2SolrClientTest.java From lucene-solr with Apache License 2.0
@Test
public void testParallelUpdateQTime() throws Exception {
  CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
  cluster.waitForActiveCollection(COLLECTION, 2, 2);
  UpdateRequest req = new UpdateRequest();
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", String.valueOf(TestUtil.nextInt(random(), 1000, 1100)));
    req.add(doc);
  }
  UpdateResponse response = req.process(getRandomClient(), COLLECTION);
  // See SOLR-6547, we just need to ensure that no exception is thrown here
  assertTrue(response.getQTime() >= 0);
}
Example 7
Source File: SolrClientInterceptorTest.java From skywalking with Apache License 2.0
@Test
public void testUpdateWithAdd() throws Throwable {
  UpdateRequest request = new UpdateRequest();
  List<SolrInputDocument> docs = Lists.newArrayList();
  for (int start = 0; start < 100; start++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", start);
    docs.add(doc);
  }
  arguments = new Object[] {
    request.add(docs), null, collection
  };

  interceptor.beforeMethod(enhancedInstance, method, arguments, argumentType, null);
  interceptor.afterMethod(enhancedInstance, method, arguments, argumentType, getResponse());

  List<TraceSegment> segments = segmentStorage.getTraceSegments();
  Assert.assertEquals(segments.size(), 1);

  List<AbstractTracingSpan> spans = SegmentHelper.getSpans(segments.get(0));
  Assert.assertEquals(spans.size(), 1);

  AbstractTracingSpan span = spans.get(0);
  int pox = 0;
  if (Config.Plugin.SolrJ.TRACE_STATEMENT) {
    SpanAssert.assertTag(span, ++pox, "100");
  }
  if (Config.Plugin.SolrJ.TRACE_OPS_PARAMS) {
    SpanAssert.assertTag(span, ++pox, "-1");
  }
  spanCommonAssert(span, pox, "solrJ/collection/update/ADD");
}
Example 8
Source File: AbstractFullDistribZkTestBase.java From lucene-solr with Apache License 2.0
@Override
protected void indexDoc(SolrInputDocument doc) throws IOException, SolrServerException {
  UpdateRequest req = new UpdateRequest();
  req.add(doc);
  req.setParam("CONTROL", "TRUE");
  req.process(controlClient);

  // if we wanted to randomly pick a client - but sometimes they may be
  // down...

  // boolean pick = random.nextBoolean();
  //
  // int which = (doc.getField(id).toString().hashCode() & 0x7fffffff) %
  // sliceCount;
  //
  // if (pick && sliceCount > 1) {
  //   which = which + ((shardCount / sliceCount) *
  //   random.nextInt(sliceCount-1));
  // }
  //
  // HttpSolrServer client = (HttpSolrServer)
  // clients.get(which);

  UpdateRequest ureq = new UpdateRequest();
  ureq.add(doc);
  // ureq.setParam(UpdateParams.UPDATE_CHAIN, DISTRIB_UPDATE_CHAIN);
  ureq.process(cloudClient);
}
Example 9
Source File: SendToSolrCloudProcessor.java From jesterj with Apache License 2.0
@Override
protected void batchOperation(ConcurrentBiMap<Document, SolrInputDocument> oldBatch) throws SolrServerException, IOException {
  List<String> deletes = oldBatch.keySet().stream()
      .filter(doc -> doc.getOperation() == Document.Operation.DELETE)
      .map(Document::getId)
      .collect(Collectors.toList());
  if (deletes.size() > 0) {
    getSolrClient().deleteById(deletes);
  }
  List<SolrInputDocument> adds = oldBatch.keySet().stream()
      .filter(doc -> doc.getOperation() != Document.Operation.DELETE)
      .map(oldBatch::get)
      .collect(Collectors.toList());
  if (adds.size() > 0) {
    Map<String, String> params = getParams();
    if (params == null) {
      getSolrClient().add(adds);
    } else {
      UpdateRequest req = new UpdateRequest();
      req.add(adds);
      // always true right now, but pattern for additional global params...
      for (String s : params.keySet()) {
        req.setParam(s, params.get(s));
      }
      getSolrClient().request(req);
    }
  }
  for (Document document : oldBatch.keySet()) {
    putIdInThreadContext(document);
    if (document.getOperation() == Document.Operation.DELETE) {
      log().info(Status.INDEXED.getMarker(), "{} deleted from solr successfully", document.getId());
    } else {
      log().info(Status.INDEXED.getMarker(), "{} sent to solr successfully", document.getId());
    }
  }
}
Example 10
Source File: SolrAbstractSink.java From pulsar with Apache License 2.0
@Override
public void write(Record<T> record) {
  UpdateRequest updateRequest = new UpdateRequest();
  if (solrSinkConfig.getSolrCommitWithinMs() > 0) {
    updateRequest.setCommitWithin(solrSinkConfig.getSolrCommitWithinMs());
  }
  if (enableBasicAuth) {
    updateRequest.setBasicAuthCredentials(
        solrSinkConfig.getUsername(),
        solrSinkConfig.getPassword()
    );
  }

  SolrInputDocument document = convert(record);
  updateRequest.add(document);

  try {
    UpdateResponse updateResponse = updateRequest.process(client, solrSinkConfig.getSolrCollection());
    if (updateResponse.getStatus() == 0) {
      record.ack();
    } else {
      record.fail();
    }
  } catch (SolrServerException | IOException e) {
    record.fail();
    log.warn("Solr update document exception ", e);
  }
}
Example 11
Source File: LegacyQueryFacetCloudTest.java From lucene-solr with Apache License 2.0
@Before
public void beforeTest() throws Exception {
  //INT
  int1TestStart.add(new ArrayList<Integer>());
  int2TestStart.add(new ArrayList<Integer>());

  //LONG
  longTestStart.add(new ArrayList<Long>());
  longTestStart.add(new ArrayList<Long>());

  //FLOAT
  floatTestStart.add(new ArrayList<Float>());
  floatTestStart.add(new ArrayList<Float>());
  floatTestStart.add(new ArrayList<Float>());

  UpdateRequest req = new UpdateRequest();

  for (int j = 0; j < NUM_LOOPS; ++j) {
    int i = j % INT;
    long l = j % LONG;
    float f = j % FLOAT;
    double d = j % DOUBLE;
    int dt = j % DATE;
    int s = j % STRING;

    List<String> fields = new ArrayList<>();
    fields.add("id"); fields.add("1000" + j);
    fields.add("int_id"); fields.add("" + i);
    fields.add("long_ld"); fields.add("" + l);
    fields.add("float_fd"); fields.add("" + f);
    fields.add("double_dd"); fields.add("" + d);
    fields.add("date_dtd"); fields.add((1000 + dt) + "-01-01T23:59:59Z");
    fields.add("string_sd"); fields.add("abc" + s);
    req.add(fields.toArray(new String[0]));

    if (f <= 50) {
      int1TestStart.get(0).add(i);
    }
    if (f <= 30) {
      int2TestStart.get(0).add(i);
    }
    if (s == 1) {
      longTestStart.get(0).add(l);
    }
    if (s == 2) {
      longTestStart.get(1).add(l);
    }
    if (l >= 30) {
      floatTestStart.get(0).add(f);
    }
    if (d <= 50) {
      floatTestStart.get(1).add(f);
    }
    if (l >= 20) {
      floatTestStart.get(2).add(f);
    }
  }

  req.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
}
Example 12
Source File: MergeIndexesExampleTestBase.java From lucene-solr with Apache License 2.0
private UpdateRequest setupCores() throws SolrServerException, IOException {
  UpdateRequest up = new UpdateRequest();
  up.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
  up.deleteByQuery("*:*");
  up.process(getSolrCore0());
  up.process(getSolrCore1());
  up.clear();

  // Add something to each core
  SolrInputDocument doc = new SolrInputDocument();
  doc.setField("id", "AAA");
  doc.setField("name", "core0");

  // Add to core0
  up.add(doc);
  up.process(getSolrCore0());

  // Add to core1
  doc.setField("id", "BBB");
  doc.setField("name", "core1");
  up.add(doc);
  up.process(getSolrCore1());

  // Now Make sure AAA is in 0 and BBB in 1
  SolrQuery q = new SolrQuery();
  QueryRequest r = new QueryRequest(q);
  q.setQuery("id:AAA");
  assertEquals(1, r.process(getSolrCore0()).getResults().size());
  assertEquals(0, r.process(getSolrCore1()).getResults().size());

  assertEquals(1, getSolrCore0().query(new SolrQuery("id:AAA")).getResults().size());
  assertEquals(0, getSolrCore0().query(new SolrQuery("id:BBB")).getResults().size());
  assertEquals(0, getSolrCore1().query(new SolrQuery("id:AAA")).getResults().size());
  assertEquals(1, getSolrCore1().query(new SolrQuery("id:BBB")).getResults().size());

  return up;
}
Example 13
Source File: TestExportTool.java From lucene-solr with Apache License 2.0
@Nightly
public void testVeryLargeCluster() throws Exception {
  String COLLECTION_NAME = "veryLargeColl";
  configureCluster(4)
      .addConfig("conf", configset("cloud-minimal"))
      .configure();
  try {
    CollectionAdminRequest
        .createCollection(COLLECTION_NAME, "conf", 8, 1)
        .setMaxShardsPerNode(10)
        .process(cluster.getSolrClient());
    cluster.waitForActiveCollection(COLLECTION_NAME, 8, 8);

    String tmpFileLoc = new File(cluster.getBaseDir().toFile().getAbsolutePath() + File.separator).getPath();
    String url = cluster.getRandomJetty(random()).getBaseUrl() + "/" + COLLECTION_NAME;

    int docCount = 0;
    for (int j = 0; j < 4; j++) {
      int bsz = 10000;
      UpdateRequest ur = new UpdateRequest();
      ur.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
      for (int i = 0; i < bsz; i++) {
        ur.add("id", String.valueOf((j * bsz) + i), "desc_s", TestUtil.randomSimpleString(random(), 10, 50));
      }
      cluster.getSolrClient().request(ur, COLLECTION_NAME);
      docCount += bsz;
    }

    QueryResponse qr = cluster.getSolrClient().query(COLLECTION_NAME, new SolrQuery("*:*").setRows(0));
    assertEquals(docCount, qr.getResults().getNumFound());

    DocCollection coll = cluster.getSolrClient().getClusterStateProvider().getCollection(COLLECTION_NAME);
    HashMap<String, Long> docCounts = new HashMap<>();
    long totalDocsFromCores = 0;
    for (Slice slice : coll.getSlices()) {
      Replica replica = slice.getLeader();
      try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getBaseUrl()).build()) {
        long count = ExportTool.getDocCount(replica.getCoreName(), client);
        docCounts.put(replica.getCoreName(), count);
        totalDocsFromCores += count;
      }
    }
    assertEquals(docCount, totalDocsFromCores);

    ExportTool.MultiThreadedRunner info = null;
    String absolutePath = null;

    info = new ExportTool.MultiThreadedRunner(url);
    info.output = System.out;
    absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".javabin";
    info.setOutFormat(absolutePath, "javabin");
    info.setLimit("-1");
    info.exportDocs();
    assertJavabinDocsCount(info, docCount);
    for (Map.Entry<String, Long> e : docCounts.entrySet()) {
      assertEquals(e.getValue().longValue(), info.corehandlers.get(e.getKey()).receivedDocs.get());
    }

    info = new ExportTool.MultiThreadedRunner(url);
    info.output = System.out;
    absolutePath = tmpFileLoc + COLLECTION_NAME + random().nextInt(100000) + ".json";
    info.setOutFormat(absolutePath, "jsonl");
    info.fields = "id,desc_s";
    info.setLimit("-1");
    info.exportDocs();
    long actual = ((ExportTool.JsonSink) info.sink).info.docsWritten.get();
    assertTrue("docs written :" + actual + "docs produced : " + info.docsWritten.get(), actual >= docCount);
    assertJsonDocsCount(info, docCount, null);
  } finally {
    cluster.shutdown();
  }
}
Example 14
Source File: AddBlockUpdateTest.java From lucene-solr with Apache License 2.0
@Test
public void testXML() throws IOException, XMLStreamException {
  UpdateRequest req = new UpdateRequest();

  List<SolrInputDocument> docs = new ArrayList<>();

  String xml_doc1 =
      "<doc >" +
      "  <field name=\"id\">1</field>" +
      "  <field name=\"parent_s\">X</field>" +
      "<doc> " +
      "  <field name=\"id\" >2</field>" +
      "  <field name=\"child_s\">y</field>" +
      "</doc>" +
      "<doc> " +
      "  <field name=\"id\" >3</field>" +
      "  <field name=\"child_s\">z</field>" +
      "</doc>" +
      "</doc>";

  String xml_doc2 =
      "<doc >" +
      "  <field name=\"id\">4</field>" +
      "  <field name=\"parent_s\">A</field>" +
      "<doc> " +
      "  <field name=\"id\" >5</field>" +
      "  <field name=\"child_s\">b</field>" +
      "</doc>" +
      "<doc> " +
      "  <field name=\"id\" >6</field>" +
      "  <field name=\"child_s\">c</field>" +
      "</doc>" +
      "</doc>";

  XMLStreamReader parser = inputFactory.createXMLStreamReader(new StringReader(xml_doc1));
  parser.next(); // read the START document...
  //null for the processor is all right here
  XMLLoader loader = new XMLLoader();
  SolrInputDocument document1 = loader.readDoc(parser);

  XMLStreamReader parser2 = inputFactory.createXMLStreamReader(new StringReader(xml_doc2));
  parser2.next(); // read the START document...
  //null for the processor is all right here
  //XMLLoader loader = new XMLLoader();
  SolrInputDocument document2 = loader.readDoc(parser2);

  docs.add(document1);
  docs.add(document2);

  Collections.shuffle(docs, random());
  req.add(docs);

  RequestWriter requestWriter = new RequestWriter();
  OutputStream os = new ByteArrayOutputStream();
  requestWriter.write(req, os);
  assertBlockU(os.toString());
  assertU(commit());

  final SolrIndexSearcher searcher = getSearcher();
  assertSingleParentOf(searcher, one("yz"), "X");
  assertSingleParentOf(searcher, one("bc"), "A");
}
Example 15
Source File: ModelTupleStreamIntegrationTest.java From deeplearning4j with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  final Path configsetPath = configset("mini-expressible");

  // create and serialize model
  {
    final Model model = buildModel();
    final File serializedModelFile = configsetPath
        .resolve(MY_SERIALIZED_MODEL_FILENAME)
        .toFile();
    ModelSerializer.writeModel(model, serializedModelFile.getPath(), false);
  }

  final String configName = "conf";
  final int numShards = 2;
  final int numReplicas = 2;
  final int maxShardsPerNode = 1;
  final int nodeCount = (numShards * numReplicas + (maxShardsPerNode - 1)) / maxShardsPerNode;

  // create and configure cluster
  configureCluster(nodeCount)
      .addConfig(configName, configsetPath)
      .configure();

  // create an empty collection
  CollectionAdminRequest.createCollection(MY_COLLECTION_NAME, configName, numShards, numReplicas)
      .setMaxShardsPerNode(maxShardsPerNode)
      .process(cluster.getSolrClient());

  // compose an update request
  final UpdateRequest updateRequest = new UpdateRequest();

  // add some documents
  updateRequest.add(
      sdoc("id", "green",
          "channel_b_f", "0",
          "channel_g_f", "255",
          "channel_r_f", "0"));
  updateRequest.add(
      sdoc("id", "black",
          "channel_b_f", "0",
          "channel_g_f", "0",
          "channel_r_f", "0"));
  updateRequest.add(
      sdoc("id", "yellow",
          "channel_b_f", "0",
          "channel_g_f", "255",
          "channel_r_f", "255"));

  // make the update request
  updateRequest.commit(cluster.getSolrClient(), MY_COLLECTION_NAME);
}
Example 16
Source File: SystemLogListener.java From lucene-solr with Apache License 2.0
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName,
                    ActionContext context, Throwable error, String message) throws Exception {
  try {
    ClusterState clusterState = cloudManager.getClusterStateProvider().getClusterState();
    DocCollection coll = clusterState.getCollectionOrNull(collection);
    if (coll == null) {
      log.debug("Collection {} missing, skip sending event {}", collection, event);
      return;
    }
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField(CommonParams.TYPE, DOC_TYPE);
    doc.addField(SOURCE_FIELD, SOURCE);
    doc.addField("id", IdUtils.timeRandomId());
    doc.addField("event.id_s", event.getId());
    doc.addField(EVENT_TYPE_FIELD, event.getEventType().toString());
    doc.addField(EVENT_SOURCE_FIELD, event.getSource());
    doc.addField("event.time_l", event.getEventTime());
    doc.addField("timestamp", new Date());
    addMap("event.property.", doc, event.getProperties());
    doc.addField(STAGE_FIELD, stage.toString());
    if (actionName != null) {
      doc.addField(ACTION_FIELD, actionName);
    }
    if (message != null) {
      doc.addField(MESSAGE_FIELD, message);
    }
    addError(doc, error);
    // add JSON versions of event and context
    String eventJson = Utils.toJSONString(event);
    doc.addField("event_str", eventJson);
    if (context != null) {
      // capture specifics of operations after compute_plan action
      addOperations(doc, (List<SolrRequest>) context.getProperties().get("operations"));
      // capture specifics of responses after execute_plan action
      addResponses(doc, (List<NamedList<Object>>) context.getProperties().get("responses"));
      addActions(BEFORE_ACTIONS_FIELD, doc,
          (List<String>) context.getProperties().get(TriggerEventProcessorStage.BEFORE_ACTION.toString()));
      addActions(AFTER_ACTIONS_FIELD, doc,
          (List<String>) context.getProperties().get(TriggerEventProcessorStage.AFTER_ACTION.toString()));
      String contextJson = Utils.toJSONString(context);
      doc.addField("context_str", contextJson);
    }
    UpdateRequest req = new UpdateRequest();
    req.add(doc);
    req.setParam(CollectionAdminParams.COLLECTION, collection);
    cloudManager.request(req);
  } catch (Exception e) {
    if ((e instanceof SolrException) && e.getMessage().contains("Collection not found")) {
      // relatively benign but log this - collection still existed when we started
      log.info("Collection {} missing, skip sending event {}", collection, event);
    } else {
      log.warn("Exception sending event. Collection: {}, event: {}, exception: {}", collection, event, e);
    }
  }
}
Example 17
Source File: TestQueryingOnDownCollection.java From lucene-solr with Apache License 2.0
@Test
/**
 * Assert that requests to "down collection", i.e. a collection which has all replicas in down state
 * (but are hosted on nodes that are live), fail fast and throw meaningful exceptions
 */
public void testQueryToDownCollectionShouldFailFast() throws Exception {
  CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf", 2, 1)
      .setBasicAuthCredentials(USERNAME, PASSWORD)
      .process(cluster.getSolrClient());

  // Add some dummy documents
  UpdateRequest update = (UpdateRequest) new UpdateRequest().setBasicAuthCredentials(USERNAME, PASSWORD);
  for (int i = 0; i < 100; i++) {
    update.add("id", Integer.toString(i));
  }
  update.commit(cluster.getSolrClient(), COLLECTION_NAME);

  // Bring down replicas but keep nodes up. This could've been done by some combinations of collections API operations;
  // however, to make it faster, altering cluster state directly! ;-)
  downAllReplicas();

  // assert all replicas are in down state
  List<Replica> replicas = getCollectionState(COLLECTION_NAME).getReplicas();
  for (Replica replica : replicas) {
    assertEquals(replica.getState(), Replica.State.DOWN);
  }

  // assert all nodes as active
  assertEquals(3, cluster.getSolrClient().getClusterStateProvider().getLiveNodes().size());

  SolrClient client = cluster.getJettySolrRunner(0).newClient();

  @SuppressWarnings({"rawtypes"})
  SolrRequest req = new QueryRequest(new SolrQuery("*:*").setRows(0)).setBasicAuthCredentials(USERNAME, PASSWORD);

  // Without the SOLR-13793 fix, this causes requests to "down collection" to pile up (until the nodes run out
  // of serviceable threads and they crash, even for other collections hosted on the nodes).
  SolrException error = expectThrows(SolrException.class,
      "Request should fail after trying all replica nodes once",
      () -> client.request(req, COLLECTION_NAME)
  );
  client.close();

  assertEquals(error.code(), SolrException.ErrorCode.INVALID_STATE.code);
  assertTrue(error.getMessage().contains("No active replicas found for collection: " + COLLECTION_NAME));

  // run same set of tests on v2 client which uses V2HttpCall
  Http2SolrClient v2Client = new Http2SolrClient.Builder(cluster.getJettySolrRunner(0).getBaseUrl().toString())
      .build();

  error = expectThrows(SolrException.class,
      "Request should fail after trying all replica nodes once",
      () -> v2Client.request(req, COLLECTION_NAME)
  );
  v2Client.close();

  assertEquals(error.code(), SolrException.ErrorCode.INVALID_STATE.code);
  assertTrue(error.getMessage().contains("No active replicas found for collection: " + COLLECTION_NAME));
}
Example 18
Source File: SolrIO.java From beam with Apache License 2.0
private void flushBatch() throws IOException, InterruptedException {
  if (batch.isEmpty()) {
    return;
  }
  try {
    UpdateRequest updateRequest = new UpdateRequest();
    updateRequest.add(batch);
    Sleeper sleeper = Sleeper.DEFAULT;
    BackOff backoff = retryBackoff.backoff();
    int attempt = 0;
    while (true) {
      attempt++;
      try {
        solrClient.process(spec.getCollection(), updateRequest);
        break;
      } catch (Exception exception) {
        // fail immediately if there is no retry configuration, or if it doesn't handle this exception
        if (spec.getRetryConfiguration() == null
            || !spec.getRetryConfiguration().getRetryPredicate().test(exception)) {
          throw new IOException(
              "Error writing to Solr (no attempt made to retry)", exception);
        }

        // see if we can pause and try again
        if (!BackOffUtils.next(sleeper, backoff)) {
          throw new IOException(
              String.format(
                  "Error writing to Solr after %d attempt(s). No more attempts allowed",
                  attempt),
              exception);
        } else {
          // Note: this is used in test cases to verify behavior
          LOG.warn(String.format(RETRY_ATTEMPT_LOG, attempt), exception);
        }
      }
    }
  } finally {
    batch.clear();
  }
}
Example 19
Source File: StreamExpressionTest.java From lucene-solr with Apache License 2.0
@Test
public void testFeaturesSelectionStream() throws Exception {
  Assume.assumeTrue(!useAlias);

  CollectionAdminRequest.createCollection("destinationCollection", "ml", 2, 1).process(cluster.getSolrClient());
  cluster.waitForActiveCollection("destinationCollection", 2, 2);

  UpdateRequest updateRequest = new UpdateRequest();
  for (int i = 0; i < 5000; i += 2) {
    updateRequest.add(id, String.valueOf(i), "whitetok", "a b c d", "out_i", "1");
    updateRequest.add(id, String.valueOf(i + 1), "whitetok", "a b e f", "out_i", "0");
  }
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  StreamExpression expression;
  TupleStream stream;
  List<Tuple> tuples;
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);

  StreamFactory factory = new StreamFactory()
      .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress())
      .withCollectionZkHost("destinationCollection", cluster.getZkServer().getZkAddress())
      .withFunctionName("featuresSelection", FeaturesSelectionStream.class)
      .withFunctionName("search", CloudSolrStream.class)
      .withFunctionName("update", UpdateStream.class);

  try {
    String featuresExpression = "featuresSelection(collection1, q=\"*:*\", featureSet=\"first\", field=\"whitetok\", outcome=\"out_i\", numTerms=4)";
    // basic
    expression = StreamExpressionParser.parse(featuresExpression);
    stream = new FeaturesSelectionStream(expression, factory);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);

    assert (tuples.size() == 4);
    assertTrue(tuples.get(0).get("term_s").equals("c"));
    assertTrue(tuples.get(1).get("term_s").equals("d"));
    assertTrue(tuples.get(2).get("term_s").equals("e"));
    assertTrue(tuples.get(3).get("term_s").equals("f"));

    // update
    expression = StreamExpressionParser.parse("update(destinationCollection, " + featuresExpression + ")");
    stream = new UpdateStream(expression, factory);
    stream.setStreamContext(streamContext);
    getTuples(stream);
    cluster.getSolrClient().commit("destinationCollection");

    expression = StreamExpressionParser.parse("search(destinationCollection, q=featureSet_s:first, fl=\"index_i, term_s\", sort=\"index_i asc\")");
    stream = new CloudSolrStream(expression, factory);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    assertEquals(4, tuples.size());
    assertTrue(tuples.get(0).get("term_s").equals("c"));
    assertTrue(tuples.get(1).get("term_s").equals("d"));
    assertTrue(tuples.get(2).get("term_s").equals("e"));
    assertTrue(tuples.get(3).get("term_s").equals("f"));
  } finally {
    CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient());
    solrClientCache.close();
  }
}
Example 20
Source File: SolrClient.java From lucene-solr with Apache License 2.0
/**
 * Adds a collection of documents, specifying max time before they become committed
 *
 * @param collection the Solr collection to add documents to
 * @param docs the collection of documents
 * @param commitWithinMs max time (in ms) before a commit will happen
 *
 * @return an {@link org.apache.solr.client.solrj.response.UpdateResponse} from the server
 *
 * @throws IOException if there is a communication error with the server
 * @throws SolrServerException if there is an error on the server
 *
 * @since Solr 5.1
 */
public UpdateResponse add(String collection, Collection<SolrInputDocument> docs, int commitWithinMs)
    throws SolrServerException, IOException {
  UpdateRequest req = new UpdateRequest();
  req.add(docs);
  req.setCommitWithin(commitWithinMs);
  return req.process(this, collection);
}
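As a minimal usage sketch of the convenience method above (the base URL, the collection name "mycollection", and the field values are placeholder assumptions, not part of the SolrClient source):

import java.util.ArrayList;
import java.util.List;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;

public class CommitWithinSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder base URL and collection name -- adjust for your own cluster.
    try (SolrClient solrClient = new HttpSolrClient.Builder("http://localhost:8983/solr").build()) {
      List<SolrInputDocument> docs = new ArrayList<>();
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "42");
      docs.add(doc);
      // The documents become searchable within 10 seconds, even without an explicit commit.
      UpdateResponse rsp = solrClient.add("mycollection", docs, 10000);
      System.out.println("status: " + rsp.getStatus());
    }
  }
}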