Java Code Examples for org.apache.solr.client.solrj.request.CollectionAdminRequest#Create
The following examples show how to use org.apache.solr.client.solrj.request.CollectionAdminRequest#Create.
Each example is taken from an open source project; the source file, project, and license are noted above the code.
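Before the examples, here is a minimal, self-contained sketch of the typical usage pattern. It is not taken from any of the projects below: the ZooKeeper address, collection name, and configset name are placeholders, and it assumes a SolrJ version (roughly 6.x/7.x) where the no-arg CloudSolrClient.Builder with withZkHost is available. It uses the static factory method that most of the examples rely on; Examples 1 and 10 show the older setter-based constructor instead.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;

public class CreateCollectionSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address; adjust for your cluster. This builder style matches
    // SolrJ 6.x/7.x; newer releases use a different Builder constructor.
    try (CloudSolrClient client = new CloudSolrClient.Builder().withZkHost("localhost:9983").build()) {
      // name, configset, numShards, replicationFactor -- the same signature used throughout the examples
      CollectionAdminRequest.Create create =
          CollectionAdminRequest.createCollection("my_collection", "my_conf", 2, 1);
      CollectionAdminResponse response = create.process(client);
      if (!response.isSuccess()) {
        throw new IllegalStateException("Create failed: " + response.getErrorMessages());
      }
    }
  }
}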
Example 1
Source File: CollectionPerTimeFrameAssignmentStrategy.java From storm-solr with Apache License 2.0
protected void createCollection(CloudSolrClient cloudSolrClient, String collection) throws Exception {
  CollectionAdminRequest.Create createCmd = new CollectionAdminRequest.Create();
  createCmd.setCollectionName(collection);
  createCmd.setNumShards(numShards);
  createCmd.setConfigName(configName);
  createCmd.setReplicationFactor(replicationFactor);

  int liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes().size();
  // cast to double before dividing; otherwise the integer division makes Math.ceil a no-op
  int maxShardsPerNode = (int) Math.max(Math.ceil((double) (numShards * replicationFactor) / liveNodes), 1);
  createCmd.setMaxShardsPerNode(maxShardsPerNode);

  log.info("Creating new collection " + collection + " with " + numShards +
      " shards and " + replicationFactor + " replicas per shard");

  try {
    cloudSolrClient.request(createCmd);
  } catch (Exception exc) {
    // may have been created by another bolt instance
    cloudSolrClient.getZkStateReader().updateClusterState();
    if (!cloudSolrClient.getZkStateReader().getClusterState().hasCollection(collection)) {
      // failed to create the collection and it doesn't exist ... throw the error
      log.error("Failed to create collection " + collection + " due to: " + exc, exc);
      throw exc;
    }
  }
}
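Note that Example 1 (like Example 10 below) builds the request with the setter-based new CollectionAdminRequest.Create() constructor, which later SolrJ releases deprecated in favor of the static factory used by the other examples. As a rough sketch, the factory-style equivalent, assuming the same fields as the method above are in scope, would be:

// Hypothetical factory-style equivalent of Example 1's request construction,
// assuming collection, configName, numShards, replicationFactor, and
// maxShardsPerNode are in scope as in the method above.
CollectionAdminRequest.Create createCmd =
    CollectionAdminRequest.createCollection(collection, configName, numShards, replicationFactor);
createCmd.setMaxShardsPerNode(maxShardsPerNode);
cloudSolrClient.request(createCmd);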
Example 2
Source File: CreateCollectionCleanupTest.java From lucene-solr with Apache License 2.0
@Test
public void testAsyncCreateCollectionCleanup() throws Exception {
  final CloudSolrClient cloudClient = cluster.getSolrClient();
  String collectionName = "foo2";
  assertThat(CollectionAdminRequest.listCollections(cloudClient), not(hasItem(collectionName)));

  // Create a collection that would fail
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 1);

  Properties properties = new Properties();
  Path tmpDir = createTempDir();
  tmpDir = tmpDir.resolve("foo");
  Files.createFile(tmpDir);
  properties.put(CoreAdminParams.DATA_DIR, tmpDir.toString());
  create.setProperties(properties);
  create.setAsyncId("testAsyncCreateCollectionCleanup");
  create.process(cloudClient);
  RequestStatusState state = AbstractFullDistribZkTestBase.getRequestStateAfterCompletion("testAsyncCreateCollectionCleanup", 30, cloudClient);
  assertThat(state.getKey(), is("failed"));

  // Confirm using LIST that the collection does not exist
  assertThat("Failed collection is still in the clusterstate: "
          + cluster.getSolrClient().getClusterStateProvider().getClusterState().getCollectionOrNull(collectionName),
      CollectionAdminRequest.listCollections(cloudClient), not(hasItem(collectionName)));
}
Example 3
Source File: TestSegmentSorting.java From lucene-solr with Apache License 2.0
@Before
public void createCollection() throws Exception {
  final String collectionName = testName.getMethodName();
  final CloudSolrClient cloudSolrClient = cluster.getSolrClient();

  final Map<String, String> collectionProperties = new HashMap<>();
  collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-sortingmergepolicyfactory.xml");

  CollectionAdminRequest.Create cmd =
      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
          .setProperties(collectionProperties);

  if (random().nextBoolean()) {
    assertTrue(cmd.process(cloudSolrClient).isSuccess());
  } else { // async
    assertEquals(RequestStatusState.COMPLETED, cmd.processAndWait(cloudSolrClient, 30));
  }

  ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
  cluster.waitForActiveCollection(collectionName, NUM_SHARDS, NUM_SHARDS * REPLICATION_FACTOR);
  cloudSolrClient.setDefaultCollection(collectionName);
}
Example 4
Source File: ShardSplitTest.java From lucene-solr with Apache License 2.0
private void doSplitShardWithRule(SolrIndexSplitter.SplitMethod splitMethod) throws Exception {
  waitForThingsToLevelOut(15, TimeUnit.SECONDS);

  log.info("Starting testSplitShardWithRule");
  String collectionName = "shardSplitWithRule_" + splitMethod.toLower();
  CollectionAdminRequest.Create createRequest = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2)
      .setRule("shard:*,replica:<2,node:*");

  CollectionAdminResponse response = createRequest.process(cloudClient);
  assertEquals(0, response.getStatus());

  try {
    cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 2));
  } catch (TimeoutException e) {
    // the original snippet constructed this exception without throwing it; rethrow so the timeout actually fails the test
    throw new RuntimeException("Timeout waiting for 1 shard and 2 replicas.", e);
  }

  CollectionAdminRequest.SplitShard splitShardRequest = CollectionAdminRequest.splitShard(collectionName)
      .setShardName("shard1").setSplitMethod(splitMethod.toLower());
  response = splitShardRequest.process(cloudClient);
  assertEquals(String.valueOf(response.getErrorMessages()), 0, response.getStatus());
}
Example 5
Source File: Solr6Index.java From atlas with Apache License 2.0
private static void createCollectionIfNotExists(CloudSolrClient client, Configuration config, String collection)
    throws IOException, SolrServerException, KeeperException, InterruptedException {
  if (!checkIfCollectionExists(client, collection)) {
    final Integer numShards = config.get(NUM_SHARDS);
    final Integer maxShardsPerNode = config.get(MAX_SHARDS_PER_NODE);
    final Integer replicationFactor = config.get(REPLICATION_FACTOR);

    // Ideally this property is used so a new configset is not uploaded for every single
    // index (collection) created in solr.
    // If a generic configSet is not set, make the configset name the same as the collection.
    // This was the default behavior before a default configSet could be specified.
    final String genericConfigSet = config.has(SOLR_DEFAULT_CONFIG) ? config.get(SOLR_DEFAULT_CONFIG) : collection;

    final CollectionAdminRequest.Create createRequest =
        CollectionAdminRequest.createCollection(collection, genericConfigSet, numShards, replicationFactor);
    createRequest.setMaxShardsPerNode(maxShardsPerNode);

    final CollectionAdminResponse createResponse = createRequest.process(client);
    if (createResponse.isSuccess()) {
      logger.trace("Collection {} successfully created.", collection);
    } else {
      throw new SolrServerException(Joiner.on("\n").join(createResponse.getErrorMessages()));
    }
  }
  waitForRecoveriesToFinish(client, collection);
}
Example 6
Source File: TestSolrCloudWithDelegationTokens.java From lucene-solr with Apache License 2.0
/**
 * Test HttpSolrServer's delegation token support for Update Requests
 */
@Test
public void testDelegationTokenSolrClientWithUpdateRequests() throws Exception {
  String collectionName = "testDelegationTokensWithUpdate";

  // Get token
  String token = getDelegationToken(null, "bar", solrClientPrimary);
  assertNotNull(token);

  // Tests with update request.
  // Before SOLR-13921, a request without a commit would fail with a NullPointerException in
  // DelegationTokenHttpSolrClient.createMethod due to a missing null check there. (When requesting
  // a commit, the setAction method calls setParams on the request, so there is no NPE in createMethod.)
  final HttpSolrClient scUpdateWToken = new HttpSolrClient.Builder(solrClientPrimary.getBaseURL().toString())
      .withKerberosDelegationToken(token)
      .withResponseParser(solrClientPrimary.getParser())
      .build();

  // Create collection
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1);
  create.process(scUpdateWToken);

  try {
    // test update request with token via property and commit=true
    @SuppressWarnings({"rawtypes"})
    SolrRequest request = getUpdateRequest(true);
    doSolrRequest(scUpdateWToken, request, collectionName, HttpStatus.SC_OK);

    // test update request with token via property and commit=false
    request = getUpdateRequest(false);
    doSolrRequest(scUpdateWToken, request, collectionName, HttpStatus.SC_OK);
  } finally {
    scUpdateWToken.close();
  }
}
Example 7
Source File: TestSolrCloudWithHadoopAuthPlugin.java From lucene-solr with Apache License 2.0
protected void testCollectionCreateSearchDelete() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "testkerberoscollection";

  // create collection
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1",
      NUM_SHARDS, REPLICATION_FACTOR);
  create.process(solrClient);
  // The metrics counter for wrong credentials here really just means
  assertAuthMetricsMinimums(6, 3, 0, 3, 0, 0);

  SolrInputDocument doc = new SolrInputDocument();
  doc.setField("id", "1");
  solrClient.add(collectionName, doc);
  solrClient.commit(collectionName);
  assertAuthMetricsMinimums(10, 5, 0, 5, 0, 0);

  SolrQuery query = new SolrQuery();
  query.setQuery("*:*");
  QueryResponse rsp = solrClient.query(collectionName, query);
  assertEquals(1, rsp.getResults().getNumFound());

  CollectionAdminRequest.Delete deleteReq = CollectionAdminRequest.deleteCollection(collectionName);
  deleteReq.process(solrClient);
  AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName,
      solrClient.getZkStateReader(), true, 330);
  assertAuthMetricsMinimums(14, 8, 0, 6, 0, 0);
}
Example 8
Source File: MetricTriggerTest.java From lucene-solr with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(1)
      .addConfig("conf", configset("cloud-minimal"))
      .configure();
  CollectionAdminRequest.Create create =
      CollectionAdminRequest.createCollection(DEFAULT_TEST_COLLECTION_NAME, "conf", 1, 1);
  CloudSolrClient solrClient = cluster.getSolrClient();
  create.setMaxShardsPerNode(1);
  create.process(solrClient);
  cluster.waitForActiveCollection(DEFAULT_TEST_COLLECTION_NAME, 1, 1);
}
Example 9
Source File: TestBlobHandler.java From lucene-solr with Apache License 2.0
@Test
public void doBlobHandlerTest() throws Exception {
  try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
    CollectionAdminResponse response1;
    CollectionAdminRequest.Create createCollectionRequest = CollectionAdminRequest.createCollection(".system", 1, 2);
    response1 = createCollectionRequest.process(client);
    assertEquals(0, response1.getStatus());
    assertTrue(response1.isSuccess());

    DocCollection sysColl = cloudClient.getZkStateReader().getClusterState().getCollection(".system");
    Replica replica = sysColl.getActiveSlicesMap().values().iterator().next().getLeader();

    String baseUrl = replica.getStr(ZkStateReader.BASE_URL_PROP);
    String url = baseUrl + "/.system/config/requestHandler";
    MapWriter map = TestSolrConfigHandlerConcurrent.getAsMap(url, cloudClient);
    assertNotNull(map);
    assertEquals("solr.BlobHandler", map._get(asList("config", "requestHandler", "/blob", "class"), null));

    map = TestSolrConfigHandlerConcurrent.getAsMap(baseUrl + "/.system/schema/fields/blob", cloudClient);
    assertNotNull(map);
    assertEquals("blob", map._get(asList("field", "name"), null));
    assertEquals("bytes", map._get(asList("field", "type"), null));

    checkBlobPost(baseUrl, cloudClient);
    checkBlobPostMd5(baseUrl, cloudClient);
  }
}
Example 10
Source File: SolrIndex.java From titan1withtp3.1 with Apache License 2.0
private static void createCollectionIfNotExists(CloudSolrClient client, Configuration config, String collection)
    throws IOException, SolrServerException, KeeperException, InterruptedException {
  if (!checkIfCollectionExists(client, collection)) {
    Integer numShards = config.get(NUM_SHARDS);
    Integer maxShardsPerNode = config.get(MAX_SHARDS_PER_NODE);
    Integer replicationFactor = config.get(REPLICATION_FACTOR);

    // Ideally this property is used so a new configset is not uploaded for every single
    // index (collection) created in solr.
    // If a generic configSet is not set, make the configset name the same as the collection.
    // This was the default behavior before a default configSet could be specified.
    String genericConfigSet = config.has(SOLR_DEFAULT_CONFIG) ? config.get(SOLR_DEFAULT_CONFIG) : collection;

    CollectionAdminRequest.Create createRequest = new CollectionAdminRequest.Create();
    createRequest.setConfigName(genericConfigSet);
    createRequest.setCollectionName(collection);
    createRequest.setNumShards(numShards);
    createRequest.setMaxShardsPerNode(maxShardsPerNode);
    createRequest.setReplicationFactor(replicationFactor);

    CollectionAdminResponse createResponse = createRequest.process(client);
    if (createResponse.isSuccess()) {
      logger.trace("Collection {} successfully created.", collection);
    } else {
      throw new SolrServerException(Joiner.on("\n").join(createResponse.getErrorMessages()));
    }
  }
  waitForRecoveriesToFinish(client, collection);
}
Example 11
Source File: TestRequestForwarding.java From lucene-solr with Apache License 2.0
private void createCollection(String name, String config) throws Exception {
  CollectionAdminResponse response;
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(name, config, 2, 1);
  create.setMaxShardsPerNode(1);
  response = create.process(solrCluster.getSolrClient());
  if (response.getStatus() != 0 || response.getErrorMessages() != null) {
    fail("Could not create collection. Response: " + response.toString());
  }
  ZkStateReader zkStateReader = solrCluster.getSolrClient().getZkStateReader();
  solrCluster.waitForActiveCollection(name, 2, 2);
}
Example 12
Source File: MoveReplicaTest.java From lucene-solr with Apache License 2.0
@Test
// 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 17-Mar-2018 This JIRA is fixed, but this test still fails
// 17-Aug-2018 commented @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
// commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // annotated on: 24-Dec-2018
public void testFailedMove() throws Exception {
  String coll = getTestClass().getSimpleName() + "_failed_coll_" + inPlaceMove;
  int REPLICATION = 2;

  CloudSolrClient cloudClient = cluster.getSolrClient();

  // random create tlog or pull type replicas with nrt
  boolean isTlog = random().nextBoolean();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 1,
      isTlog ? 1 : 0, !isTlog ? 1 : 0);
  create.setAutoAddReplicas(false);
  cloudClient.request(create);

  addDocs(coll, 100);

  NamedList<Object> overSeerStatus = cluster.getSolrClient().request(CollectionAdminRequest.getOverseerStatus());
  String overseerLeader = (String) overSeerStatus.get("leader");

  // don't kill overseer in this test
  Replica replica;
  int count = 10;
  do {
    replica = getRandomReplica(coll, cloudClient);
  } while (!replica.getNodeName().equals(overseerLeader) && count-- > 0);
  assertNotNull("could not find non-overseer replica???", replica);

  Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
  ArrayList<String> l = new ArrayList<>(liveNodes);
  Collections.shuffle(l, random());
  String targetNode = null;
  for (String node : liveNodes) {
    if (!replica.getNodeName().equals(node) && !overseerLeader.equals(node)) {
      targetNode = node;
      break;
    }
  }
  assertNotNull(targetNode);

  CollectionAdminRequest.MoveReplica moveReplica = createMoveReplicaRequest(coll, replica, targetNode);
  moveReplica.setInPlaceMove(inPlaceMove);
  // start moving
  String asyncId = IdUtils.randomId();
  moveReplica.processAsync(asyncId, cloudClient);

  // shut down target node
  for (int i = 0; i < cluster.getJettySolrRunners().size(); i++) {
    if (cluster.getJettySolrRunner(i).getNodeName().equals(targetNode)) {
      JettySolrRunner j = cluster.stopJettySolrRunner(i);
      cluster.waitForJettyToStop(j);
      break;
    }
  }

  CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus(asyncId);
  // wait for async request success
  boolean success = true;
  for (int i = 0; i < 200; i++) {
    CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
    assertNotSame(rsp.getRequestStatus().toString(), rsp.getRequestStatus(), RequestStatusState.COMPLETED);
    if (rsp.getRequestStatus() == RequestStatusState.FAILED) {
      success = false;
      break;
    }
    Thread.sleep(500);
  }
  assertFalse(success);

  if (log.isInfoEnabled()) {
    log.info("--- current collection state: {}", cloudClient.getZkStateReader().getClusterState().getCollection(coll));
  }
  assertEquals(100, cluster.getSolrClient().query(coll, new SolrQuery("*:*")).getResults().getNumFound());
}
Example 13
Source File: AddReplicaTest.java From lucene-solr with Apache License 2.0
@Test
public void testAddMultipleReplicas() throws Exception {
  String collection = "testAddMultipleReplicas";
  CloudSolrClient cloudClient = cluster.getSolrClient();

  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collection, "conf1", 1, 1);
  create.setMaxShardsPerNode(2);
  cloudClient.request(create);
  cluster.waitForActiveCollection(collection, 1, 1);

  CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
      .setNrtReplicas(1)
      .setTlogReplicas(1)
      .setPullReplicas(1);
  RequestStatusState status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
  assertEquals(COMPLETED, status);
  cluster.waitForActiveCollection(collection, 1, 4);

  DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
  assertNotNull(docCollection);
  assertEquals(4, docCollection.getReplicas().size());
  assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
  assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
  assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());

  // try to add 5 more replicas which should fail because numNodes(4)*maxShardsPerNode(2)=8 and 4 replicas already exist
  addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
      .setNrtReplicas(3)
      .setTlogReplicas(1)
      .setPullReplicas(1);
  status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
  assertEquals(FAILED, status);
  docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
  assertNotNull(docCollection);
  // sanity check that everything is as before
  assertEquals(4, docCollection.getReplicas().size());
  assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
  assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
  assertEquals(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());

  // but adding any number of replicas is supported if an explicit create node set is specified
  // so test that as well
  LinkedHashSet<String> createNodeSet = new LinkedHashSet<>(2);
  createNodeSet.add(cluster.getRandomJetty(random()).getNodeName());
  while (true) {
    String nodeName = cluster.getRandomJetty(random()).getNodeName();
    if (createNodeSet.add(nodeName)) break;
  }
  addReplica = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
      .setNrtReplicas(3)
      .setTlogReplicas(1)
      .setPullReplicas(1)
      .setCreateNodeSet(String.join(",", createNodeSet));
  status = addReplica.processAndWait(collection + "_xyz1", cloudClient, 120);
  assertEquals(COMPLETED, status);
  waitForState("Timedout wait for collection to be created", collection, clusterShape(1, 9));
  docCollection = cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collection);
  assertNotNull(docCollection);
  assertEquals(9, docCollection.getReplicas().size());
  assertEquals(5, docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).size());
  assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).size());
  assertEquals(2, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).size());
}
Example 14
Source File: TestCloudSearcherWarming.java From lucene-solr with Apache License 2.0
@Test
public void testRepFactor1LeaderStartup() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();

  String collectionName = "testRepFactor1LeaderStartup";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
      .setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName());
  create.process(solrClient);
  cluster.waitForActiveCollection(collectionName, 1, 1);

  solrClient.setDefaultCollection(collectionName);

  String addListenerCommand = "{" +
      "'add-listener' : {'name':'newSearcherListener','event':'newSearcher', 'class':'" + SleepingSolrEventListener.class.getName() + "'}" +
      "'add-listener' : {'name':'firstSearcherListener','event':'firstSearcher', 'class':'" + SleepingSolrEventListener.class.getName() + "'}" +
      "}";

  ConfigRequest request = new ConfigRequest(addListenerCommand);
  solrClient.request(request);

  solrClient.add(new SolrInputDocument("id", "1"));
  solrClient.commit();

  AtomicInteger expectedDocs = new AtomicInteger(1);
  AtomicReference<String> failingCoreNodeName = new AtomicReference<>();
  CollectionStateWatcher stateWatcher = createActiveReplicaSearcherWatcher(expectedDocs, failingCoreNodeName);

  JettySolrRunner runner = cluster.getJettySolrRunner(0);
  runner.stop();
  cluster.waitForJettyToStop(runner);

  // check waitForState only after we are sure the node has shutdown and have forced an update to liveNodes
  // ie: workaround SOLR-13490
  cluster.getSolrClient().getZkStateReader().updateLiveNodes();
  waitForState("jetty count:" + cluster.getJettySolrRunners().size(), collectionName, clusterShape(1, 0));

  // restart
  sleepTime.set(1000);
  runner.start();
  cluster.waitForAllNodes(30);
  cluster.getSolrClient().getZkStateReader().registerCollectionStateWatcher(collectionName, stateWatcher);
  cluster.waitForActiveCollection(collectionName, 1, 1);
  assertNull("No replica should have been active without registering a searcher, found: " + failingCoreNodeName.get(),
      failingCoreNodeName.get());
  cluster.getSolrClient().getZkStateReader().removeCollectionStateWatcher(collectionName, stateWatcher);
}
Example 15
Source File: TestUtilizeNode.java From lucene-solr with Apache License 2.0
@Test
public void test() throws Exception {
  cluster.waitForAllNodes(5);
  String coll = "utilizenodecoll";
  CloudSolrClient cloudClient = cluster.getSolrClient();

  log.info("Creating Collection...");
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(coll, "conf1", 2, 2)
      .setMaxShardsPerNode(2);
  cloudClient.request(create);

  log.info("Spinning up additional jettyX...");
  JettySolrRunner jettyX = cluster.startJettySolrRunner();
  cluster.waitForAllNodes(30);

  assertNoReplicas("jettyX should not yet be utilized: ", coll, jettyX);

  if (log.isInfoEnabled()) {
    log.info("Sending UTILIZE command for jettyX ({})", jettyX.getNodeName());
  }
  cloudClient.request(new CollectionAdminRequest.UtilizeNode(jettyX.getNodeName()));

  // TODO: apparently we can't assert this? ...
  //
  // assertSomeReplicas("jettyX should now be utilized: ", coll, jettyX);
  //
  // ... it appears from the docs that unless there are policy violations,
  // this can be ignored unless jettyX has less "load" than other jetty instances?
  //
  // if the above is true, that means that this test is incredibly weak...
  // unless we know jettyX has at least one replica, then all the subsequent testing of the
  // port blacklist & additional UTILIZE command for jettyY are a waste of time.
  //
  // should we skip spinning up a *new* jettyX, and instead just pick an existing jetty?

  if (log.isInfoEnabled()) {
    log.info("jettyX replicas prior to being blacklisted: {}", getReplicaList(coll, jettyX));
  }

  String setClusterPolicyCommand = "{" +
      " 'set-cluster-policy': [" +
      "    {'port':" + jettyX.getLocalPort() + " , 'replica':0}" +
      "  ]" +
      "}";
  if (log.isInfoEnabled()) {
    log.info("Setting new policy to blacklist jettyX ({}) port={}", jettyX.getNodeName(), jettyX.getLocalPort());
  }
  @SuppressWarnings({"rawtypes"})
  SolrRequest req = AutoScalingRequest.create(SolrRequest.METHOD.POST, setClusterPolicyCommand);
  NamedList<Object> response = cloudClient.request(req);
  assertEquals(req + " => " + response, "success", response.get("result").toString());

  log.info("Spinning up additional jettyY...");
  JettySolrRunner jettyY = cluster.startJettySolrRunner();
  cluster.waitForAllNodes(30);

  assertNoReplicas("jettyY should not yet be utilized: ", coll, jettyY);
  if (log.isInfoEnabled()) {
    log.info("jettyX replicas prior to utilizing jettyY: {}", getReplicaList(coll, jettyX));
    log.info("Sending UTILIZE command for jettyY ({})", jettyY.getNodeName()); // logOk
  }
  cloudClient.request(new CollectionAdminRequest.UtilizeNode(jettyY.getNodeName()));

  assertSomeReplicas("jettyY should now be utilized: ", coll, jettyY);
}
Example 16
Source File: AbstractCloudBackupRestoreTestCase.java From lucene-solr with Apache License 2.0
@Test
public void testRestoreFailure() throws Exception {
  setTestSuffix("testfailure");
  replFactor = TestUtil.nextInt(random(), 1, 2);
  numTlogReplicas = TestUtil.nextInt(random(), 0, 1);
  numPullReplicas = TestUtil.nextInt(random(), 0, 1);

  CollectionAdminRequest.Create create =
      CollectionAdminRequest.createCollection(getCollectionName(), "conf1", NUM_SHARDS, replFactor, numTlogReplicas, numPullReplicas);

  if (NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas) > cluster.getJettySolrRunners().size()) {
    // cast to double before dividing so Math.ceil rounds up instead of truncating
    create.setMaxShardsPerNode((int) Math.ceil((double) NUM_SHARDS * (replFactor + numTlogReplicas + numPullReplicas)
        / cluster.getJettySolrRunners().size())); // just to assert it survives the restoration
  }

  CloudSolrClient solrClient = cluster.getSolrClient();
  create.process(solrClient);

  indexDocs(getCollectionName(), false);

  String backupLocation = getBackupLocation();
  String backupName = BACKUPNAME_PREFIX + testSuffix;

  DocCollection backupCollection = solrClient.getZkStateReader().getClusterState().getCollection(getCollectionName());

  log.info("Triggering Backup command");
  {
    CollectionAdminRequest.Backup backup = CollectionAdminRequest.backupCollection(getCollectionName(), backupName)
        .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
    assertEquals(0, backup.process(solrClient).getStatus());
  }

  log.info("Triggering Restore command");
  String restoreCollectionName = getCollectionName() + "_restored";
  {
    CollectionAdminRequest.Restore restore = CollectionAdminRequest.restoreCollection(restoreCollectionName, backupName)
        .setLocation(backupLocation).setRepositoryName(getBackupRepoName());
    if (backupCollection.getReplicas().size() > cluster.getJettySolrRunners().size()) {
      // may need to increase maxShardsPerNode (e.g. if it was shard split, then now we need more);
      // again, cast to double so the ceiling actually takes effect
      restore.setMaxShardsPerNode((int) Math.ceil((double) backupCollection.getReplicas().size()
          / cluster.getJettySolrRunners().size()));
    }
    restore.setConfigName("confFaulty");
    assertEquals(RequestStatusState.FAILED, restore.processAndWait(solrClient, 30));
    assertThat("Failed collection is still in the clusterstate: "
            + cluster.getSolrClient().getClusterStateProvider().getClusterState().getCollectionOrNull(restoreCollectionName),
        CollectionAdminRequest.listCollections(solrClient), not(hasItem(restoreCollectionName)));
  }
}
Example 17
Source File: IndexSizeTriggerTest.java From lucene-solr with Apache License 2.0
@Test
public void testSplitConfig() throws Exception {
  String collectionName = "testSplitConfig_collection";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
      "conf", 2, 2).setMaxShardsPerNode(2);
  create.process(solrClient);
  CloudUtil.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
      CloudUtil.clusterShape(2, 2, false, true));

  long waitForSeconds = 3 + random().nextInt(5);
  Map<String, Object> props = createTriggerProps(waitForSeconds);
  props.put(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
  props.put(IndexSizeTrigger.SPLIT_BY_PREFIX, true);

  try (IndexSizeTrigger trigger = new IndexSizeTrigger("index_size_trigger6")) {
    trigger.configure(loader, cloudManager, props);
    trigger.init();
    trigger.setProcessor(noFirstRunProcessor);
    trigger.run();

    for (int i = 0; i < 25; i++) {
      SolrInputDocument doc = new SolrInputDocument("id", "id-" + i);
      solrClient.add(collectionName, doc);
    }
    solrClient.commit(collectionName);

    AtomicBoolean fired = new AtomicBoolean(false);
    AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
    trigger.setProcessor(event -> {
      if (fired.compareAndSet(false, true)) {
        eventRef.set(event);
        long currentTimeNanos = timeSource.getTimeNs();
        long eventTimeNanos = event.getEventTime();
        long waitForNanos = TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS) - WAIT_FOR_DELTA_NANOS;
        if (currentTimeNanos - eventTimeNanos <= waitForNanos) {
          fail("processor was fired before the configured waitFor period: currentTimeNanos=" + currentTimeNanos +
              ", eventTimeNanos=" + eventTimeNanos + ",waitForNanos=" + waitForNanos);
        }
      } else {
        fail("IndexSizeTrigger was fired more than once!");
      }
      return true;
    });
    trigger.run();
    TriggerEvent ev = eventRef.get();
    // waitFor delay - should not produce any event yet
    assertNull("waitFor not elapsed but produced an event", ev);

    timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
    trigger.run();
    ev = eventRef.get();
    assertNotNull("should have fired an event", ev);
    @SuppressWarnings({"unchecked"})
    List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) ev.getProperty(TriggerEvent.REQUESTED_OPS);
    assertNotNull("should contain requestedOps", ops);
    assertEquals("number of ops: " + ops, 2, ops.size());
    boolean shard1 = false;
    boolean shard2 = false;
    for (TriggerEvent.Op op : ops) {
      assertEquals(CollectionParams.CollectionAction.SPLITSHARD, op.getAction());
      @SuppressWarnings({"unchecked"})
      Set<Pair<String, String>> hints = (Set<Pair<String, String>>) op.getHints().get(Suggester.Hint.COLL_SHARD);
      assertNotNull("hints", hints);
      assertEquals("hints", 1, hints.size());
      Pair<String, String> p = hints.iterator().next();
      assertEquals(collectionName, p.first());
      if (p.second().equals("shard1")) {
        shard1 = true;
      } else if (p.second().equals("shard2")) {
        shard2 = true;
      } else {
        fail("unexpected shard name " + p.second());
      }
      @SuppressWarnings({"unchecked"})
      Map<String, Object> params = (Map<String, Object>) op.getHints().get(Suggester.Hint.PARAMS);
      assertNotNull("params are null: " + op, params);

      // verify overrides for split config
      assertEquals("splitMethod: " + op, SolrIndexSplitter.SplitMethod.REWRITE.toLower(),
          params.get(CommonAdminParams.SPLIT_METHOD));
      assertEquals("splitByPrefix: " + op, true, params.get(CommonAdminParams.SPLIT_BY_PREFIX));
    }
    assertTrue("shard1 should be split", shard1);
    assertTrue("shard2 should be split", shard2);
  }
}
Example 18
Source File: IndexSizeTriggerTest.java From lucene-solr with Apache License 2.0
@Test
public void testTrigger() throws Exception {
  String collectionName = "testTrigger_collection";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
      "conf", 2, 2).setMaxShardsPerNode(2);
  create.process(solrClient);
  if (SPEED == 1) {
    cluster.waitForActiveCollection(collectionName, 2, 4);
  } else {
    CloudUtil.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
        CloudUtil.clusterShape(2, 2, false, true));
  }

  long waitForSeconds = 3 + random().nextInt(5);
  Map<String, Object> props = createTriggerProps(waitForSeconds);

  try (IndexSizeTrigger trigger = new IndexSizeTrigger("index_size_trigger1")) {
    trigger.configure(loader, cloudManager, props);
    trigger.init();
    trigger.setProcessor(noFirstRunProcessor);
    trigger.run();

    for (int i = 0; i < 25; i++) {
      SolrInputDocument doc = new SolrInputDocument("id", "id-" + i);
      solrClient.add(collectionName, doc);
    }
    solrClient.commit(collectionName);

    AtomicBoolean fired = new AtomicBoolean(false);
    AtomicReference<TriggerEvent> eventRef = new AtomicReference<>();
    trigger.setProcessor(event -> {
      if (fired.compareAndSet(false, true)) {
        eventRef.set(event);
        long currentTimeNanos = timeSource.getTimeNs();
        long eventTimeNanos = event.getEventTime();
        long waitForNanos = TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS) - WAIT_FOR_DELTA_NANOS;
        if (currentTimeNanos - eventTimeNanos <= waitForNanos) {
          fail("processor was fired before the configured waitFor period: currentTimeNanos=" + currentTimeNanos +
              ", eventTimeNanos=" + eventTimeNanos + ",waitForNanos=" + waitForNanos);
        }
      } else {
        fail("IndexSizeTrigger was fired more than once!");
      }
      return true;
    });
    trigger.run();
    TriggerEvent ev = eventRef.get();
    // waitFor delay - should not produce any event yet
    assertNull("waitFor not elapsed but produced an event", ev);

    timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
    trigger.run();
    ev = eventRef.get();
    assertNotNull("should have fired an event", ev);
    @SuppressWarnings({"unchecked"})
    List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) ev.getProperty(TriggerEvent.REQUESTED_OPS);
    assertNotNull("should contain requestedOps", ops);
    assertEquals("number of ops: " + ops, 2, ops.size());
    boolean shard1 = false;
    boolean shard2 = false;
    for (TriggerEvent.Op op : ops) {
      assertEquals(CollectionParams.CollectionAction.SPLITSHARD, op.getAction());
      @SuppressWarnings({"unchecked"})
      Set<Pair<String, String>> hints = (Set<Pair<String, String>>) op.getHints().get(Suggester.Hint.COLL_SHARD);
      assertNotNull("hints", hints);
      assertEquals("hints", 1, hints.size());
      Pair<String, String> p = hints.iterator().next();
      assertEquals(collectionName, p.first());
      if (p.second().equals("shard1")) {
        shard1 = true;
      } else if (p.second().equals("shard2")) {
        shard2 = true;
      } else {
        fail("unexpected shard name " + p.second());
      }
      @SuppressWarnings({"unchecked"})
      Map<String, Object> params = (Map<String, Object>) op.getHints().get(Suggester.Hint.PARAMS);
      assertNotNull("params are null: " + op, params);

      // verify default split configs
      assertEquals("splitMethod: " + op, SolrIndexSplitter.SplitMethod.LINK.toLower(),
          params.get(CommonAdminParams.SPLIT_METHOD));
      assertEquals("splitByPrefix: " + op, false, params.get(CommonAdminParams.SPLIT_BY_PREFIX));
    }
    assertTrue("shard1 should be split", shard1);
    assertTrue("shard2 should be split", shard2);
  }
}
Example 19
Source File: TestCollectionAPI.java From lucene-solr with Apache License 2.0
@Test
@ShardsFixed(num = 2)
public void test() throws Exception {
  try (CloudSolrClient client = createCloudClient(null)) {
    CollectionAdminRequest.Create req;
    if (useTlogReplicas()) {
      req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1", 2, 0, 1, 1);
    } else {
      req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf1", 2, 1, 0, 1);
    }
    req.setMaxShardsPerNode(2);
    setV2(req);
    client.request(req);
    assertV2CallsCount();
    createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
  }

  waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME, 2);
  waitForCollection(cloudClient.getZkStateReader(), COLLECTION_NAME1, 1);
  waitForRecoveriesToFinish(COLLECTION_NAME, false);
  waitForRecoveriesToFinish(COLLECTION_NAME1, false);

  listCollection();
  clusterStatusNoCollection();
  clusterStatusWithCollection();
  clusterStatusWithCollectionAndShard();
  clusterStatusWithCollectionAndMultipleShards();
  clusterStatusWithRouteKey();
  clusterStatusAliasTest();
  clusterStatusRolesTest();
  clusterStatusBadCollectionTest();
  replicaPropTest();
  clusterStatusZNodeVersion();
  testCollectionCreationCollectionNameValidation();
  testCollectionCreationTooManyShards();
  testReplicationFactorValidaton();
  testCollectionCreationShardNameValidation();
  testAliasCreationNameValidation();
  testShardCreationNameValidation();
  testNoConfigset();
  testModifyCollection(); // deletes replicationFactor property from collections, be careful adding new tests after this one!
}
Example 20
Source File: TestSimLargeCluster.java From lucene-solr with Apache License 2.0
@Test
@SuppressWarnings({"unchecked"})
public void testSearchRate() throws Exception {
  SolrClient solrClient = cluster.simGetSolrClient();
  String collectionName = "testSearchRate";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
      "conf", 2, 10);
  create.process(solrClient);

  if (log.isInfoEnabled()) {
    log.info("Ready after {} ms", CloudUtil.waitForState(cluster, collectionName, 300, TimeUnit.SECONDS,
        CloudUtil.clusterShape(2, 10, false, true)));
  }

  // collect the node names for shard1
  Set<String> nodes = new HashSet<>();
  cluster.getSimClusterStateProvider().getClusterState().getCollection(collectionName)
      .getSlice("shard1")
      .getReplicas()
      .forEach(r -> nodes.add(r.getNodeName()));
  String metricName = "QUERY./select.requestTimes:1minRate";
  // simulate search traffic
  cluster.getSimClusterStateProvider().simSetShardValue(collectionName, "shard1", metricName, 40, false, true);

  // now define the trigger. doing it earlier may cause partial events to be generated (where only some
  // nodes / replicas exceeded the threshold).
  assertAutoScalingRequest(
      "{" +
      "'set-trigger' : {" +
      "'name' : 'search_rate_trigger'," +
      "'event' : 'searchRate'," +
      "'waitFor' : '" + waitForSeconds + "s'," +
      "'aboveRate' : 1.0," +
      "'aboveNodeRate' : 1.0," +
      "'enabled' : true," +
      "'actions' : [" +
      "{'name':'compute','class':'" + ComputePlanAction.class.getName() + "'}," +
      "{'name':'execute','class':'" + ExecutePlanAction.class.getName() + "'}," +
      "{'name':'test','class':'" + FinishTriggerAction.class.getName() + "'}" +
      "]" +
      "}}");

  // we're going to expect our trigger listener to process exactly one captured event
  listenerEventLatch = new CountDownLatch(1);
  assertAutoScalingRequest(
      "{" +
      "'set-listener' : " +
      "{" +
      "'name' : 'srt'," +
      "'trigger' : 'search_rate_trigger'," +
      "'stage' : ['FAILED','SUCCEEDED']," +
      "'class' : '" + TestTriggerListener.class.getName() + "'" +
      "}" +
      "}");
  assertAutoscalingUpdateComplete();

  assertTrue("Trigger did not finish even after await()ing an excessive amount of time",
      triggerFinishedLatch.await(60, TimeUnit.SECONDS));
  assertTrue("The listener didn't record the event even after await()ing an excessive amount of time",
      listenerEventLatch.await(60, TimeUnit.SECONDS));

  List<CapturedEvent> events = listenerEvents.get("srt");
  assertNotNull("no srt events: " + listenerEvents.toString(), events);
  assertEquals(events.toString(), 1, events.size());

  CapturedEvent ev = events.get(0);
  assertEquals(TriggerEventType.SEARCHRATE, ev.event.getEventType());
  Map<String, Number> m = (Map<String, Number>) ev.event.getProperty(SearchRateTrigger.HOT_NODES);
  assertNotNull(m);
  assertEquals(nodes.size(), m.size());
  assertEquals(nodes, m.keySet());
  m.forEach((k, v) -> assertEquals(4.0, v.doubleValue(), 0.01));

  List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) ev.event.getProperty(TriggerEvent.REQUESTED_OPS);
  assertNotNull(ops);
  assertEquals(ops.toString(), 1, ops.size());
  ops.forEach(op -> {
    assertEquals(CollectionParams.CollectionAction.ADDREPLICA, op.getAction());
    assertEquals(1, op.getHints().size());
    Object o = op.getHints().get(Suggester.Hint.COLL_SHARD);
    // this may be a pair or a HashSet of pairs with size 1
    Pair<String, String> hint = null;
    if (o instanceof Pair) {
      hint = (Pair<String, String>) o;
    } else if (o instanceof Set) {
      assertEquals("unexpected number of hints: " + o, 1, ((Set) o).size());
      o = ((Set) o).iterator().next();
      assertTrue("unexpected hint: " + o, o instanceof Pair);
      hint = (Pair<String, String>) o;
    } else {
      fail("unexpected hints: " + o);
    }
    assertNotNull(hint);
    assertEquals(collectionName, hint.first());
    assertEquals("shard1", hint.second());
  });
}