Java Code Examples for org.apache.solr.client.solrj.embedded.JettySolrRunner#getCoreContainer()
The following examples show how to use org.apache.solr.client.solrj.embedded.JettySolrRunner#getCoreContainer().
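Across these projects the pattern is essentially the same: ask a running JettySolrRunner for its CoreContainer, guarding against null, since the container is not available while the node is still starting up or after it has been stopped. As a minimal sketch of that pattern (not taken from any of the projects below, and assuming a started MiniSolrCloudCluster held in a field named cluster, as SolrCloud test fixtures commonly provide), iterating over every hosted core might look like this:

import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;

// Iterate every node in the test cluster and list the cores it currently hosts.
for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
  CoreContainer coreContainer = runner.getCoreContainer();
  if (coreContainer == null) {
    continue; // node not fully started (or already stopped), so no container is available
  }
  for (SolrCore core : coreContainer.getCores()) {
    System.out.println(runner.getBaseUrl() + " hosts core " + core.getName());
  }
}

The null check is the same guard several of the examples below apply before touching the container.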
Example 1
Source File: TestTlogReplica.java (from lucene-solr, Apache License 2.0)
private List<SolrCore> getSolrCore(boolean isLeader) {
  List<SolrCore> rs = new ArrayList<>();
  CloudSolrClient cloudClient = cluster.getSolrClient();
  DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
  for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
    if (solrRunner.getCoreContainer() == null) continue;
    for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
      CloudDescriptor cloudDescriptor = solrCore.getCoreDescriptor().getCloudDescriptor();
      Slice slice = docCollection.getSlice(cloudDescriptor.getShardId());
      Replica replica = docCollection.getReplica(cloudDescriptor.getCoreNodeName());
      if (slice.getLeader().equals(replica) && isLeader) {
        rs.add(solrCore);
      } else if (!slice.getLeader().equals(replica) && !isLeader) {
        rs.add(solrCore);
      }
    }
  }
  return rs;
}
Example 2
Source File: TestTlogReplica.java (from lucene-solr, Apache License 2.0)
private List<JettySolrRunner> getSolrRunner(boolean isLeader) {
  List<JettySolrRunner> rs = new ArrayList<>();
  CloudSolrClient cloudClient = cluster.getSolrClient();
  DocCollection docCollection = cloudClient.getZkStateReader().getClusterState().getCollection(collectionName);
  for (JettySolrRunner solrRunner : cluster.getJettySolrRunners()) {
    if (solrRunner.getCoreContainer() == null) continue;
    for (SolrCore solrCore : solrRunner.getCoreContainer().getCores()) {
      CloudDescriptor cloudDescriptor = solrCore.getCoreDescriptor().getCloudDescriptor();
      Slice slice = docCollection.getSlice(cloudDescriptor.getShardId());
      Replica replica = docCollection.getReplica(cloudDescriptor.getCoreNodeName());
      if (slice.getLeader() == replica && isLeader) {
        rs.add(solrRunner);
      } else if (slice.getLeader() != replica && !isLeader) {
        rs.add(solrRunner);
      }
    }
  }
  return rs;
}
Example 3
Source File: SolrITInitializer.java (from SearchServices, GNU Lesser General Public License v3.0)
public static void initSingleSolrServer(String testClassName, Properties solrcoreProperties) throws Throwable {
  initSolrServers(0, testClassName, solrcoreProperties);
  JettySolrRunner jsr = jettyContainers.get(testClassName);
  CoreContainer coreContainer = jsr.getCoreContainer();
  AlfrescoCoreAdminHandler coreAdminHandler = (AlfrescoCoreAdminHandler) coreContainer.getMultiCoreHandler();
  assertNotNull(coreAdminHandler);

  String[] extras = null;
  if ((solrcoreProperties != null) && !solrcoreProperties.isEmpty()) {
    int i = 0;
    extras = new String[solrcoreProperties.size() * 2];
    for (Map.Entry<Object, Object> prop : solrcoreProperties.entrySet()) {
      extras[i++] = "property." + prop.getKey();
      extras[i++] = (String) prop.getValue();
    }
  }

  defaultCore = createCoreUsingTemplate(coreContainer, coreAdminHandler, "alfresco", "rerank", 1, 1, extras);
  assertNotNull(defaultCore);

  String url = buildUrl(jsr.getLocalPort()) + "/" + "alfresco";
  SolrClient standaloneClient = createNewSolrClient(url);
  assertNotNull(standaloneClient);
  solrCollectionNameToStandaloneClient.put("alfresco", standaloneClient);
}
Example 4
Source File: AbstractAlfrescoDistributedIT.java (from SearchServices, GNU Lesser General Public License v3.0)
protected static List<AlfrescoCoreAdminHandler> getAdminHandlers(Collection<JettySolrRunner> runners) {
  List<AlfrescoCoreAdminHandler> coreAdminHandlers = new ArrayList<>();
  for (JettySolrRunner jettySolrRunner : runners) {
    CoreContainer coreContainer = jettySolrRunner.getCoreContainer();
    AlfrescoCoreAdminHandler coreAdminHandler = (AlfrescoCoreAdminHandler) coreContainer.getMultiCoreHandler();
    coreAdminHandlers.add(coreAdminHandler);
  }
  return coreAdminHandlers;
}
Example 5
Source File: ChaosMonkey.java (from lucene-solr, Apache License 2.0)
public void expireSession(final JettySolrRunner jetty) {
  CoreContainer cores = jetty.getCoreContainer();
  if (cores != null) {
    monkeyLog("expire session for " + jetty.getLocalPort() + " !");
    causeConnectionLoss(jetty);
    long sessionId = cores.getZkController().getZkClient()
        .getSolrZooKeeper().getSessionId();
    zkServer.expire(sessionId);
  }
}
Example 6
Source File: ChaosMonkey.java (from lucene-solr, Apache License 2.0)
public static void causeConnectionLoss(JettySolrRunner jetty) {
  CoreContainer cores = jetty.getCoreContainer();
  if (cores != null) {
    monkeyLog("Will cause connection loss on " + jetty.getLocalPort());
    SolrZkClient zkClient = cores.getZkController().getZkClient();
    zkClient.getSolrZooKeeper().closeCnxn();
  }
}
Example 7
Source File: MiniSolrCloudCluster.java (from lucene-solr, Apache License 2.0)
/**
 * Make the zookeeper session on a particular jetty expire
 */
public void expireZkSession(JettySolrRunner jetty) {
  CoreContainer cores = jetty.getCoreContainer();
  if (cores != null) {
    SolrZkClient zkClient = cores.getZkController().getZkClient();
    zkClient.getSolrZooKeeper().closeCnxn();
    long sessionId = zkClient.getSolrZooKeeper().getSessionId();
    zkServer.expire(sessionId);
    if (log.isInfoEnabled()) {
      log.info("Expired zookeeper session {} from node {}", sessionId, jetty.getBaseUrl());
    }
  }
}
Example 8
Source File: MiniSolrCloudCluster.java (from lucene-solr, Apache License 2.0)
public Overseer getOpenOverseer() {
  List<Overseer> overseers = new ArrayList<>();
  for (int i = 0; i < jettys.size(); i++) {
    JettySolrRunner runner = getJettySolrRunner(i);
    if (runner.getCoreContainer() != null) {
      overseers.add(runner.getCoreContainer().getZkController().getOverseer());
    }
  }
  return getOpenOverseer(overseers);
}
Example 9
Source File: TestReplicationHandler.java (from lucene-solr, Apache License 2.0)
private void checkForSingleIndex(JettySolrRunner jetty, boolean afterReload) {
  CoreContainer cores = jetty.getCoreContainer();
  Collection<SolrCore> theCores = cores.getCores();
  for (SolrCore core : theCores) {
    String ddir = core.getDataDir();
    CachingDirectoryFactory dirFactory = getCachingDirectoryFactory(core);
    synchronized (dirFactory) {
      Set<String> livePaths = dirFactory.getLivePaths();
      // one for data, one for the index under data and one for the snapshot metadata.
      // we also allow one extra index dir - it may not be removed until the core is closed
      if (afterReload) {
        assertTrue(livePaths.toString() + ":" + livePaths.size(), 3 == livePaths.size() || 4 == livePaths.size());
      } else {
        assertTrue(livePaths.toString() + ":" + livePaths.size(), 3 == livePaths.size());
      }
      // :TODO: assert that one of the paths is a subpath of the other
    }
    if (dirFactory instanceof StandardDirectoryFactory) {
      System.out.println(Arrays.asList(new File(ddir).list()));
      // we also allow one extra index dir - it may not be removed until the core is closed
      int cnt = indexDirCount(ddir);
      // if after reload, there may be 2 index dirs while the reloaded SolrCore closes.
      if (afterReload) {
        assertTrue("found:" + cnt + Arrays.asList(new File(ddir).list()).toString(), 1 == cnt || 2 == cnt);
      } else {
        assertTrue("found:" + cnt + Arrays.asList(new File(ddir).list()).toString(), 1 == cnt);
      }
    }
  }
}
Example 10
Source File: CollectionsAPIDistributedZkTest.java (from lucene-solr, Apache License 2.0)
private void checkInstanceDirs(JettySolrRunner jetty) throws IOException {
  CoreContainer cores = jetty.getCoreContainer();
  Collection<SolrCore> theCores = cores.getCores();
  for (SolrCore core : theCores) {
    // look for core props file
    Path instancedir = core.getInstancePath();
    assertTrue("Could not find expected core.properties file", Files.exists(instancedir.resolve("core.properties")));

    Path expected = Paths.get(jetty.getSolrHome()).toAbsolutePath().resolve(core.getName());

    assertTrue("Expected: " + expected + "\nFrom core stats: " + instancedir, Files.isSameFile(expected, instancedir));
  }
}
Example 11
Source File: HttpPartitionTest.java (from lucene-solr, Apache License 2.0)
protected void testRf2() throws Exception {
  // create a collection that has 1 shard but 2 replicas
  String testCollectionName = "c8n_1x2";
  createCollectionRetry(testCollectionName, "conf1", 1, 2, 1);
  cloudClient.setDefaultCollection(testCollectionName);

  sendDoc(1);

  Replica notLeader = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 2, maxWaitSecsToSeeAllActive).get(0);
  JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(getShardLeader(testCollectionName, "shard1", 1000)));

  // ok, now introduce a network partition between the leader and the replica
  SocketProxy proxy = getProxyForReplica(notLeader);
  SocketProxy leaderProxy = getProxyForReplica(getShardLeader(testCollectionName, "shard1", 1000));

  proxy.close();
  leaderProxy.close();

  // indexing during a partition
  sendDoc(2, null, leaderJetty);
  // replica should publish itself as DOWN if the network is not healed after some amount of time
  waitForState(testCollectionName, notLeader.getName(), DOWN, 10000);

  proxy.reopen();
  leaderProxy.reopen();

  List<Replica> notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 2, maxWaitSecsToSeeAllActive);

  int achievedRf = sendDoc(3);
  if (achievedRf == 1) {
    // this case can happen when the leader reuses a connection established before the network partition
    // TODO: Remove when SOLR-11776 gets committed
    ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 2, maxWaitSecsToSeeAllActive);
  }

  // sent 3 docs in so far, verify they are on the leader and replica
  assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 3);

  // Get the max version from the replica core to make sure it gets updated after recovery (see SOLR-7625)
  JettySolrRunner replicaJetty = getJettyOnPort(getReplicaPort(notLeader));
  CoreContainer coreContainer = replicaJetty.getCoreContainer();
  ZkCoreNodeProps replicaCoreNodeProps = new ZkCoreNodeProps(notLeader);
  String coreName = replicaCoreNodeProps.getCoreName();
  Long maxVersionBefore = null;
  try (SolrCore core = coreContainer.getCore(coreName)) {
    assertNotNull("Core '" + coreName + "' not found for replica: " + notLeader.getName(), core);
    UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
    maxVersionBefore = ulog.getCurrentMaxVersion();
  }
  assertNotNull("max version bucket seed not set for core " + coreName, maxVersionBefore);
  log.info("Looked up max version bucket seed {} for core {}", maxVersionBefore, coreName);

  // now up the stakes and do more docs
  int numDocs = TEST_NIGHTLY ? 1000 : 105;
  boolean hasPartition = false;
  for (int d = 0; d < numDocs; d++) {
    // create / restore partition every 10 docs
    if (d % 10 == 0) {
      if (hasPartition) {
        proxy.reopen();
        leaderProxy.reopen();
        hasPartition = false;
      } else {
        if (d >= 10) {
          proxy.close();
          leaderProxy.close();
          hasPartition = true;
          Thread.sleep(sleepMsBeforeHealPartition);
        }
      }
    }
    // always send doc directly to leader without going through proxy
    sendDoc(d + 4, null, leaderJetty); // 4 is offset as we've already indexed 1-3
  }

  // restore connectivity if lost
  if (hasPartition) {
    proxy.reopen();
    leaderProxy.reopen();
  }

  notLeaders = ensureAllReplicasAreActive(testCollectionName, "shard1", 1, 2, maxWaitSecsToSeeAllActive);

  try (SolrCore core = coreContainer.getCore(coreName)) {
    assertNotNull("Core '" + coreName + "' not found for replica: " + notLeader.getName(), core);
    Long currentMaxVersion = core.getUpdateHandler().getUpdateLog().getCurrentMaxVersion();
    log.info("After recovery, looked up NEW max version bucket seed {} for core {}, was: {}",
        currentMaxVersion, coreName, maxVersionBefore);
    assertTrue("max version bucket seed not updated after recovery!", currentMaxVersion > maxVersionBefore);
  }

  // verify all docs received
  assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, numDocs + 3);

  log.info("testRf2 succeeded ... deleting the {} collection", testCollectionName);

  // try to clean up
  attemptCollectionDelete(cloudClient, testCollectionName);
}
Example 12
Source File: TestRandomRequestDistribution.java (from lucene-solr, Apache License 2.0)
/**
 * Asserts that requests aren't always sent to the same poor node. See SOLR-7493
 */
private void testRequestTracking() throws Exception {

  CollectionAdminRequest.createCollection("a1x2", "conf1", 1, 2)
      .setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
      .process(cloudClient);

  CollectionAdminRequest.createCollection("b1x1", "conf1", 1, 1)
      .setCreateNodeSet(nodeNames.get(2))
      .process(cloudClient);

  waitForRecoveriesToFinish("a1x2", true);
  waitForRecoveriesToFinish("b1x1", true);

  cloudClient.getZkStateReader().forceUpdateCollection("b1x1");

  // get direct access to the metrics counters for each core/replica we're interested to monitor them
  final Map<String,Counter> counters = new LinkedHashMap<>();
  for (JettySolrRunner runner : jettys) {
    CoreContainer container = runner.getCoreContainer();
    SolrMetricManager metricManager = container.getMetricManager();
    for (SolrCore core : container.getCores()) {
      if ("a1x2".equals(core.getCoreDescriptor().getCollectionName())) {
        String registry = core.getCoreMetricManager().getRegistryName();
        Counter cnt = metricManager.counter(null, registry, "requests", "QUERY./select");
        // sanity check
        assertEquals(core.getName() + " has already received some requests?", 0, cnt.getCount());
        counters.put(core.getName(), cnt);
      }
    }
  }
  assertEquals("Sanity Check: we know there should be 2 replicas", 2, counters.size());

  // send queries to the node that doesn't host any core/replica and see where it routes them
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocCollection b1x1 = clusterState.getCollection("b1x1");
  Collection<Replica> replicas = b1x1.getSlice("shard1").getReplicas();
  assertEquals(1, replicas.size());
  String baseUrl = replicas.iterator().next().getStr(ZkStateReader.BASE_URL_PROP);
  if (!baseUrl.endsWith("/")) baseUrl += "/";

  try (HttpSolrClient client = getHttpSolrClient(baseUrl + "a1x2", 2000, 5000)) {

    long expectedTotalRequests = 0;
    Set<String> uniqueCoreNames = new LinkedHashSet<>();

    log.info("Making requests to {} a1x2", baseUrl);
    while (uniqueCoreNames.size() < counters.keySet().size() && expectedTotalRequests < 1000L) {
      expectedTotalRequests++;
      client.query(new SolrQuery("*:*"));

      long actualTotalRequests = 0;
      for (Map.Entry<String,Counter> e : counters.entrySet()) {
        final long coreCount = e.getValue().getCount();
        actualTotalRequests += coreCount;
        if (0 < coreCount) {
          uniqueCoreNames.add(e.getKey());
        }
      }
      assertEquals("Sanity Check: Num Queries So Far Doesn't Match Total????",
          expectedTotalRequests, actualTotalRequests);
    }
    log.info("Total requests: {}", expectedTotalRequests);

    assertEquals("either request randomization code is broken of this test seed is really unlucky, " +
        "Gave up waiting for requests to hit every core at least once after " + expectedTotalRequests + " requests",
        uniqueCoreNames.size(), counters.size());
  }
}