Java Code Examples for org.apache.solr.common.cloud.ClusterState#getCollection()
The following examples show how to use org.apache.solr.common.cloud.ClusterState#getCollection().
All of the examples are taken from the Apache lucene-solr project and are licensed under the Apache License 2.0; the original source file is listed above each example.
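Before diving into the examples, here is a minimal sketch of the pattern nearly all of them share: obtain the current ClusterState, look up a DocCollection by name with getCollection(), and then walk its slices and replicas. The zkStateReader variable and the "gettingstarted" collection name are illustrative assumptions, not taken from any example below. Note also that recent versions of getCollection() throw a SolrException for an unknown collection (getCollectionOrNull() is the null-returning variant), which is why some of the examples below check for null and others do not.

// Minimal usage sketch, assuming an already-initialized ZkStateReader
// named "zkStateReader" and a collection named "gettingstarted".
ClusterState clusterState = zkStateReader.getClusterState();
DocCollection collection = clusterState.getCollection("gettingstarted");
for (Slice slice : collection.getSlices()) {
  for (Replica replica : slice.getReplicas()) {
    // Inspect per-replica details: shard name, replica name, host node, and state.
    System.out.println(slice.getName() + " -> " + replica.getName()
        + " on " + replica.getNodeName() + " (" + replica.getState() + ")");
  }
}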
Example 1
Source File: RebalanceLeaders.java From lucene-solr with Apache License 2.0
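This helper polls the cluster state (for up to 600 iterations, forcing a refresh between attempts) until every replica marked as preferred leader has actually become the shard leader, recording each success against the pending operations.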
private void checkLeaderStatus() throws InterruptedException, KeeperException {
  for (int idx = 0; pendingOps.size() > 0 && idx < 600; ++idx) {
    ClusterState clusterState = coreContainer.getZkController().getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();
    DocCollection dc = clusterState.getCollection(collectionName);
    for (Slice slice : dc.getSlices()) {
      for (Replica replica : slice.getReplicas()) {
        if (replica.isActive(liveNodes) && replica.getBool(SliceMutator.PREFERRED_LEADER_PROP, false)) {
          if (replica.getBool(LEADER_PROP, false)) {
            if (pendingOps.containsKey(slice.getName())) {
              // Record for return that the leader changed successfully
              pendingOps.remove(slice.getName());
              addToSuccesses(slice, replica);
              break;
            }
          }
        }
      }
    }
    TimeUnit.MILLISECONDS.sleep(100);
    coreContainer.getZkController().getZkStateReader().forciblyRefreshAllClusterStateSlow();
  }
  addAnyFailures();
}
Example 2
Source File: DeleteNodeCmd.java From lucene-solr with Apache License 2.0
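This helper determines which of the given replicas cannot be safely deleted, flagging any replica that is either the only one in its shard or the only remaining non-PULL replica.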
static List<String> verifyReplicaAvailability(List<ZkNodeProps> sourceReplicas, ClusterState state) {
  List<String> res = new ArrayList<>();
  for (ZkNodeProps sourceReplica : sourceReplicas) {
    String coll = sourceReplica.getStr(COLLECTION_PROP);
    String shard = sourceReplica.getStr(SHARD_ID_PROP);
    String replicaName = sourceReplica.getStr(ZkStateReader.REPLICA_PROP);
    DocCollection collection = state.getCollection(coll);
    Slice slice = collection.getSlice(shard);
    if (slice.getReplicas().size() < 2) {
      // can't delete the only replica in existence
      res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
    } else { // check replica types
      int otherNonPullReplicas = 0;
      for (Replica r : slice.getReplicas()) {
        if (!r.getName().equals(replicaName) && !r.getType().equals(Replica.Type.PULL)) {
          otherNonPullReplicas++;
        }
      }
      // can't delete - there are no other non-pull replicas
      if (otherNonPullReplicas == 0) {
        res.add(coll + "/" + shard + "/" + replicaName + ", type=" + sourceReplica.getStr(ZkStateReader.REPLICA_TYPE));
      }
    }
  }
  return res;
}
Example 3
Source File: SolrCore.java From lucene-solr with Apache License 2.0
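Here the core's slice is looked up from the cluster state, and if that slice is still under construction the update log is switched to buffering before the core is published.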
/**
 * Set UpdateLog to buffer updates if the slice is in construction.
 */
private void bufferUpdatesIfConstructing(CoreDescriptor coreDescriptor) {
  if (coreContainer != null && coreContainer.isZooKeeperAware()) {
    if (reqHandlers.get("/get") == null) {
      log.warn("WARNING: RealTimeGetHandler is not registered at /get. SolrCloud will always use full index replication instead of the more efficient PeerSync method.");
    }

    // ZK pre-register would have already happened so we read slice properties now
    final ClusterState clusterState = coreContainer.getZkController().getClusterState();
    final DocCollection collection = clusterState.getCollection(coreDescriptor.getCloudDescriptor().getCollectionName());
    final Slice slice = collection.getSlice(coreDescriptor.getCloudDescriptor().getShardId());
    if (slice.getState() == Slice.State.CONSTRUCTION) {
      // set update log to buffer before publishing the core
      getUpdateHandler().getUpdateLog().bufferUpdates();
    }
  }
}
Example 4
Source File: SliceMutator.java From lucene-solr with Apache License 2.0
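This Overseer mutator removes a routing rule identified by its route key from a slice and returns a ZkWriteCommand carrying the updated slice, or a NO_OP if nothing needs to change.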
public ZkWriteCommand removeRoutingRule(final ClusterState clusterState, ZkNodeProps message) {
  String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
  if (!checkCollectionKeyExistence(message)) return ZkStateWriter.NO_OP;
  String shard = message.getStr(ZkStateReader.SHARD_ID_PROP);
  String routeKeyStr = message.getStr("routeKey");

  log.info("Overseer.removeRoutingRule invoked for collection: {} shard: {} routeKey: {}",
      collectionName, shard, routeKeyStr);

  DocCollection collection = clusterState.getCollection(collectionName);
  Slice slice = collection.getSlice(shard);
  if (slice == null) {
    log.warn("Unknown collection: {} shard: {}", collectionName, shard);
    return ZkStateWriter.NO_OP;
  }
  Map<String, RoutingRule> routingRules = slice.getRoutingRules();
  if (routingRules != null) {
    routingRules.remove(routeKeyStr); // no rules left
    Map<String, Object> props = slice.shallowCopy();
    props.put("routingRules", routingRules);
    Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
    return new ZkWriteCommand(collectionName,
        CollectionMutator.updateSlice(collectionName, collection, newSlice));
  }
  return ZkStateWriter.NO_OP;
}
Example 5
Source File: CdcrUpdateLogSynchronizer.java From lucene-solr with Apache License 2.0
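This method resolves the core URL of the current shard leader from the cluster state, returning null if no leader has been elected yet.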
private String getLeaderUrl() {
  ZkController zkController = core.getCoreContainer().getZkController();
  ClusterState cstate = zkController.getClusterState();
  DocCollection docCollection = cstate.getCollection(collection);
  ZkNodeProps leaderProps = docCollection.getLeader(shardId);
  if (leaderProps == null) { // we might not have a leader yet, returns null
    return null;
  }
  ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
  return nodeProps.getCoreUrl();
}
Example 6
Source File: ReplicaMutator.java From lucene-solr with Apache License 2.0
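This Overseer mutator validates a DELETEREPLICAPROP message, locates the target replica via getCollection(), and returns a write command that unsets the property, or a NO_OP if the property is not set.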
public ZkWriteCommand deleteReplicaProperty(ClusterState clusterState, ZkNodeProps message) {
  if (checkKeyExistence(message, ZkStateReader.COLLECTION_PROP) == false ||
      checkKeyExistence(message, ZkStateReader.SHARD_ID_PROP) == false ||
      checkKeyExistence(message, ZkStateReader.REPLICA_PROP) == false ||
      checkKeyExistence(message, ZkStateReader.PROPERTY_PROP) == false) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Overseer DELETEREPLICAPROP requires " +
            ZkStateReader.COLLECTION_PROP + " and " + ZkStateReader.SHARD_ID_PROP + " and " +
            ZkStateReader.REPLICA_PROP + " and " + ZkStateReader.PROPERTY_PROP + " no action taken.");
  }
  String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
  String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
  String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
  String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
  if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
    property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
  }

  DocCollection collection = clusterState.getCollection(collectionName);
  Replica replica = collection.getReplica(replicaName);
  if (replica == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Could not find collection/slice/replica " +
            collectionName + "/" + sliceName + "/" + replicaName + " no action taken.");
  }
  log.info("Deleting property {} for collection: {} slice: {} replica: {}", property, collectionName, sliceName, replicaName);
  log.debug("Full message: {}", message);
  String curProp = replica.getStr(property);
  if (curProp == null) return ZkStateWriter.NO_OP; // not there anyway, nothing to do.

  Slice slice = collection.getSlice(sliceName);
  DocCollection newCollection = SliceMutator.updateReplica(collection, slice, replicaName,
      unsetProperty(replica, property));
  return new ZkWriteCommand(collectionName, newCollection);
}
Example 7
Source File: SimClusterStateProvider.java From lucene-solr with Apache License 2.0
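This simulated query handler answers a '*:*' query by summing the numDocs of each active slice's leader, plus any buffered updates, and packaging the total into a QueryResponse.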
public QueryResponse simQuery(QueryRequest req) throws SolrException, InterruptedException, IOException {
  ensureNotClosed();
  String collection = req.getCollection();
  if (collection == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not set");
  }
  ensureSystemCollection(collection);
  if (!colShardReplicaMap.containsKey(collection)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection does not exist");
  }
  String query = req.getParams().get(CommonParams.Q);
  if (query == null || !query.equals("*:*")) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only '*:*' query is supported");
  }
  ClusterState clusterState = getClusterState();
  DocCollection coll = clusterState.getCollection(collection);
  AtomicLong count = new AtomicLong();
  for (Slice s : coll.getActiveSlicesArr()) {
    Replica r = s.getLeader();
    if (r == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, collection + "/" + s.getName() + " has no leader");
    }
    ReplicaInfo ri = getReplicaInfo(r);
    Number numDocs = (Number) ri.getVariable("SEARCHER.searcher.numDocs", 0L);
    count.addAndGet(numDocs.longValue());
    AtomicLong bufferedUpdates = (AtomicLong) sliceProperties.get(collection).get(s.getName()).get(BUFFERED_UPDATES);
    if (bufferedUpdates != null) {
      count.addAndGet(bufferedUpdates.get());
    }
  }
  QueryResponse rsp = new QueryResponse();
  NamedList<Object> values = new NamedList<>();
  values.add("responseHeader", new NamedList<>());
  SolrDocumentList docs = new SolrDocumentList();
  docs.setNumFound(count.get());
  values.add("response", docs);
  rsp.setResponse(values);
  return rsp;
}
Example 8
Source File: LeaderFailureAfterFreshStartTest.java From lucene-solr with Apache License 2.0
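This test helper repeatedly reads the cluster state, waiting for every replica of shard1 (excluding intentionally downed nodes) to be live and ACTIVE, and fails the test on timeout.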
private void waitTillNodesActive() throws Exception {
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;

    Collection<String> nodesDownNames = nodesDown.stream()
        .map(n -> n.coreNodeName)
        .collect(Collectors.toList());

    Collection<Replica> replicasToCheck = replicas.stream()
        .filter(r -> !nodesDownNames.contains(r.getName()))
        .collect(Collectors.toList());

    for (Replica replica : replicasToCheck) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
Example 9
Source File: ShardSplitTest.java From lucene-solr with Apache License 2.0
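This test creates a collection with mixed NRT and PULL replicas, splits shard1, and then uses getCollection() on the refreshed cluster state to verify the parent shard went INACTIVE while both sub-shards are ACTIVE with the same replica mix.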
private void doSplitMixedReplicaTypes(SolrIndexSplitter.SplitMethod splitMethod) throws Exception {
  waitForThingsToLevelOut(15, TimeUnit.SECONDS);
  String collectionName = "testSplitMixedReplicaTypes_" + splitMethod.toLower();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1", 1, 2, 0, 2); // TODO tlog replicas disabled right now.
  create.setMaxShardsPerNode(5); // some high number so we can create replicas without hindrance
  create.process(cloudClient);
  cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(1, 4));
  waitForRecoveriesToFinish(collectionName, false);

  for (int i = 0; i < 100; i++) {
    cloudClient.add(collectionName, getDoc("id", "id-" + i, "foo_s", "bar " + i));
  }
  cloudClient.commit(collectionName);

  CollectionAdminRequest.SplitShard splitShard = CollectionAdminRequest.splitShard(collectionName);
  splitShard.setShardName(SHARD1);
  splitShard.setSplitMethod(splitMethod.toLower());
  CollectionAdminResponse rsp = splitShard.process(cloudClient);
  waitForThingsToLevelOut(30, TimeUnit.SECONDS);

  cloudClient.waitForState(collectionName, 30, TimeUnit.SECONDS, SolrCloudTestCase.activeClusterShape(2, 12));

  cloudClient.getZkStateReader().forceUpdateCollection(collectionName);
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocCollection coll = clusterState.getCollection(collectionName);
  log.info("coll: {}", coll);

  // verify the original shard
  verifyShard(coll, SHARD1, Slice.State.INACTIVE, 2, 0, 2);
  // verify new sub-shards
  verifyShard(coll, SHARD1_0, Slice.State.ACTIVE, 2, 0, 2);
  verifyShard(coll, SHARD1_1, Slice.State.ACTIVE, 2, 0, 2);
}
Example 10
Source File: BackupManager.java From lucene-solr with Apache License 2.0
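Here a ClusterState is reconstructed from the backed-up JSON via ClusterState.createFromJson(), and getCollection() extracts the restored collection's metadata.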
/**
 * This method reads the meta-data information for the backed-up collection.
 *
 * @param backupLoc The base path used to store the backup data.
 * @param backupId The unique name for the backup.
 * @param collectionName The name of the collection whose meta-data is to be returned.
 * @return the meta-data information for the backed-up collection.
 * @throws IOException in case of errors.
 */
public DocCollection readCollectionState(URI backupLoc, String backupId, String collectionName) throws IOException {
  Objects.requireNonNull(collectionName);

  URI zkStateDir = repository.resolve(backupLoc, backupId, ZK_STATE_DIR);
  try (IndexInput is = repository.openInput(zkStateDir, COLLECTION_PROPS_FILE, IOContext.DEFAULT)) {
    byte[] arr = new byte[(int) is.length()]; // probably ok since the json file should be small.
    is.readBytes(arr, 0, (int) is.length());
    ClusterState c_state = ClusterState.createFromJson(-1, arr, Collections.emptySet());
    return c_state.getCollection(collectionName);
  }
}
Example 11
Source File: AddReplicaCmd.java From lucene-solr with Apache License 2.0
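This command helper verifies the target node is live, then either generates a new core name or validates that the supplied one is unique across the collection's existing replicas before returning a CreateReplica descriptor.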
public static CreateReplica assignReplicaDetails(SolrCloudManager cloudManager, ClusterState clusterState,
                                                 ZkNodeProps message, ReplicaPosition replicaPosition) {
  boolean skipCreateReplicaInClusterState = message.getBool(SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, false);

  String collection = message.getStr(COLLECTION_PROP);
  String node = replicaPosition.node;
  String shard = message.getStr(SHARD_ID_PROP);
  String coreName = message.getStr(CoreAdminParams.NAME);
  String coreNodeName = message.getStr(CoreAdminParams.CORE_NODE_NAME);
  Replica.Type replicaType = replicaPosition.type;

  if (StringUtils.isBlank(coreName)) {
    coreName = message.getStr(CoreAdminParams.PROPERTY_PREFIX + CoreAdminParams.NAME);
  }

  log.info("Node Identified {} for creating new replica of shard {} for collection {}", node, shard, collection);
  if (!clusterState.liveNodesContain(node)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Node: " + node + " is not live");
  }
  DocCollection coll = clusterState.getCollection(collection);
  if (coreName == null) {
    coreName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), coll, shard, replicaType);
  } else if (!skipCreateReplicaInClusterState) {
    // Validate that the core name is unique in that collection
    for (Slice slice : coll.getSlices()) {
      for (Replica replica : slice.getReplicas()) {
        String replicaCoreName = replica.getStr(CORE_NAME_PROP);
        if (coreName.equals(replicaCoreName)) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "Another replica with the same core name already exists for this collection");
        }
      }
    }
  }
  log.info("Returning CreateReplica command.");
  return new CreateReplica(collection, shard, node, replicaType, coreName, coreNodeName);
}
Example 12
Source File: ExclusiveSliceProperty.java From lucene-solr with Apache License 2.0
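This constructor validates an Overseer message for balancing a shard-unique property and resolves the target DocCollection from the cluster state.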
ExclusiveSliceProperty(ClusterState clusterState, ZkNodeProps message) {
  this.clusterState = clusterState;
  String tmp = message.getStr(ZkStateReader.PROPERTY_PROP);
  if (StringUtils.startsWith(tmp, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
    tmp = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + tmp;
  }
  this.property = tmp.toLowerCase(Locale.ROOT);
  collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);

  if (StringUtils.isBlank(collectionName) || StringUtils.isBlank(property)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Overseer '" + message.getStr(Overseer.QUEUE_OPERATION) + "' requires both the '" +
            ZkStateReader.COLLECTION_PROP + "' and '" + ZkStateReader.PROPERTY_PROP + "' parameters. No action taken.");
  }

  Boolean shardUnique = Boolean.parseBoolean(message.getStr(SHARD_UNIQUE));
  if (shardUnique == false && SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(this.property) == false) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Balancing properties amongst replicas in a slice requires that the property be a pre-defined property " +
            "(e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
            "Property: " + this.property + " shardUnique: " + Boolean.toString(shardUnique));
  }

  collection = clusterState.getCollection(collectionName);
  if (collection == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Could not find collection '" + collectionName + "' for overseer operation '" +
            message.getStr(Overseer.QUEUE_OPERATION) + "'. No action taken.");
  }
  onlyActiveNodes = Boolean.parseBoolean(message.getStr(ONLY_ACTIVE_NODES, "true"));
}
Example 13
Source File: SimClusterStateProvider.java From lucene-solr with Apache License 2.0
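This simulated DELETESHARD implementation looks up the collection and slice, then removes the slice's properties and replica mappings under a lock, recording success or failure in the results.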
/**
 * Delete a shard. This uses a similar algorithm as {@link org.apache.solr.cloud.api.collections.DeleteShardCmd}.
 * @param message operation details
 * @param results operation results
 */
@SuppressWarnings({"unchecked", "rawtypes"})
public void simDeleteShard(ZkNodeProps message, NamedList results) throws Exception {
  ensureNotClosed();
  if (message.getStr(CommonAdminParams.ASYNC) != null) {
    results.add(CoreAdminParams.REQUESTID, message.getStr(CommonAdminParams.ASYNC));
  }
  String collectionName = message.getStr(COLLECTION_PROP);
  String sliceName = message.getStr(SHARD_ID_PROP);
  ClusterState clusterState = getClusterState();
  DocCollection collection = clusterState.getCollection(collectionName);
  if (collection == null) {
    throw new Exception("Collection " + collectionName + " doesn't exist");
  }
  Slice slice = collection.getSlice(sliceName);
  if (slice == null) {
    throw new Exception("Collection " + collectionName + " slice " + sliceName + " doesn't exist.");
  }

  opDelay(collectionName, CollectionParams.CollectionAction.DELETESHARD.name());

  lock.lockInterruptibly();
  try {
    sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>()).remove(sliceName);
    colShardReplicaMap.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>()).remove(sliceName);
    nodeReplicaMap.forEach((n, replicas) -> {
      synchronized (replicas) {
        Iterator<ReplicaInfo> it = replicas.iterator();
        while (it.hasNext()) {
          ReplicaInfo ri = it.next();
          if (ri.getCollection().equals(collectionName) && ri.getShard().equals(sliceName)) {
            it.remove();
          }
        }
      }
    });
    collectionsStatesRef.get(collectionName).invalidate();
    results.add("success", "");
  } catch (Exception e) {
    results.add("failure", e.toString());
  } finally {
    lock.unlock();
  }
}
Example 14
Source File: TestSimLargeCluster.java From lucene-solr with Apache License 2.0
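This simulation test uses getCollection() to discover which nodes host the collection's replicas, then asserts that free-disk accounting tracks document adds, deletes, and bulk updates within a 10% tolerance.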
@Test
public void testFreediskTracking() throws Exception {
  int NUM_DOCS = 100000;
  String collectionName = "testFreeDisk";
  SolrClient solrClient = cluster.simGetSolrClient();
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2);
  create.process(solrClient);
  CloudUtil.waitForState(cluster, "Timed out waiting for replicas of new collection to be active", collectionName,
      CloudUtil.clusterShape(2, 2, false, true));

  ClusterState clusterState = cluster.getClusterStateProvider().getClusterState();
  DocCollection coll = clusterState.getCollection(collectionName);
  Set<String> nodes = coll.getReplicas().stream()
      .map(r -> r.getNodeName())
      .collect(Collectors.toSet());

  Map<String, Number> initialFreedisk = getFreeDiskPerNode(nodes);

  // test small updates
  for (int i = 0; i < NUM_DOCS; i++) {
    SolrInputDocument doc = new SolrInputDocument("id", "id-" + i);
    solrClient.add(collectionName, doc);
  }
  Map<String, Number> updatedFreedisk = getFreeDiskPerNode(nodes);
  double delta = getDeltaFreeDiskBytes(initialFreedisk, updatedFreedisk);
  // 2 replicas - twice as much delta
  assertEquals(SimClusterStateProvider.DEFAULT_DOC_SIZE_BYTES * NUM_DOCS * 2, delta, delta * 0.1);

  // test small deletes - delete half of docs
  for (int i = 0; i < NUM_DOCS / 2; i++) {
    solrClient.deleteById(collectionName, "id-" + i);
  }
  Map<String, Number> updatedFreedisk1 = getFreeDiskPerNode(nodes);
  double delta1 = getDeltaFreeDiskBytes(initialFreedisk, updatedFreedisk1);
  // 2 replicas but half the docs
  assertEquals(SimClusterStateProvider.DEFAULT_DOC_SIZE_BYTES * NUM_DOCS * 2 / 2, delta1, delta1 * 0.1);

  // test bulk delete
  solrClient.deleteByQuery(collectionName, "*:*");
  Map<String, Number> updatedFreedisk2 = getFreeDiskPerNode(nodes);
  double delta2 = getDeltaFreeDiskBytes(initialFreedisk, updatedFreedisk2);
  // 0 docs - initial freedisk
  if (log.isInfoEnabled()) {
    log.info(cluster.dumpClusterState(true));
  }
  assertEquals(0.0, delta2, delta2 * 0.1);

  // test bulk update
  UpdateRequest ureq = new UpdateRequest();
  ureq.setDocIterator(new FakeDocIterator(0, NUM_DOCS));
  ureq.process(solrClient, collectionName);
  Map<String, Number> updatedFreedisk3 = getFreeDiskPerNode(nodes);
  double delta3 = getDeltaFreeDiskBytes(initialFreedisk, updatedFreedisk3);
  assertEquals(SimClusterStateProvider.DEFAULT_DOC_SIZE_BYTES * NUM_DOCS * 2, delta3, delta3 * 0.1);
}
Example 15
Source File: TestRandomRequestDistribution.java From lucene-solr with Apache License 2.0
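This test uses getCollection("b1x1") to find the base URL of a node hosting no a1x2 replica, then verifies that queries sent through it are eventually distributed across both a1x2 replicas.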
/**
 * Asserts that requests aren't always sent to the same poor node. See SOLR-7493.
 */
private void testRequestTracking() throws Exception {
  CollectionAdminRequest.createCollection("a1x2", "conf1", 1, 2)
      .setCreateNodeSet(nodeNames.get(0) + ',' + nodeNames.get(1))
      .process(cloudClient);

  CollectionAdminRequest.createCollection("b1x1", "conf1", 1, 1)
      .setCreateNodeSet(nodeNames.get(2))
      .process(cloudClient);

  waitForRecoveriesToFinish("a1x2", true);
  waitForRecoveriesToFinish("b1x1", true);

  cloudClient.getZkStateReader().forceUpdateCollection("b1x1");

  // get direct access to the metrics counters for each core/replica we're interested to monitor them
  final Map<String, Counter> counters = new LinkedHashMap<>();
  for (JettySolrRunner runner : jettys) {
    CoreContainer container = runner.getCoreContainer();
    SolrMetricManager metricManager = container.getMetricManager();
    for (SolrCore core : container.getCores()) {
      if ("a1x2".equals(core.getCoreDescriptor().getCollectionName())) {
        String registry = core.getCoreMetricManager().getRegistryName();
        Counter cnt = metricManager.counter(null, registry, "requests", "QUERY./select");
        // sanity check
        assertEquals(core.getName() + " has already received some requests?", 0, cnt.getCount());
        counters.put(core.getName(), cnt);
      }
    }
  }
  assertEquals("Sanity Check: we know there should be 2 replicas", 2, counters.size());

  // send queries to the node that doesn't host any core/replica and see where it routes them
  ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
  DocCollection b1x1 = clusterState.getCollection("b1x1");
  Collection<Replica> replicas = b1x1.getSlice("shard1").getReplicas();
  assertEquals(1, replicas.size());
  String baseUrl = replicas.iterator().next().getStr(ZkStateReader.BASE_URL_PROP);
  if (!baseUrl.endsWith("/")) baseUrl += "/";

  try (HttpSolrClient client = getHttpSolrClient(baseUrl + "a1x2", 2000, 5000)) {
    long expectedTotalRequests = 0;
    Set<String> uniqueCoreNames = new LinkedHashSet<>();

    log.info("Making requests to {} a1x2", baseUrl);
    while (uniqueCoreNames.size() < counters.keySet().size() && expectedTotalRequests < 1000L) {
      expectedTotalRequests++;
      client.query(new SolrQuery("*:*"));

      long actualTotalRequests = 0;
      for (Map.Entry<String, Counter> e : counters.entrySet()) {
        final long coreCount = e.getValue().getCount();
        actualTotalRequests += coreCount;
        if (0 < coreCount) {
          uniqueCoreNames.add(e.getKey());
        }
      }
      assertEquals("Sanity Check: Num Queries So Far Doesn't Match Total????", expectedTotalRequests, actualTotalRequests);
    }
    log.info("Total requests: {}", expectedTotalRequests);
    assertEquals("either request randomization code is broken or this test seed is really unlucky, " +
            "Gave up waiting for requests to hit every core at least once after " + expectedTotalRequests + " requests",
        uniqueCoreNames.size(), counters.size());
  }
}
Example 16
Source File: MigrateCmd.java From lucene-solr with Apache License 2.0
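The MIGRATE command resolves the source and target collections (optionally following aliases), checks that both use a compositeId router, and migrates the split key between every matching pair of source and target slices.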
@Override
public void call(ClusterState clusterState, ZkNodeProps message, @SuppressWarnings({"rawtypes"}) NamedList results) throws Exception {
  String extSourceCollectionName = message.getStr("collection");
  String splitKey = message.getStr("split.key");
  String extTargetCollectionName = message.getStr("target.collection");
  int timeout = message.getInt("forward.timeout", 10 * 60) * 1000;
  boolean followAliases = message.getBool(FOLLOW_ALIASES, false);

  String sourceCollectionName;
  String targetCollectionName;
  if (followAliases) {
    sourceCollectionName = ocmh.cloudManager.getClusterStateProvider().resolveSimpleAlias(extSourceCollectionName);
    targetCollectionName = ocmh.cloudManager.getClusterStateProvider().resolveSimpleAlias(extTargetCollectionName);
  } else {
    sourceCollectionName = extSourceCollectionName;
    targetCollectionName = extTargetCollectionName;
  }

  DocCollection sourceCollection = clusterState.getCollection(sourceCollectionName);
  if (sourceCollection == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown source collection: " + sourceCollectionName);
  }
  DocCollection targetCollection = clusterState.getCollection(targetCollectionName);
  if (targetCollection == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown target collection: " + targetCollectionName);
  }
  if (!(sourceCollection.getRouter() instanceof CompositeIdRouter)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Source collection must use a compositeId router");
  }
  if (!(targetCollection.getRouter() instanceof CompositeIdRouter)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Target collection must use a compositeId router");
  }

  if (splitKey == null || splitKey.trim().length() == 0) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key cannot be null or empty");
  }

  CompositeIdRouter sourceRouter = (CompositeIdRouter) sourceCollection.getRouter();
  CompositeIdRouter targetRouter = (CompositeIdRouter) targetCollection.getRouter();
  Collection<Slice> sourceSlices = sourceRouter.getSearchSlicesSingle(splitKey, null, sourceCollection);
  if (sourceSlices.isEmpty()) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "No active slices available in source collection: " + sourceCollection + " for given split.key: " + splitKey);
  }
  Collection<Slice> targetSlices = targetRouter.getSearchSlicesSingle(splitKey, null, targetCollection);
  if (targetSlices.isEmpty()) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "No active slices available in target collection: " + targetCollection + " for given split.key: " + splitKey);
  }

  String asyncId = null;
  if (message.containsKey(ASYNC) && message.get(ASYNC) != null) asyncId = message.getStr(ASYNC);

  for (Slice sourceSlice : sourceSlices) {
    for (Slice targetSlice : targetSlices) {
      log.info("Migrating source shard: {} to target shard: {} for split.key = {}", sourceSlice, targetSlice, splitKey);
      migrateKey(clusterState, sourceCollection, sourceSlice, targetCollection, targetSlice, splitKey,
          timeout, results, asyncId, message);
    }
  }
}
Example 17
Source File: ReplicaMutator.java From lucene-solr with Apache License 2.0
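This Overseer mutator validates an ADDREPLICAPROP message and sets the property on the target replica; for shard-unique properties it also removes the property from all sibling replicas in the slice.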
public ZkWriteCommand addReplicaProperty(ClusterState clusterState, ZkNodeProps message) {
  if (!checkKeyExistence(message, ZkStateReader.COLLECTION_PROP) ||
      !checkKeyExistence(message, ZkStateReader.SHARD_ID_PROP) ||
      !checkKeyExistence(message, ZkStateReader.REPLICA_PROP) ||
      !checkKeyExistence(message, ZkStateReader.PROPERTY_PROP) ||
      !checkKeyExistence(message, ZkStateReader.PROPERTY_VALUE_PROP)) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Overseer ADDREPLICAPROP requires " +
            ZkStateReader.COLLECTION_PROP + " and " + ZkStateReader.SHARD_ID_PROP + " and " +
            ZkStateReader.REPLICA_PROP + " and " + ZkStateReader.PROPERTY_PROP + " and " +
            ZkStateReader.PROPERTY_VALUE_PROP + " no action taken.");
  }

  String collectionName = message.getStr(ZkStateReader.COLLECTION_PROP);
  String sliceName = message.getStr(ZkStateReader.SHARD_ID_PROP);
  String replicaName = message.getStr(ZkStateReader.REPLICA_PROP);
  String property = message.getStr(ZkStateReader.PROPERTY_PROP).toLowerCase(Locale.ROOT);
  if (StringUtils.startsWith(property, OverseerCollectionMessageHandler.COLL_PROP_PREFIX) == false) {
    property = OverseerCollectionMessageHandler.COLL_PROP_PREFIX + property;
  }
  property = property.toLowerCase(Locale.ROOT);
  String propVal = message.getStr(ZkStateReader.PROPERTY_VALUE_PROP);
  String shardUnique = message.getStr(OverseerCollectionMessageHandler.SHARD_UNIQUE);

  boolean isUnique = false;
  if (SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property)) {
    if (StringUtils.isNotBlank(shardUnique) && Boolean.parseBoolean(shardUnique) == false) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Overseer ADDREPLICAPROP for " + property + " cannot have " +
              OverseerCollectionMessageHandler.SHARD_UNIQUE + " set to anything other than 'true'. No action taken");
    }
    isUnique = true;
  } else {
    isUnique = Boolean.parseBoolean(shardUnique);
  }

  DocCollection collection = clusterState.getCollection(collectionName);
  Replica replica = collection.getReplica(replicaName);
  if (replica == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Could not find collection/slice/replica " + collectionName + "/" + sliceName + "/" + replicaName +
            " no action taken.");
  }
  log.info("Setting property {} with value {} for collection {}", property, propVal, collectionName);
  log.debug("Full message: {}", message);
  if (StringUtils.equalsIgnoreCase(replica.getStr(property), propVal))
    return ZkStateWriter.NO_OP; // already the value we're going to set

  // OK, there's no way we won't change the cluster state now
  Map<String, Replica> replicas = collection.getSlice(sliceName).getReplicasCopy();
  if (isUnique == false) {
    replicas.get(replicaName).getProperties().put(property, propVal);
  } else { // Set prop for this replica, but remove it for all others.
    for (Replica rep : replicas.values()) {
      if (rep.getName().equalsIgnoreCase(replicaName)) {
        rep.getProperties().put(property, propVal);
      } else {
        rep.getProperties().remove(property);
      }
    }
  }
  Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy(), collectionName);
  DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection, newSlice);
  return new ZkWriteCommand(collectionName, newCollection);
}
Example 18
Source File: Assign.java From lucene-solr with Apache License 2.0
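Before delegating placement to an AssignStrategy, this method uses getCollection() to read maxShardsPerNode and verifies there are enough free slots on eligible live nodes for the requested replicas.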
@SuppressWarnings({"unchecked"})
public static List<ReplicaPosition> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
                                                           String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
                                                           Object createNodeSet, SolrCloudManager cloudManager)
    throws IOException, InterruptedException, AssignmentException {
  log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}",
      shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet);
  DocCollection coll = clusterState.getCollection(collectionName);
  int maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
  List<String> createNodeList;

  if (createNodeSet instanceof List) {
    createNodeList = (List<String>) createNodeSet;
  } else {
    // deduplicate
    createNodeList = createNodeSet == null ? null :
        new ArrayList<>(new LinkedHashSet<>(StrUtils.splitSmart((String) createNodeSet, ",", true)));
  }

  // produces clear message when down nodes are the root cause, without this the user just
  // gets a log message of detail about the nodes that are up, and a message that policies could not
  // be satisfied which then requires study to diagnose the issue.
  checkLiveNodes(createNodeList, clusterState);

  if (createNodeList == null) { // We only care if we haven't been told to put new replicas on specific nodes.
    HashMap<String, ReplicaCount> nodeNameVsShardCount = getNodeNameVsShardCount(collectionName, clusterState, null);
    long availableSlots = 0;
    for (Map.Entry<String, ReplicaCount> ent : nodeNameVsShardCount.entrySet()) {
      // ADDREPLICA can put more than maxShardsPerNode on an instance, so this test is necessary.
      if (maxShardsPerNode > ent.getValue().thisCollectionNodes) {
        availableSlots += (maxShardsPerNode - ent.getValue().thisCollectionNodes);
      }
    }
    if (availableSlots < nrtReplicas + tlogReplicas + pullReplicas) {
      throw new AssignmentException(
          String.format(Locale.ROOT,
              "Cannot create %d new replicas for collection %s given the current number of eligible live nodes %d and a maxShardsPerNode of %d",
              nrtReplicas, collectionName, nodeNameVsShardCount.size(), maxShardsPerNode));
    }
  }

  AssignRequest assignRequest = new AssignRequestBuilder()
      .forCollection(collectionName)
      .forShard(Collections.singletonList(shard))
      .assignNrtReplicas(nrtReplicas)
      .assignTlogReplicas(tlogReplicas)
      .assignPullReplicas(pullReplicas)
      .onNodes(createNodeList)
      .build();
  AssignStrategyFactory assignStrategyFactory = new AssignStrategyFactory(cloudManager);
  AssignStrategy assignStrategy = assignStrategyFactory.create(clusterState, coll);
  return assignStrategy.assign(cloudManager, assignRequest);
}
Example 19
Source File: SplitOp.java From lucene-solr with Apache License 2.0
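For splitByPrefix, this method reads the parent slice's hash range from the cluster state, builds a histogram of document prefixes over that range, and responds with the recommended split ranges.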
/**
 * This is called when splitByPrefix is used.
 * The overseer called us to get recommended splits taking into
 * account actual document distribution over the hash space.
 */
private void handleGetRanges(CoreAdminHandler.CallInfo it, String coreName) throws Exception {

  SolrCore parentCore = it.handler.coreContainer.getCore(coreName);
  if (parentCore == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown core " + coreName);
  }

  RefCounted<SolrIndexSearcher> searcherHolder = parentCore.getRealtimeSearcher();

  try {
    if (!it.handler.coreContainer.isZooKeeperAware()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Shard splitByPrefix requires SolrCloud mode.");
    } else {
      SolrIndexSearcher searcher = searcherHolder.get();

      String routeFieldName = null;
      String prefixField = "id_prefix";

      ClusterState clusterState = it.handler.coreContainer.getZkController().getClusterState();
      String collectionName = parentCore.getCoreDescriptor().getCloudDescriptor().getCollectionName();
      DocCollection collection = clusterState.getCollection(collectionName);
      String sliceName = parentCore.getCoreDescriptor().getCloudDescriptor().getShardId();
      Slice slice = collection.getSlice(sliceName);
      DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
      DocRouter.Range currentRange = slice.getRange();

      Object routerObj = collection.get(DOC_ROUTER); // for back-compat with Solr 4.4
      if (routerObj instanceof Map) {
        @SuppressWarnings({"rawtypes"})
        Map routerProps = (Map) routerObj;
        routeFieldName = (String) routerProps.get("field");
      }
      if (routeFieldName == null) {
        routeFieldName = searcher.getSchema().getUniqueKeyField().getName();
      }

      Collection<RangeCount> counts = getHashHistogram(searcher, prefixField, router, collection);

      if (counts.size() == 0) {
        // How to determine if we should look at the id field to figure out the prefix buckets?
        // There may legitimately be no indexed terms in id_prefix if no ids have a prefix yet.
        // For now, avoid using splitByPrefix unless you are actually using prefixes.
        counts = getHashHistogramFromId(searcher, searcher.getSchema().getUniqueKeyField().getName(), router, collection);
      }

      Collection<DocRouter.Range> splits = getSplits(counts, currentRange);
      String splitString = toSplitString(splits);

      if (splitString == null) {
        return;
      }

      it.rsp.add(CoreAdminParams.RANGES, splitString);
    }
  } finally {
    if (searcherHolder != null) searcherHolder.decref();
    if (parentCore != null) parentCore.close();
  }
}
Example 20
Source File: ExecutePlanActionTest.java From lucene-solr with Apache License 2.0
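This autoscaling test locates the replicas on a randomly chosen node via getCollection(), then verifies that ExecutePlanAction executes a MoveReplica operation (alongside a mock request) and reports success.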
@Test
public void testExecute() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "testExecute";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2);
  create.setMaxShardsPerNode(1);
  create.process(solrClient);

  cluster.waitForActiveCollection(collectionName, 1, 2);

  waitForState("Timed out waiting for replicas of new collection to be active", collectionName, clusterShape(1, 2));

  JettySolrRunner sourceNode = cluster.getRandomJetty(random());
  String sourceNodeName = sourceNode.getNodeName();
  ClusterState clusterState = solrClient.getZkStateReader().getClusterState();
  DocCollection docCollection = clusterState.getCollection(collectionName);
  List<Replica> replicas = docCollection.getReplicas(sourceNodeName);
  assertNotNull(replicas);
  assertFalse(replicas.isEmpty());

  List<JettySolrRunner> otherJetties = cluster.getJettySolrRunners().stream()
      .filter(jettySolrRunner -> jettySolrRunner != sourceNode).collect(Collectors.toList());
  assertFalse(otherJetties.isEmpty());
  JettySolrRunner survivor = otherJetties.get(0);

  try (ExecutePlanAction action = new ExecutePlanAction()) {
    action.configure(loader, cloudManager, Collections.singletonMap("name", "execute_plan"));

    // used to signal if we found that ExecutePlanAction did in fact create the right znode before executing the operation
    AtomicBoolean znodeCreated = new AtomicBoolean(false);

    CollectionAdminRequest.AsyncCollectionAdminRequest moveReplica =
        new CollectionAdminRequest.MoveReplica(collectionName, replicas.get(0).getName(), survivor.getNodeName());
    CollectionAdminRequest.AsyncCollectionAdminRequest mockRequest =
        new CollectionAdminRequest.AsyncCollectionAdminRequest(CollectionParams.CollectionAction.OVERSEERSTATUS) {
          @Override
          public void setAsyncId(String asyncId) {
            super.setAsyncId(asyncId);
            String parentPath = ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH + "/xyz/execute_plan";
            try {
              if (zkClient().exists(parentPath, true)) {
                java.util.List<String> children = zkClient().getChildren(parentPath, null, true);
                if (!children.isEmpty()) {
                  String child = children.get(0);
                  byte[] data = zkClient().getData(parentPath + "/" + child, null, null, true);
                  @SuppressWarnings({"rawtypes"})
                  Map m = (Map) Utils.fromJSON(data);
                  if (m.containsKey("requestid")) {
                    znodeCreated.set(m.get("requestid").equals(asyncId));
                  }
                }
              }
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
        };
    List<CollectionAdminRequest.AsyncCollectionAdminRequest> operations =
        Lists.asList(moveReplica, new CollectionAdminRequest.AsyncCollectionAdminRequest[]{mockRequest});
    NodeLostTrigger.NodeLostEvent nodeLostEvent = new NodeLostTrigger.NodeLostEvent(
        TriggerEventType.NODELOST, "mock_trigger_name",
        Collections.singletonList(cloudManager.getTimeSource().getTimeNs()),
        Collections.singletonList(sourceNodeName),
        CollectionParams.CollectionAction.MOVEREPLICA.toLower());
    ActionContext actionContext = new ActionContext(survivor.getCoreContainer().getZkController().getSolrCloudManager(), null,
        new HashMap<>(Collections.singletonMap("operations", operations)));
    action.process(nodeLostEvent, actionContext);

    // assertTrue("ExecutePlanAction should have stored the requestid in ZK before executing the request", znodeCreated.get());
    @SuppressWarnings({"unchecked"})
    List<NamedList<Object>> responses = (List<NamedList<Object>>) actionContext.getProperty("responses");
    assertNotNull(responses);
    assertEquals(2, responses.size());
    NamedList<Object> response = responses.get(0);
    assertNull(response.get("failure"));
    assertNotNull(response.get("success"));
  }

  waitForState("Timed out waiting for replicas of new collection to be active", collectionName, clusterShape(1, 2));
}