Java Code Examples for org.apache.solr.cloud.ZkController#getNodeName()
The following examples show how to use org.apache.solr.cloud.ZkController#getNodeName().
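Before the full examples, here is a minimal sketch of the pattern they all share: obtain the ZkController from a CoreContainer that is running in SolrCloud mode and ask it for the local node name. The helper class and method below are hypothetical; CoreContainer, isZooKeeperAware(), getZkController() and getNodeName() are the calls used in the examples that follow.

import org.apache.solr.cloud.ZkController;
import org.apache.solr.core.CoreContainer;

// Hypothetical helper: resolve the cluster-wide name of the local node.
public class NodeNameSketch {

  public static String resolveNodeName(CoreContainer cc) {
    // A node only has a meaningful SolrCloud node name when Solr runs against ZooKeeper,
    // so guard with isZooKeeperAware() just as the examples below do.
    if (!cc.isZooKeeperAware()) {
      return null; // standalone mode: no SolrCloud node name
    }
    ZkController zk = cc.getZkController();
    return zk.getNodeName(); // typically of the form "host:port_solr"
  }
}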
Example 1
Source File: SolrClusterReporter.java From lucene-solr with Apache License 2.0
@Override
public void init(PluginInfo pluginInfo, CoreContainer cc) {
  super.init(pluginInfo, cc);
  if (reporter != null) {
    reporter.close();
  }
  if (!enabled) {
    log.info("Reporter disabled for registry {}", registryName);
    return;
  }
  // start reporter only in cloud mode
  if (!cc.isZooKeeperAware()) {
    log.warn("Not ZK-aware, not starting...");
    return;
  }
  if (period < 1) { // don't start it
    log.info("Turning off node reporter, period={}", period);
    return;
  }
  HttpClient httpClient = cc.getUpdateShardHandler().getDefaultHttpClient();
  ZkController zk = cc.getZkController();
  String reporterId = zk.getNodeName();
  reporter = SolrReporter.Builder.forReports(metricManager, reports)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .withHandler(handler)
      .withReporterId(reporterId)
      .setCompact(true)
      .cloudClient(false) // we want to send reports specifically to a selected leader instance
      .skipAggregateValues(true) // we don't want to transport details of aggregates
      .skipHistograms(true) // we don't want to transport histograms
      .build(httpClient, new OverseerUrlSupplier(zk));
  reporter.start(period, TimeUnit.SECONDS);
}
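In this reporter, the node name is used as the reporter ID: each node tags the metrics reports it sends (to the leader instance resolved by the OverseerUrlSupplier) with its own getNodeName() value, so the receiving side can tell which node a report came from.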
Example 2
Source File: ScoreJoinQParserPlugin.java From lucene-solr with Apache License 2.0
private static String findLocalReplicaForFromIndex(ZkController zkController, String fromIndex) {
  String fromReplica = null;

  String nodeName = zkController.getNodeName();
  for (Slice slice : zkController.getClusterState().getCollection(fromIndex).getActiveSlicesArr()) {
    if (fromReplica != null)
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "SolrCloud join: To join with a sharded collection, use method=crossCollection.");

    for (Replica replica : slice.getReplicas()) {
      if (replica.getNodeName().equals(nodeName)) {
        fromReplica = replica.getStr(ZkStateReader.CORE_NAME_PROP);

        // found local replica, but is it Active?
        if (replica.getState() != Replica.State.ACTIVE)
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "SolrCloud join: " + fromIndex + " has a local replica (" + fromReplica +
              ") on " + nodeName + ", but it is " + replica.getState());

        break;
      }
    }
  }

  if (fromReplica == null)
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "SolrCloud join: To join with a collection that might not be co-located, use method=crossCollection.");

  return fromReplica;
}
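This join helper compares zkController.getNodeName() with Replica#getNodeName() for every replica of the "from" collection: the join is only permitted when the collection has a single active slice with an active replica co-located on the current node, and that replica's core name is returned.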
Example 3
Source File: StreamHandler.java From lucene-solr with Apache License 2.0
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
  SolrParams params = req.getParams();
  params = adjustParams(params);
  req.setParams(params);

  if (params.get("action") != null) {
    handleAdmin(req, rsp, params);
    return;
  }

  TupleStream tupleStream;

  try {
    StreamExpression streamExpression = StreamExpressionParser.parse(params.get(StreamParams.EXPR));
    if (this.streamFactory.isEvaluator(streamExpression)) {
      StreamExpression tupleExpression = new StreamExpression(StreamParams.TUPLE);
      tupleExpression.addParameter(new StreamExpressionNamedParameter(StreamParams.RETURN_VALUE, streamExpression));
      tupleStream = this.streamFactory.constructStream(tupleExpression);
    } else {
      tupleStream = this.streamFactory.constructStream(streamExpression);
    }
  } catch (Exception e) {
    // Catch exceptions that occur while the stream is being created. This will include streaming expression parse rules.
    SolrException.log(log, e);
    rsp.add(StreamParams.RESULT_SET, new DummyErrorStream(e));
    return;
  }

  final SolrCore core = req.getCore(); // explicit check for null core (temporary?, for tests)
  @SuppressWarnings("resource")
  ZkController zkController = core == null ? null : core.getCoreContainer().getZkController();
  RequestReplicaListTransformerGenerator requestReplicaListTransformerGenerator;
  if (zkController != null) {
    requestReplicaListTransformerGenerator = new RequestReplicaListTransformerGenerator(
        zkController.getZkStateReader().getClusterProperties()
            .getOrDefault(ZkStateReader.DEFAULT_SHARD_PREFERENCES, "")
            .toString(),
        zkController.getNodeName(),
        zkController.getBaseUrl(),
        zkController.getSysPropsCacher());
  } else {
    requestReplicaListTransformerGenerator = new RequestReplicaListTransformerGenerator();
  }

  int worker = params.getInt("workerID", 0);
  int numWorkers = params.getInt("numWorkers", 1);
  boolean local = params.getBool("streamLocalOnly", false);
  StreamContext context = new StreamContext();
  context.setRequestParams(params);
  context.setRequestReplicaListTransformerGenerator(requestReplicaListTransformerGenerator);
  context.put("shards", getCollectionShards(params));
  context.workerID = worker;
  context.numWorkers = numWorkers;
  context.setSolrClientCache(solrClientCache);
  context.setModelCache(modelCache);
  context.setObjectCache(objectCache);
  context.put("core", this.coreName);
  context.put("solr-core", req.getCore());
  context.setLocal(local);
  tupleStream.setStreamContext(context);

  // if asking for explanation then go get it
  if (params.getBool("explain", false)) {
    rsp.add("explanation", tupleStream.toExplanation(this.streamFactory));
  }

  if (tupleStream instanceof DaemonStream) {
    DaemonStream daemonStream = (DaemonStream) tupleStream;
    if (daemons.containsKey(daemonStream.getId())) {
      daemons.remove(daemonStream.getId()).close();
    }
    daemonStream.setDaemons(daemons);
    daemonStream.open(); // This will start the daemonStream
    daemons.put(daemonStream.getId(), daemonStream);
    rsp.add(StreamParams.RESULT_SET,
        new DaemonResponseStream("Daemon:" + daemonStream.getId() + " started on " + coreName));
  } else {
    rsp.add(StreamParams.RESULT_SET, new TimerStream(new ExceptionStream(tupleStream)));
  }
}
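In this handler, getNodeName() is passed, together with the node's base URL, the cluster's default shard preferences, and the system property cacher, into RequestReplicaListTransformerGenerator so that replica selection for the streaming request can take the local node into account; when no ZkController is available (standalone mode or tests), the no-argument generator is used instead.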