Java Code Examples for org.elasticsearch.cluster.ClusterState#getRoutingNodes()
The following examples show how to use org.elasticsearch.cluster.ClusterState#getRoutingNodes().
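As a quick orientation before the examples: the RoutingNodes view is obtained from the current ClusterState and groups the cluster's shards by the node that holds them. Below is a minimal sketch of that call pattern; the class name, the helper method, and the injected ClusterService are illustrative assumptions, only the Elasticsearch types and methods are real.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.service.ClusterService;

// Illustrative helper class (not from the projects below).
class RoutingNodesSketch {

    // Prints how many shards each node currently holds, plus the count of started shards.
    static void logShardsPerNode(ClusterService clusterService) {
        ClusterState state = clusterService.state();           // current cluster state
        RoutingNodes routingNodes = state.getRoutingNodes();   // shards grouped by the node holding them
        for (RoutingNode node : routingNodes) {                 // RoutingNodes is iterable per node
            System.out.println(node.nodeId() + " holds " + node.size() + " shard(s)");
        }
        System.out.println("started shards: "
            + routingNodes.shardsWithState(ShardRoutingState.STARTED).size());
    }
}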
Example 1
Source File: TransportIndicesShardStoresAction.java, from Elasticsearch (Apache License 2.0)
@Override
protected void masterOperation(IndicesShardStoresRequest request, ClusterState state,
                               ActionListener<IndicesShardStoresResponse> listener) {
    final RoutingTable routingTables = state.routingTable();
    final RoutingNodes routingNodes = state.getRoutingNodes();
    final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
    final Set<ShardId> shardIdsToFetch = new HashSet<>();

    logger.trace("using cluster state version [{}] to determine shards", state.version());
    // collect relevant shard ids of the requested indices for fetching store infos
    for (String index : concreteIndices) {
        IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
        if (indexShardRoutingTables == null) {
            continue;
        }
        for (IndexShardRoutingTable routing : indexShardRoutingTables) {
            ClusterShardHealth shardHealth = new ClusterShardHealth(routing.shardId().id(), routing);
            if (request.shardStatuses().contains(shardHealth.getStatus())) {
                shardIdsToFetch.add(routing.shardId());
            }
        }
    }

    // async fetch store infos from all the nodes
    // NOTE: instead of fetching shard store info one by one from every node (nShards * nNodes requests)
    // we could fetch all shard store info from every node once (nNodes requests)
    // we have to implement a TransportNodesAction instead of using TransportNodesListGatewayStartedShards
    // for fetching shard stores info, that operates on a list of shards instead of a single shard
    new AsyncShardStoresInfoFetches(state.nodes(), routingNodes, state.metaData(), shardIdsToFetch, listener).start();
}
Example 2
Source File: SysAllocations.java, from crate (Apache License 2.0)
@Override
public Iterator<SysAllocation> iterator() {
    final ClusterState state = clusterService.state();
    final RoutingNodes routingNodes = state.getRoutingNodes();
    final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
    // build a RoutingAllocation so each shard's allocation can be explained against the current deciders
    final RoutingAllocation allocation = new RoutingAllocation(
        allocationDeciders, routingNodes, state, clusterInfo, System.nanoTime());
    // skip dangling indices and wrap the explanation of every remaining shard in a SysAllocation
    return allocation.routingTable().allShards().stream()
        .filter(shardRouting -> !IndexParts.isDangling(shardRouting.getIndexName()))
        .map(shardRouting -> new SysAllocation(
            explainShard(shardRouting, allocation, gatewayAllocator, shardAllocator)))
        .iterator();
}
Example 3
Source File: DiskThresholdDeciderTests.java, from crate (Apache License 2.0)
public void logShardStates(ClusterState state) {
    RoutingNodes rn = state.getRoutingNodes();
    logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
                rn.shards(shard -> true).size(),
                rn.shardsWithState(UNASSIGNED).size(),
                rn.shardsWithState(INITIALIZING).size(),
                rn.shardsWithState(RELOCATING).size(),
                rn.shardsWithState(STARTED).size());
    logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
                rn.shardsWithState(UNASSIGNED),
                rn.shardsWithState(INITIALIZING),
                rn.shardsWithState(RELOCATING),
                rn.shardsWithState(STARTED));
}
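Example 3 tallies shards by state with shardsWithState(); when only the unassigned shards matter, the RoutingNodes view also exposes them directly via unassigned(). A minimal hedged sketch, with the class and method names below being illustrative rather than taken from the project above:

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

// Illustrative helper: prints each unassigned shard and why it is unassigned.
class UnassignedShardsSketch {

    static void logUnassignedShards(ClusterState state) {
        RoutingNodes routingNodes = state.getRoutingNodes();
        for (ShardRouting shard : routingNodes.unassigned()) {   // unassigned shards are iterable directly
            System.out.println(shard.shardId() + " is unassigned, reason: "
                + shard.unassignedInfo().getReason());
        }
    }
}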
Example 4
Source File: DiskThresholdDeciderUnitTests.java, from crate (Apache License 2.0)
@Test
public void testCanAllocateUsesMaxAvailableSpace() {
    ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);

    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .build();
    final Index index = metaData.index("test").getIndex();

    ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE,
        new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(),
        new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
    DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(),
        new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);

    RoutingTable routingTable = RoutingTable.builder()
        .addAsNew(metaData.index("test"))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData).routingTable(routingTable).build();
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
        .add(node_0)
        .add(node_1)
    ).build();

    // actual test -- after all that bloat :)
    ImmutableOpenMap.Builder<String, DiskUsage> leastAvailableUsages = ImmutableOpenMap.builder();
    leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full
    leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, 0)); // all full

    ImmutableOpenMap.Builder<String, DiskUsage> mostAvailableUsage = ImmutableOpenMap.builder();
    // 20 - 99 percent since after allocation there must be at least 10% left and shard is 10byte
    mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, randomIntBetween(20, 100)));
    // this is weird and smells like a bug! it should be up to 20%?
    mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, randomIntBetween(0, 10)));

    ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
    shardSizes.put("[test][0][p]", 10L); // 10 bytes

    final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(),
        shardSizes.build(), ImmutableOpenMap.of());
    RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)),
        clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
    allocation.debugDecision(true);

    Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation);
    assertEquals(mostAvailableUsage.toString(), Decision.Type.YES, decision.type());
    assertThat(decision.getExplanation(), containsString("enough disk for shard on node"));

    decision = decider.canAllocate(test_0, new RoutingNode("node_1", node_1), allocation);
    assertEquals(mostAvailableUsage.toString(), Decision.Type.NO, decision.type());
    assertThat(decision.getExplanation(), containsString("the node is above the high watermark cluster " +
        "setting [cluster.routing.allocation.disk.watermark.high=90%], using more disk space than the maximum allowed [90.0%]"));
}
Example 5
Source File: DiskThresholdDeciderUnitTests.java, from crate (Apache License 2.0)
@Test
public void testCannotAllocateDueToLackOfDiskResources() {
    ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);

    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .build();
    final Index index = metaData.index("test").getIndex();

    ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE,
        new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(),
        new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
    DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(),
        new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);

    RoutingTable routingTable = RoutingTable.builder()
        .addAsNew(metaData.index("test"))
        .build();
    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData).routingTable(routingTable).build();
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
        .add(node_0)
        .add(node_1)
    ).build();

    // actual test -- after all that bloat :)
    ImmutableOpenMap.Builder<String, DiskUsage> leastAvailableUsages = ImmutableOpenMap.builder();
    leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full

    ImmutableOpenMap.Builder<String, DiskUsage> mostAvailableUsage = ImmutableOpenMap.builder();
    final int freeBytes = randomIntBetween(20, 100);
    mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, freeBytes));

    ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
    // way bigger than available space
    final long shardSize = randomIntBetween(110, 1000);
    shardSizes.put("[test][0][p]", shardSize);

    ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(),
        shardSizes.build(), ImmutableOpenMap.of());
    RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)),
        clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
    allocation.debugDecision(true);

    Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation);
    assertEquals(Decision.Type.NO, decision.type());
    assertThat(decision.getExplanation(), containsString(
        "allocating the shard to this node will bring the node above the high watermark cluster setting " +
        "[cluster.routing.allocation.disk.watermark.high=90%] " +
        "and cause it to have less than the minimum required [0b] of free space " +
        "(free: [" + freeBytes + "b], estimated shard size: [" + shardSize + "b])"));
}