org.elasticsearch.common.Randomness Java Examples
The following examples show how to use
org.elasticsearch.common.Randomness.
The original project and source file are noted above each example.
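Before the examples, a minimal sketch of the recurring usage patterns may help. As of the Elasticsearch versions these examples come from, Randomness.get() returns a Random that is reproducibly seeded when running under the randomized-testing framework and falls back to a non-reproducible thread-local source otherwise; Randomness.shuffle(List) shuffles in place using the same source. The sketch below only exercises those two calls, as seen throughout the examples; the class name and surrounding scaffolding are illustrative, not from any of the projects.

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.elasticsearch.common.Randomness;

public class RandomnessSketch {
    public static void main(String[] args) {
        // Shared entry point: reproducibly seeded under the Elasticsearch
        // test framework, non-reproducible otherwise.
        Random random = Randomness.get();
        int seed = random.nextInt(); // e.g. used below to seed a RoutingProvider

        // In-place shuffle backed by the same randomness source.
        List<Integer> elements = new ArrayList<>(List.of(1, 2, 3, 4, 5));
        Randomness.shuffle(elements);

        System.out.println("seed=" + seed + ", shuffled=" + elements);
    }
}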
Example #1
Source File: RoutingTest.java From crate with Apache License 2.0
@Test
public void testRoutingForRandomMasterOrDataNodePrefersLocal() throws Exception {
    Set<DiscoveryNode.Role> data = ImmutableSet.of(DiscoveryNode.Role.DATA);
    Map<String, String> attr = ImmutableMap.of();
    DiscoveryNode local = new DiscoveryNode("local_data", buildNewFakeTransportAddress(), attr, data, null);
    DiscoveryNodes nodes = new DiscoveryNodes.Builder()
        .add(local)
        .localNodeId(local.getId())
        .add(new DiscoveryNode("data_1", buildNewFakeTransportAddress(), attr, data, null))
        .add(new DiscoveryNode("data_2", buildNewFakeTransportAddress(), attr, data, null))
        .add(new DiscoveryNode("data_3", buildNewFakeTransportAddress(), attr, data, null))
        .add(new DiscoveryNode("data_4", buildNewFakeTransportAddress(), attr, data, null))
        .build();

    RoutingProvider routingProvider = new RoutingProvider(Randomness.get().nextInt(), Collections.emptyList());
    Routing routing = routingProvider.forRandomMasterOrDataNode(new RelationName("doc", "table"), nodes);
    assertThat(routing.locations().keySet(), contains("local_data"));
}
Example #2
Source File: RoutingTest.java From crate with Apache License 2.0
@Test
public void testRoutingForRandomMasterOrDataNode() throws IOException {
    Map<String, String> attr = ImmutableMap.of();
    Set<DiscoveryNode.Role> master_and_data = ImmutableSet.of(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA);
    DiscoveryNode local = new DiscoveryNode("client_node_1", buildNewFakeTransportAddress(), attr, ImmutableSet.of(), null);
    DiscoveryNodes nodes = new DiscoveryNodes.Builder()
        .add(new DiscoveryNode("data_master_node_1", buildNewFakeTransportAddress(), attr, master_and_data, null))
        .add(new DiscoveryNode("data_master_node_2", buildNewFakeTransportAddress(), attr, master_and_data, null))
        .add(local)
        .add(new DiscoveryNode("client_node_2", buildNewFakeTransportAddress(), attr, ImmutableSet.of(), null))
        .add(new DiscoveryNode("client_node_3", buildNewFakeTransportAddress(), attr, ImmutableSet.of(), null))
        .localNodeId(local.getId())
        .build();

    RoutingProvider routingProvider = new RoutingProvider(Randomness.get().nextInt(), Collections.emptyList());
    Routing routing = routingProvider.forRandomMasterOrDataNode(new RelationName("doc", "table"), nodes);
    assertThat(routing.locations().keySet(), anyOf(contains("data_master_node_1"), contains("data_master_node_2")));

    Routing routing2 = routingProvider.forRandomMasterOrDataNode(new RelationName("doc", "table"), nodes);
    assertThat("routingProvider is seeded and must return deterministic routing",
               routing.locations(), equalTo(routing2.locations()));
}
Example #3
Source File: CreateAlterTableStatementAnalyzerTest.java From crate with Apache License 2.0
@Before
public void prepare() throws IOException {
    String analyzerSettings = FulltextAnalyzerResolver.encodeSettings(
        Settings.builder().put("search", "foobar").build()).utf8ToString();
    MetaData metaData = MetaData.builder()
        .persistentSettings(
            Settings.builder().put(ANALYZER.buildSettingName("ft_search"), analyzerSettings).build())
        .build();
    ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
        .metaData(metaData)
        .build();
    ClusterServiceUtils.setState(clusterService, state);
    e = SQLExecutor.builder(clusterService, 3, Randomness.get(), List.of())
        .enableDefaultTables()
        .build();
    plannerContext = e.getPlannerContext(clusterService.state());
}
Example #4
Source File: ShardStateObserverTest.java From crate with Apache License 2.0
@Test
public void test_wait_for_active_shard_completes_on_shard_state_change() throws Throwable {
    // Add 2 nodes and table to cluster state
    SQLExecutor.builder(clusterService, 2, Randomness.get(), List.of())
        .addTable("create table t1 (x int) clustered into 1 shards");

    var observer = new ShardStateObserver(clusterService);
    IndexShardRoutingTable routingTable = clusterService.state().routingTable().shardRoutingTable("t1", 0);
    ShardId shardId = routingTable.shardId();
    CompletableFuture<ShardRouting> shard0Active = observer.waitForActiveShard(shardId);
    assertThat(shard0Active.isDone(), is(false));

    ShardRouting startedPrimaryShard = routingTable.primaryShard().moveToStarted();
    ClusterState newClusterState = ClusterState.builder(clusterService.state())
        .routingTable(
            RoutingTable.builder()
                .add(IndexRoutingTable.builder(shardId.getIndex()).addShard(startedPrimaryShard).build())
                .build()
        ).build();
    ClusterServiceUtils.setState(clusterService, newClusterState);

    // This now shouldn't timeout
    shard0Active.get(5, TimeUnit.SECONDS);
}
Example #5
Source File: DocLevelCollectTest.java From crate with Apache License 2.0
@Test
public void testCollectWithPartitionedColumns() throws Throwable {
    RelationName relationName = new RelationName(Schemas.DOC_SCHEMA_NAME, PARTITIONED_TABLE_NAME);
    TableInfo tableInfo = schemas.getTableInfo(relationName);
    Routing routing = tableInfo.getRouting(
        clusterService().state(),
        new RoutingProvider(Randomness.get().nextInt(), Collections.emptyList()),
        WhereClause.MATCH_ALL,
        RoutingProvider.ShardSelection.ANY,
        SessionContext.systemSessionContext());
    RoutedCollectPhase collectNode = getCollectNode(
        Arrays.asList(
            tableInfo.getReference(new ColumnIdent("id")),
            tableInfo.getReference(new ColumnIdent("date"))
        ),
        routing,
        WhereClause.MATCH_ALL
    );

    Bucket result = collect(collectNode);
    assertThat(result, containsInAnyOrder(
        isRow(1, 0L),
        isRow(2, 1L)
    ));
}
Example #6
Source File: InsertPlannerTest.java From crate with Apache License 2.0
@Before
public void prepare() throws IOException {
    e = SQLExecutor.builder(clusterService, 2, Randomness.get(), List.of())
        .addPartitionedTable(
            "create table parted_pks (" +
            " id int," +
            " name string," +
            " date timestamp with time zone," +
            " obj object," +
            " primary key (id, date)" +
            ") partitioned by (date) clustered by (id) ",
            new PartitionName(new RelationName("doc", "parted_pks"), singletonList("1395874800000")).asIndexName(),
            new PartitionName(new RelationName("doc", "parted_pks"), singletonList("1395961200000")).asIndexName(),
            new PartitionName(new RelationName("doc", "parted_pks"), singletonList(null)).asIndexName()
        )
        .addTable(
            "create table users (" +
            " id long primary key," +
            " name string," +
            " date timestamp with time zone" +
            ") clustered into 4 shards")
        .addTable("create table source (id int primary key, name string)")
        .build();
}
Example #7
Source File: StoredLtrModelParserTests.java From elasticsearch-learning-to-rank with Apache License 2.0
public void testFeatureMinMaxParsing() throws IOException {
    String modelJson = "{\n" +
        " \"name\":\"my_model\",\n" +
        " \"feature_set\":" + getSimpleFeatureSet() + "," +
        " \"model\": {\n" +
        " \"type\": \"model/dummy\",\n" +
        " \"definition\": \"completely ignored\",\n" +
        " \"feature_normalizers\": {\n" +
        " \"feature_2\": { \"min_max\":" +
        " {\"minimum\": 0.05," +
        " \"maximum\": 1.25}}}" +
        " }" +
        "}";

    StoredLtrModel model = parse(modelJson);
    StoredFeatureNormalizers ftrNormSet = model.getFeatureNormalizers();
    assertNotNull(ftrNormSet);

    MinMaxFeatureNormalizer minMaxFtrNorm = (MinMaxFeatureNormalizer) ftrNormSet.getNormalizer("feature_2");
    float expectedMin = 0.05f;
    float expectedMax = 1.25f;

    float testVal = Randomness.get().nextFloat();
    float expectedNormalized = (testVal - expectedMin) / (expectedMax - expectedMin);
    assertEquals(expectedNormalized, minMaxFtrNorm.normalize(testVal), 0.01);

    StoredLtrModel reparsedModel = reparseModel(model);
    ftrNormSet = reparsedModel.getFeatureNormalizers();
    minMaxFtrNorm = (MinMaxFeatureNormalizer) ftrNormSet.getNormalizer("feature_2");

    testVal = Randomness.get().nextFloat();
    expectedNormalized = (testVal - expectedMin) / (expectedMax - expectedMin);
    assertEquals(expectedNormalized, minMaxFtrNorm.normalize(testVal), 0.01);

    assertEquals(reparsedModel, model);
    assertEquals(reparsedModel.hashCode(), model.hashCode());
}
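For reference, the min-max normalization asserted above is a plain linear rescale onto [0, 1]. A worked instance of the same formula, with the min and max from the model JSON and an arbitrary sample value (the class name is illustrative, not part of the test suite):

public class MinMaxWorkedExample {
    public static void main(String[] args) {
        // normalized = (value - min) / (max - min); min = 0.05, max = 1.25 as in the model JSON
        float value = 0.65f; // arbitrary sample value
        float normalized = (value - 0.05f) / (1.25f - 0.05f);
        System.out.println(normalized); // 0.6 / 1.2 = 0.5
    }
}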
Example #8
Source File: CreateAnalyzerAnalyzerTest.java From crate with Apache License 2.0
@Before
public void prepare() throws IOException {
    e = SQLExecutor.builder(clusterService, 1, Randomness.get(), List.of(new CommonAnalysisPlugin()))
        .enableDefaultTables()
        .build();
    plannerContext = e.getPlannerContext(clusterService.state());
}
Example #9
Source File: AwsEc2ServiceImpl.java From crate with Apache License 2.0
static ClientConfiguration buildConfiguration(Logger logger, Ec2ClientSettings clientSettings) {
    final ClientConfiguration clientConfiguration = new ClientConfiguration();
    // the response metadata cache is only there for diagnostics purposes,
    // but can force objects from every response to the old generation.
    clientConfiguration.setResponseMetadataCacheSize(0);
    clientConfiguration.setProtocol(clientSettings.protocol);
    if (Strings.hasText(clientSettings.proxyHost)) {
        // TODO: remove this leniency, these settings should exist together and be validated
        clientConfiguration.setProxyHost(clientSettings.proxyHost);
        clientConfiguration.setProxyPort(clientSettings.proxyPort);
        clientConfiguration.setProxyUsername(clientSettings.proxyUsername);
        clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
    }
    // Increase the number of retries in case of 5xx API responses
    final Random rand = Randomness.get();
    final RetryPolicy retryPolicy = new RetryPolicy(
        RetryPolicy.RetryCondition.NO_RETRY_CONDITION,
        (originalRequest, exception, retriesAttempted) -> {
            // with 10 retries the max delay time is 320s/320000ms (10 * 2^5 * 1 * 1000)
            logger.warn("EC2 API request failed, retry again. Reason was:", exception);
            return 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d) * (1.0d + rand.nextDouble()));
        },
        10,
        false);
    clientConfiguration.setRetryPolicy(retryPolicy);
    clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);
    return clientConfiguration;
}
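For intuition, the backoff lambda above can be evaluated in isolation. This standalone sketch (the class name is illustrative) re-applies the same arithmetic with the jitter factor (1.0 + rand.nextDouble()), which lies in [1.0, 2.0), pinned to its minimum of 1.0, showing how the floor of the delay grows with the retry count and reproducing the 320s figure from the source comment at retriesAttempted = 10:

public class BackoffSketch {
    public static void main(String[] args) {
        for (int retriesAttempted = 0; retriesAttempted <= 10; retriesAttempted++) {
            // Same formula as the RetryPolicy lambda, with jitter factor fixed at 1.0
            long minDelayMs = 1000L * (long) (10d * Math.pow(2, retriesAttempted / 2.0d));
            System.out.println("retry " + retriesAttempted + ": min delay " + minDelayMs + " ms");
        }
        // retry 10 prints 320000 ms, i.e. the 320s noted in the source comment;
        // with maximum jitter the actual delay approaches twice that.
    }
}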
Example #10
Source File: CreateAlterTableStatementAnalyzerTest.java From crate with Apache License 2.0
@Test
public void testCreateTableUsesDefaultSchema() {
    SQLExecutor sqlExecutor = SQLExecutor.builder(clusterService, 1, Randomness.get(), List.of())
        .setSearchPath("firstSchema", "secondSchema")
        .build();

    BoundCreateTable analysis = analyze(sqlExecutor, "create table t (id int)");
    assertThat(analysis.tableIdent().schema(), is(sqlExecutor.getSessionContext().searchPath().currentSchema()));
}
Example #11
Source File: JoinTest.java From crate with Apache License 2.0
@Before
public void setUpExecutor() throws IOException {
    e = SQLExecutor.builder(clusterService, 2, Randomness.get(), List.of())
        .addTable(USER_TABLE_DEFINITION)
        .addTable(TEST_DOC_LOCATIONS_TABLE_DEFINITION)
        .addTable(T3.T1_DEFINITION)
        .addTable(T3.T2_DEFINITION)
        .addTable(T3.T3_DEFINITION)
        .addTable(T3.T4_DEFINITION)
        .build();
    plannerCtx = e.getPlannerContext(clusterService.state());
}
Example #12
Source File: RoutingBuilderTest.java From crate with Apache License 2.0
@Before
public void prepare() throws Exception {
    SQLExecutor e = SQLExecutor.builder(clusterService, 2, Randomness.get(), List.of())
        .addTable("create table custom.t1 (id int)")
        .build();
    tableInfo = e.schemas().getTableInfo(relationName);
}
Example #13
Source File: PlannerTest.java From crate with Apache License 2.0
@Test
public void testExecutionPhaseIdSequence() throws Exception {
    PlannerContext plannerContext = new PlannerContext(
        clusterService.state(),
        new RoutingProvider(Randomness.get().nextInt(), Collections.emptyList()),
        UUID.randomUUID(),
        e.functions(),
        new CoordinatorTxnCtx(SessionContext.systemSessionContext()),
        0,
        null
    );

    assertThat(plannerContext.nextExecutionPhaseId(), is(0));
    assertThat(plannerContext.nextExecutionPhaseId(), is(1));
}
Example #14
Source File: MockTransport.java From crate with Apache License 2.0
/**
 * simulate a remote error for the given requestId, will be wrapped
 * by a {@link RemoteTransportException}
 *
 * @param requestId the id corresponding to the captured send request
 * @param t         the failure to wrap
 */
public void handleRemoteError(final long requestId, final Throwable t) {
    final RemoteTransportException remoteException;
    if (rarely(Randomness.get())) {
        remoteException = new RemoteTransportException("remote failure, coming from local node", t);
    } else {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            output.writeException(t);
            remoteException = new RemoteTransportException("remote failure", output.bytes().streamInput().readException());
        } catch (IOException ioException) {
            throw new AssertionError("failed to serialize/deserialize supplied exception " + t, ioException);
        }
    }
    this.handleError(requestId, remoteException);
}
Example #15
Source File: NodeJoinTests.java From crate with Apache License 2.0
private void setupRealMasterServiceAndCoordinator(long term, ClusterState initialState) {
    MasterService masterService = new MasterService("test_node", Settings.EMPTY, threadPool);
    AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialState);
    masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
        clusterStateRef.set(event.state());
        publishListener.onResponse(null);
    });
    setupMasterServiceAndCoordinator(term, initialState, masterService, threadPool,
        new Random(Randomness.get().nextLong()));
    masterService.setClusterStateSupplier(clusterStateRef::get);
    masterService.start();
}
Example #16
Source File: StoredLtrModelParserTests.java From elasticsearch-learning-to-rank with Apache License 2.0
public void testFeatureStdNormParsing() throws IOException {
    String modelJson = "{\n" +
        " \"name\":\"my_model\",\n" +
        " \"feature_set\":" + getSimpleFeatureSet() + "," +
        " \"model\": {\n" +
        " \"type\": \"model/dummy\",\n" +
        " \"definition\": \"completely ignored\",\n" +
        " \"feature_normalizers\": {\n" +
        " \"feature_1\": { \"standard\":" +
        " {\"mean\": 1.25," +
        " \"standard_deviation\": 0.25}}}" +
        " }" +
        "}";

    StoredLtrModel model = parse(modelJson);
    StoredFeatureNormalizers ftrNormSet = model.getFeatureNormalizers();
    assertNotNull(ftrNormSet);

    StandardFeatureNormalizer stdFtrNorm = (StandardFeatureNormalizer) ftrNormSet.getNormalizer("feature_1");
    assertNotNull(stdFtrNorm);

    float expectedMean = 1.25f;
    float expectedStdDev = 0.25f;
    float testVal = Randomness.get().nextFloat();
    float expectedNormalized = (testVal - expectedMean) / expectedStdDev;
    assertEquals(expectedNormalized, stdFtrNorm.normalize(testVal), 0.01);

    StoredLtrModel reparsedModel = reparseModel(model);
    ftrNormSet = reparsedModel.getFeatureNormalizers();
    stdFtrNorm = (StandardFeatureNormalizer) ftrNormSet.getNormalizer("feature_1");

    testVal = Randomness.get().nextFloat();
    expectedNormalized = (testVal - expectedMean) / expectedStdDev;
    assertEquals(expectedNormalized, stdFtrNorm.normalize(testVal), 0.01);

    assertEquals(reparsedModel, model);
    assertEquals(reparsedModel.hashCode(), model.hashCode());
}
Example #17
Source File: EngineTestCase.java From crate with Apache License 2.0
public List<Engine.Operation> generateHistoryOnReplica(int numOps, boolean allowGapInSeqNo, boolean allowDuplicate) throws Exception {
    long seqNo = 0;
    final int maxIdValue = randomInt(numOps * 2);
    final List<Engine.Operation> operations = new ArrayList<>(numOps);
    for (int i = 0; i < numOps; i++) {
        final String id = Integer.toString(randomInt(maxIdValue));
        final Engine.Operation.TYPE opType = randomFrom(Engine.Operation.TYPE.values());
        final long startTime = threadPool.relativeTimeInMillis();
        final int copies = allowDuplicate && rarely() ? between(2, 4) : 1;
        for (int copy = 0; copy < copies; copy++) {
            final ParsedDocument doc = createParsedDoc(id, null);
            switch (opType) {
                case INDEX:
                    operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(), i, null,
                        randomFrom(REPLICA, PEER_RECOVERY), startTime, -1, true, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
                    break;
                case DELETE:
                    operations.add(new Engine.Delete("default", doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(), i, null,
                        randomFrom(REPLICA, PEER_RECOVERY), startTime, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
                    break;
                case NO_OP:
                    operations.add(new Engine.NoOp(seqNo, primaryTerm.get(), randomFrom(REPLICA, PEER_RECOVERY), startTime, "test-" + i));
                    break;
                default:
                    throw new IllegalStateException("Unknown operation type [" + opType + "]");
            }
        }
        seqNo++;
        if (allowGapInSeqNo && rarely()) {
            seqNo++;
        }
    }
    Randomness.shuffle(operations);
    return operations;
}
Example #18
Source File: LocalCheckpointTrackerTests.java From crate with Apache License 2.0
public void testWaitForOpsToComplete() throws BrokenBarrierException, InterruptedException {
    final int seqNo = randomIntBetween(0, 32);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final AtomicBoolean complete = new AtomicBoolean();
    final Thread thread = new Thread(() -> {
        try {
            // synchronize starting with the test thread
            barrier.await();
            tracker.waitForProcessedOpsToComplete(seqNo);
            complete.set(true);
            // synchronize with the test thread checking if we are no longer waiting
            barrier.await();
        } catch (BrokenBarrierException | InterruptedException e) {
            throw new RuntimeException(e);
        }
    });
    thread.start();

    // synchronize starting with the waiting thread
    barrier.await();

    final List<Integer> elements = IntStream.rangeClosed(0, seqNo).boxed().collect(Collectors.toList());
    Randomness.shuffle(elements);
    for (int i = 0; i < elements.size() - 1; i++) {
        tracker.markSeqNoAsProcessed(elements.get(i));
        assertFalse(complete.get());
    }
    tracker.markSeqNoAsProcessed(elements.get(elements.size() - 1));
    // synchronize with the waiting thread to mark that it is complete
    barrier.await();

    assertTrue(complete.get());
    thread.join();
}
Example #19
Source File: IcuCollationKeyAnalyzerTests.java From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
public void testThreadSafe() throws Exception {
    int iters = 20;
    for (int i = 0; i < iters; i++) {
        Locale locale = Locale.GERMAN;
        Collator collator = Collator.getInstance(locale);
        collator.setStrength(Collator.IDENTICAL);
        assertThreadSafe(Randomness.get(), new IcuCollationKeyAnalyzer(collator));
    }
}
Example #20
Source File: ReservoirSampler.java From crate with Apache License 2.0
public Samples getSamples(RelationName relationName, List<Reference> columns, int maxSamples) {
    TableInfo table;
    try {
        table = schemas.getTableInfo(relationName);
    } catch (RelationUnknown e) {
        return Samples.EMPTY;
    }
    if (!(table instanceof DocTableInfo)) {
        return Samples.EMPTY;
    }
    DocTableInfo docTable = (DocTableInfo) table;
    Random random = Randomness.get();
    MetaData metaData = clusterService.state().metaData();
    CoordinatorTxnCtx coordinatorTxnCtx = CoordinatorTxnCtx.systemTransactionContext();
    List<Streamer> streamers = Arrays.asList(Symbols.streamerArray(columns));
    List<Engine.Searcher> searchersToRelease = new ArrayList<>();
    CircuitBreaker breaker = circuitBreakerService.getBreaker(HierarchyCircuitBreakerService.QUERY);
    RamAccounting ramAccounting = new BlockBasedRamAccounting(
        b -> breaker.addEstimateBytesAndMaybeBreak(b, "Reservoir-sampling"),
        MAX_BLOCK_SIZE_IN_BYTES);
    try {
        return getSamples(
            columns,
            maxSamples,
            docTable,
            random,
            metaData,
            coordinatorTxnCtx,
            streamers,
            searchersToRelease,
            ramAccounting
        );
    } finally {
        ramAccounting.close();
        for (Engine.Searcher searcher : searchersToRelease) {
            searcher.close();
        }
    }
}
Example #21
Source File: NodeJoinTests.java From crate with Apache License 2.0
private void setupFakeMasterServiceAndCoordinator(long term, ClusterState initialState) {
    deterministicTaskQueue = new DeterministicTaskQueue(
        Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random());
    FakeThreadPoolMasterService fakeMasterService = new FakeThreadPoolMasterService("test_node", "test",
        deterministicTaskQueue::scheduleNow);
    setupMasterServiceAndCoordinator(term, initialState, fakeMasterService,
        deterministicTaskQueue.getThreadPool(), Randomness.get());
    fakeMasterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
        coordinator.handlePublishRequest(new PublishRequest(event.state()));
        publishListener.onResponse(null);
    });
    fakeMasterService.start();
}
Example #22
Source File: IndexRoutingTable.java From crate with Apache License 2.0
IndexRoutingTable(Index index, ImmutableOpenIntMap<IndexShardRoutingTable> shards) {
    this.index = index;
    this.shuffler = new RotationShardShuffler(Randomness.get().nextInt());
    this.shards = shards;
    List<ShardRouting> allActiveShards = new ArrayList<>();
    for (IntObjectCursor<IndexShardRoutingTable> cursor : shards) {
        for (ShardRouting shardRouting : cursor.value) {
            if (shardRouting.active()) {
                allActiveShards.add(shardRouting);
            }
        }
    }
    this.allActiveShards = Collections.unmodifiableList(allActiveShards);
}
Example #23
Source File: IndexShardRoutingTable.java From crate with Apache License 2.0
IndexShardRoutingTable(ShardId shardId, List<ShardRouting> shards) {
    this.shardId = shardId;
    this.shuffler = new RotationShardShuffler(Randomness.get().nextInt());
    this.shards = Collections.unmodifiableList(shards);

    ShardRouting primary = null;
    List<ShardRouting> replicas = new ArrayList<>();
    List<ShardRouting> activeShards = new ArrayList<>();
    List<ShardRouting> assignedShards = new ArrayList<>();
    List<ShardRouting> allInitializingShards = new ArrayList<>();
    Set<String> allAllocationIds = new HashSet<>();
    boolean allShardsStarted = true;
    for (ShardRouting shard : shards) {
        if (shard.primary()) {
            primary = shard;
        } else {
            replicas.add(shard);
        }
        if (shard.active()) {
            activeShards.add(shard);
        }
        if (shard.initializing()) {
            allInitializingShards.add(shard);
        }
        if (shard.relocating()) {
            // create the target initializing shard routing on the node the shard is relocating to
            allInitializingShards.add(shard.getTargetRelocatingShard());
            allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId());
        }
        if (shard.assignedToNode()) {
            assignedShards.add(shard);
            allAllocationIds.add(shard.allocationId().getId());
        }
        if (shard.state() != ShardRoutingState.STARTED) {
            allShardsStarted = false;
        }
    }
    this.allShardsStarted = allShardsStarted;
    this.primary = primary;
    if (primary != null) {
        this.primaryAsList = Collections.singletonList(primary);
    } else {
        this.primaryAsList = Collections.emptyList();
    }
    this.replicas = Collections.unmodifiableList(replicas);
    this.activeShards = Collections.unmodifiableList(activeShards);
    this.assignedShards = Collections.unmodifiableList(assignedShards);
    this.allInitializingShards = Collections.unmodifiableList(allInitializingShards);
    this.allAllocationIds = Collections.unmodifiableSet(allAllocationIds);
}
Example #24
Source File: SQLExecutor.java From crate with Apache License 2.0
public static Builder builder(ClusterService clusterService) {
    return new Builder(clusterService, 1, Randomness.get(), List.of());
}
Example #25
Source File: DiscoveryModule.java From crate with Apache License 2.0
public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService,
                       NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService,
                       MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings,
                       List<DiscoveryPlugin> plugins, AllocationService allocationService, Path configFile,
                       GatewayMetaState gatewayMetaState) {
    final Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators = new ArrayList<>();
    final Map<String, Supplier<SeedHostsProvider>> hostProviders = new HashMap<>();
    hostProviders.put("settings", () -> new SettingsBasedSeedHostsProvider(settings, transportService));
    hostProviders.put("file", () -> new FileBasedSeedHostsProvider(configFile));
    for (DiscoveryPlugin plugin : plugins) {
        plugin.getSeedHostProviders(transportService, networkService).forEach((key, value) -> {
            if (hostProviders.put(key, value) != null) {
                throw new IllegalArgumentException("Cannot register seed provider [" + key + "] twice");
            }
        });
        BiConsumer<DiscoveryNode, ClusterState> joinValidator = plugin.getJoinValidator();
        if (joinValidator != null) {
            joinValidators.add(joinValidator);
        }
    }

    List<String> seedProviderNames = DISCOVERY_SEED_PROVIDERS_SETTING.get(settings);
    // for bwc purposes, add settings provider even if not explicitly specified
    if (seedProviderNames.contains("settings") == false) {
        List<String> extendedSeedProviderNames = new ArrayList<>();
        extendedSeedProviderNames.add("settings");
        extendedSeedProviderNames.addAll(seedProviderNames);
        seedProviderNames = extendedSeedProviderNames;
    }

    final Set<String> missingProviderNames = new HashSet<>(seedProviderNames);
    missingProviderNames.removeAll(hostProviders.keySet());
    if (missingProviderNames.isEmpty() == false) {
        throw new IllegalArgumentException("Unknown seed providers " + missingProviderNames);
    }

    List<SeedHostsProvider> filteredSeedProviders = seedProviderNames.stream()
        .map(hostProviders::get).map(Supplier::get).collect(Collectors.toList());

    String discoveryType = DISCOVERY_TYPE_SETTING.get(settings);

    final SeedHostsProvider seedHostsProvider = hostsResolver -> {
        final List<TransportAddress> addresses = new ArrayList<>();
        for (SeedHostsProvider provider : filteredSeedProviders) {
            addresses.addAll(provider.getSeedAddresses(hostsResolver));
        }
        return Collections.unmodifiableList(addresses);
    };

    if (ZEN2_DISCOVERY_TYPE.equals(discoveryType) || SINGLE_NODE_DISCOVERY_TYPE.equals(discoveryType)) {
        discovery = new Coordinator(NODE_NAME_SETTING.get(settings), settings, clusterSettings,
            transportService, namedWriteableRegistry, allocationService, masterService,
            () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier),
            seedHostsProvider, clusterApplier, joinValidators, new Random(Randomness.get().nextLong()));
    } else {
        throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
    }

    LOGGER.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames);
}
Example #26
Source File: NodeEnvironment.java From crate with Apache License 2.0
public static String generateNodeId(Settings settings) {
    Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
    return UUIDs.randomBase64UUID(random);
}
Example #27
Source File: RoutingNodes.java From crate with Apache License 2.0
public void shuffle() {
    nodes.ensureMutable();
    Randomness.shuffle(unassigned);
}
Example #28
Source File: FetchSampleResponse.java From crate with Apache License 2.0
public static FetchSampleResponse merge(int maxSampleSize, FetchSampleResponse s1, FetchSampleResponse s2) {
    return new FetchSampleResponse(
        Samples.merge(maxSampleSize, s1.samples(), s2.samples(), Randomness.get()));
}
Example #29
Source File: Session.java From crate with Apache License 2.0
/**
 * Execute a query in one step, avoiding the parse/bind/execute/sync procedure.
 * Opposed to using parse/bind/execute/sync this method is thread-safe.
 *
 * @param parse A function to parse the statement; This can be used to cache the parsed statement.
 *              Use {@link #quickExec(String, ResultReceiver, Row)} to use the regular parser
 */
public void quickExec(String statement, Function<String, Statement> parse, ResultReceiver<?> resultReceiver, Row params) {
    CoordinatorTxnCtx txnCtx = new CoordinatorTxnCtx(sessionContext);
    Statement parsedStmt = parse.apply(statement);
    AnalyzedStatement analyzedStatement = analyzer.analyze(parsedStmt, sessionContext, ParamTypeHints.EMPTY);
    RoutingProvider routingProvider = new RoutingProvider(Randomness.get().nextInt(), planner.getAwarenessAttributes());
    UUID jobId = UUID.randomUUID();
    ClusterState clusterState = planner.currentClusterState();
    PlannerContext plannerContext = new PlannerContext(
        clusterState, routingProvider, jobId, planner.functions(), txnCtx, 0, params);
    Plan plan;
    try {
        plan = planner.plan(analyzedStatement, plannerContext);
    } catch (Throwable t) {
        jobsLogs.logPreExecutionFailure(jobId, statement, SQLExceptions.messageOf(t), sessionContext.user());
        throw t;
    }

    StatementClassifier.Classification classification = StatementClassifier.classify(plan);
    jobsLogs.logExecutionStart(jobId, statement, sessionContext.user(), classification);
    JobsLogsUpdateListener jobsLogsUpdateListener = new JobsLogsUpdateListener(jobId, jobsLogs);
    if (!analyzedStatement.isWriteOperation()) {
        resultReceiver = new RetryOnFailureResultReceiver(
            executor.clusterService(),
            clusterState,
            // not using planner.currentClusterState().metaData()::hasIndex to make sure the *current*
            // clusterState at the time of the index check is used
            indexName -> clusterState.metaData().hasIndex(indexName),
            resultReceiver,
            jobId,
            (newJobId, retryResultReceiver) -> retryQuery(
                newJobId,
                analyzedStatement,
                routingProvider,
                new RowConsumerToResultReceiver(retryResultReceiver, 0, jobsLogsUpdateListener),
                params,
                txnCtx
            )
        );
    }
    RowConsumerToResultReceiver consumer = new RowConsumerToResultReceiver(resultReceiver, 0, jobsLogsUpdateListener);
    plan.execute(executor, plannerContext, consumer, params, SubQueryResults.EMPTY);
}
Example #30
Source File: Session.java From crate with Apache License 2.0
@VisibleForTesting
CompletableFuture<?> singleExec(Portal portal, ResultReceiver<?> resultReceiver, int maxRows) {
    var activeConsumer = portal.activeConsumer();
    if (activeConsumer != null && activeConsumer.suspended()) {
        activeConsumer.replaceResultReceiver(resultReceiver, maxRows);
        activeConsumer.resume();
        return resultReceiver.completionFuture();
    }

    var jobId = UUID.randomUUID();
    var routingProvider = new RoutingProvider(Randomness.get().nextInt(), planner.getAwarenessAttributes());
    var clusterState = executor.clusterService().state();
    var txnCtx = new CoordinatorTxnCtx(sessionContext);
    var params = new RowN(portal.params().toArray());
    var plannerContext = new PlannerContext(
        clusterState, routingProvider, jobId, executor.functions(), txnCtx, maxRows, params);
    var analyzedStmt = portal.analyzedStatement();
    String rawStatement = portal.preparedStmt().rawStatement();
    if (analyzedStmt == null) {
        String errorMsg = "Statement must have been analyzed: " + rawStatement;
        jobsLogs.logPreExecutionFailure(jobId, rawStatement, errorMsg, sessionContext.user());
        throw new IllegalStateException(errorMsg);
    }
    Plan plan;
    try {
        plan = planner.plan(analyzedStmt, plannerContext);
    } catch (Throwable t) {
        jobsLogs.logPreExecutionFailure(jobId, rawStatement, SQLExceptions.messageOf(t), sessionContext.user());
        throw t;
    }
    if (!analyzedStmt.isWriteOperation()) {
        resultReceiver = new RetryOnFailureResultReceiver(
            executor.clusterService(),
            clusterState,
            indexName -> executor.clusterService().state().metaData().hasIndex(indexName),
            resultReceiver,
            jobId,
            (newJobId, resultRec) -> retryQuery(
                newJobId,
                analyzedStmt,
                routingProvider,
                new RowConsumerToResultReceiver(
                    resultRec, maxRows, new JobsLogsUpdateListener(newJobId, jobsLogs)),
                params,
                txnCtx
            )
        );
    }
    jobsLogs.logExecutionStart(
        jobId, rawStatement, sessionContext.user(), StatementClassifier.classify(plan));
    RowConsumerToResultReceiver consumer = new RowConsumerToResultReceiver(
        resultReceiver, maxRows, new JobsLogsUpdateListener(jobId, jobsLogs));
    portal.setActiveConsumer(consumer);
    plan.execute(executor, plannerContext, consumer, params, SubQueryResults.EMPTY);
    return resultReceiver.completionFuture();
}