org.elasticsearch.common.StopWatch Java Examples
The following examples show how to use org.elasticsearch.common.StopWatch.
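All of the examples below exercise the same small API surface: start()/stop() to bracket a task, totalTime() for the accumulated time as a TimeValue, and, on named watches, shortSummary(), lastTaskTime(), and prettyPrint() for reporting. As a warm-up, here is a minimal, self-contained sketch of that surface; the class name, task names, and Thread.sleep stand-ins are illustrative only and do not come from any of the projects below.

import org.elasticsearch.common.StopWatch;

public class StopWatchDemo {
    public static void main(String[] args) throws InterruptedException {
        // Anonymous watch timing a single span of work.
        StopWatch watch = new StopWatch().start();
        Thread.sleep(100); // stand-in for real work
        watch.stop();
        System.out.println("took " + watch.totalTime());              // TimeValue, e.g. "100ms"
        System.out.println("millis: " + watch.totalTime().millis());

        // Named watch with named tasks, timing several phases separately.
        StopWatch phases = new StopWatch("demo");
        phases.start("phase1");
        Thread.sleep(50); // stand-in for phase 1
        phases.stop();
        phases.start("phase2");
        Thread.sleep(50); // stand-in for phase 2
        phases.stop();
        System.out.println(phases.shortSummary()); // one-line total
        System.out.println(phases.prettyPrint());  // per-task breakdown
    }
}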
Example #1
Source File: DLBasedIndexRecoverySourceHandler.java From Elasticsearch with Apache License 2.0
protected void prepareTargetForTranslog() {
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog",
            request.shardId(), request.targetNode());
    final long startEngineStart = stopWatch.totalTime().millis();
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send a request preparing the new shard's translog to receive
            // operations. This ensures the shard engine is started and disables
            // garbage collection (not the JVM's GC!) of tombstone deletes
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), 0),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });
    stopWatch.stop();
    response.startTime = stopWatch.totalTime().millis() - startEngineStart;
    logger.trace("{} recovery [phase1] to {}: remote engine start took [{}]",
            request.shardId(), request.targetNode(), stopWatch.totalTime());
}
Example #2
Source File: BlobRecoverySourceHandler.java From Elasticsearch with Apache License 2.0
protected void prepareTargetForTranslog(final Translog.View translogView) {
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog",
            request.shardId(), request.targetNode());
    final long startEngineStart = stopWatch.totalTime().millis();
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send a request preparing the new shard's translog to receive
            // operations. This ensures the shard engine is started and disables
            // garbage collection (not the JVM's GC!) of tombstone deletes
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), translogView.totalOperations()),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });
    stopWatch.stop();
    response.startTime = stopWatch.totalTime().millis() - startEngineStart;
    logger.trace("{} recovery [phase1] to {}: remote engine start took [{}]",
            request.shardId(), request.targetNode(), stopWatch.totalTime());
}
Example #3
Source File: BlobRecoverySourceHandler.java From Elasticsearch with Apache License 2.0
/**
 * Perform phase2 of the recovery process
 * <p/>
 * Phase2 takes a snapshot of the current translog *without* acquiring the
 * write lock (however, the translog snapshot is a point-in-time view of
 * the translog). It then sends each translog operation to the target node
 * so it can be replayed into the new shard.
 */
public void phase2(Translog.Snapshot snapshot) {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("{} recovery [phase2] to {}: sending transaction log operations",
            request.shardId(), request.targetNode());
    // Send all the snapshot's translog operations to the target
    int totalOperations = sendSnapshot(snapshot);
    stopWatch.stop();
    logger.trace("{} recovery [phase2] to {}: took [{}]",
            request.shardId(), request.targetNode(), stopWatch.totalTime());
    response.phase2Time = stopWatch.totalTime().millis();
    response.phase2Operations = totalOperations;
}
Example #4
Source File: RecoverySourceHandler.java From crate with Apache License 2.0
void prepareTargetForTranslog(boolean fileBasedRecovery, int totalTranslogOps, ActionListener<TimeValue> listener) {
    StopWatch stopWatch = new StopWatch().start();
    final ActionListener<Void> wrappedListener = ActionListener.wrap(
        nullVal -> {
            stopWatch.stop();
            final TimeValue tookTime = stopWatch.totalTime();
            logger.trace("recovery [phase1]: remote engine start took [{}]", tookTime);
            listener.onResponse(tookTime);
        },
        e -> listener.onFailure(new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e)));
    // Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables
    // garbage collection (not the JVM's GC!) of tombstone deletes.
    logger.trace("recovery [phase1]: prepare remote engine for translog");
    cancellableThreads.execute(
        () -> recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps, wrappedListener));
}
Example #5
Source File: RecoverySourceHandler.java From Elasticsearch with Apache License 2.0
protected void prepareTargetForTranslog(final Translog.View translogView) {
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("{} recovery [phase1] to {}: prepare remote engine for translog",
            request.shardId(), request.targetNode());
    final long startEngineStart = stopWatch.totalTime().millis();
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send a request preparing the new shard's translog to receive
            // operations. This ensures the shard engine is started and disables
            // garbage collection (not the JVM's GC!) of tombstone deletes
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.PREPARE_TRANSLOG,
                    new RecoveryPrepareForTranslogOperationsRequest(request.recoveryId(), request.shardId(), translogView.totalOperations()),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });
    stopWatch.stop();
    response.startTime = stopWatch.totalTime().millis() - startEngineStart;
    logger.trace("{} recovery [phase1] to {}: remote engine start took [{}]",
            request.shardId(), request.targetNode(), stopWatch.totalTime());
}
Example #6
Source File: RecoverySourceHandler.java From Elasticsearch with Apache License 2.0
/**
 * Perform phase2 of the recovery process
 * <p>
 * Phase2 takes a snapshot of the current translog *without* acquiring the
 * write lock (however, the translog snapshot is a point-in-time view of
 * the translog). It then sends each translog operation to the target node
 * so it can be replayed into the new shard.
 */
public void phase2(Translog.Snapshot snapshot) {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("{} recovery [phase2] to {}: sending transaction log operations",
            request.shardId(), request.targetNode());
    // Send all the snapshot's translog operations to the target
    int totalOperations = sendSnapshot(snapshot);
    stopWatch.stop();
    logger.trace("{} recovery [phase2] to {}: took [{}]",
            request.shardId(), request.targetNode(), stopWatch.totalTime());
    response.phase2Time = stopWatch.totalTime().millis();
    response.phase2Operations = totalOperations;
}
Example #7
Source File: OpenNlpService.java From elasticsearch-ingest-opennlp with Apache License 2.0
protected OpenNlpService start() {
    StopWatch sw = new StopWatch("models-loading");
    Map<String, String> settingsMap = IngestOpenNlpPlugin.MODEL_FILE_SETTINGS.getAsMap(settings);
    for (Map.Entry<String, String> entry : settingsMap.entrySet()) {
        String name = entry.getKey();
        sw.start(name);
        Path path = configDirectory.resolve(entry.getValue());
        try (InputStream is = Files.newInputStream(path)) {
            nameFinderModels.put(name, new TokenNameFinderModel(is));
        } catch (IOException e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("Could not load model [{}] with path [{}]", name, path), e);
        }
        sw.stop();
    }
    if (settingsMap.keySet().size() == 0) {
        logger.error("Did not load any models for ingest-opennlp plugin, none configured");
    } else {
        logger.info("Read models in [{}] for {}", sw.totalTime(), settingsMap.keySet());
    }
    return this;
}
Example #8
Source File: RecoverySourceHandler.java From crate with Apache License 2.0
void finalizeRecovery(final long targetLocalCheckpoint, final ActionListener<Void> listener) throws IOException {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    final StopWatch stopWatch = new StopWatch().start();
    logger.trace("finalizing recovery");
    /*
     * Before marking the shard as in-sync we acquire an operation permit. We do this so that there is a barrier between marking a
     * shard as in-sync and relocating a shard. If we acquire the permit then no relocation handoff can complete before we are done
     * marking the shard as in-sync. If the relocation handoff holds all the permits then after the handoff completes and we acquire
     * the permit then the state of the shard will be relocated and this recovery will fail.
     */
    runUnderPrimaryPermit(() -> shard.markAllocationIdAsInSync(request.targetAllocationId(), targetLocalCheckpoint),
        shardId + " marking " + request.targetAllocationId() + " as in sync", shard, cancellableThreads, logger);
    final long globalCheckpoint = shard.getLastKnownGlobalCheckpoint(); // this global checkpoint is persisted in finalizeRecovery
    final StepListener<Void> finalizeListener = new StepListener<>();
    cancellableThreads.executeIO(() -> recoveryTarget.finalizeRecovery(globalCheckpoint, finalizeListener));
    finalizeListener.whenComplete(r -> {
        runUnderPrimaryPermit(() -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint),
            shardId + " updating " + request.targetAllocationId() + "'s global checkpoint", shard, cancellableThreads, logger);
        if (request.isPrimaryRelocation()) {
            logger.trace("performing relocation hand-off");
            // TODO: make relocated async
            // this acquires all IndexShard operation permits and will thus delay new recoveries until it is done
            cancellableThreads.execute(() -> shard.relocated(recoveryTarget::handoffPrimaryContext));
            /*
             * if the recovery process fails after disabling primary mode on the source shard, both relocation source and
             * target are failed (see {@link IndexShard#updateRoutingEntry}).
             */
        }
        stopWatch.stop();
        logger.trace("finalizing recovery took [{}]", stopWatch.totalTime());
        listener.onResponse(null);
    }, listener::onFailure);
}
Example #9
Source File: DLBasedIndexRecoverySourceHandler.java From Elasticsearch with Apache License 2.0
public void finalizeRecovery() {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode());
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send the FINALIZE request to the target node. The finalize request
            // clears unreferenced translog files, refreshes the engine now that
            // new segments are available, and enables garbage collection of
            // tombstone files. The shard is also moved to the POST_RECOVERY phase
            // during this time
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });
    if (request.markAsRelocated()) {
        // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
        try {
            shard.relocated("to " + request.targetNode());
        } catch (IllegalIndexShardStateException e) {
            // we can ignore this exception since, on the other node, when it moved to phase3
            // it will also send shard started, which might cause the index shard we work against
            // to be closed by the time we get to the relocated method
        }
    }
    stopWatch.stop();
    logger.trace("[{}][{}] finalizing recovery to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
}
Example #10
Source File: BlobRecoverySourceHandler.java From Elasticsearch with Apache License 2.0
/**
 * finalizes the recovery process
 */
public void finalizeRecovery() {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode());
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send the FINALIZE request to the target node. The finalize request
            // clears unreferenced translog files, refreshes the engine now that
            // new segments are available, and enables garbage collection of
            // tombstone files. The shard is also moved to the POST_RECOVERY phase
            // during this time
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });
    if (request.markAsRelocated()) {
        // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
        try {
            shard.relocated("to " + request.targetNode());
        } catch (IllegalIndexShardStateException e) {
            // we can ignore this exception since, on the other node, when it moved to phase3
            // it will also send shard started, which might cause the index shard we work against
            // to be closed by the time we get to the relocated method
        }
    }
    stopWatch.stop();
    logger.trace("[{}][{}] finalizing recovery to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
}
Example #11
Source File: JobCollectContext.java From Elasticsearch with Apache License 2.0
private void measureCollectTime() {
    final StopWatch stopWatch = new StopWatch(collectPhase.executionPhaseId() + ": " + collectPhase.name());
    stopWatch.start("starting collectors");
    listenableRowReceiver.finishFuture().addListener(new Runnable() {
        @Override
        public void run() {
            stopWatch.stop();
            logger.trace("Collectors finished: {}", stopWatch.shortSummary());
        }
    }, MoreExecutors.directExecutor());
}
Example #12
Source File: RecoverySourceHandler.java From Elasticsearch with Apache License 2.0
/**
 * finalizes the recovery process
 */
public void finalizeRecovery() {
    if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
    }
    cancellableThreads.checkForCancel();
    StopWatch stopWatch = new StopWatch().start();
    logger.trace("[{}][{}] finalizing recovery to {}", indexName, shardId, request.targetNode());
    cancellableThreads.execute(new Interruptable() {
        @Override
        public void run() throws InterruptedException {
            // Send the FINALIZE request to the target node. The finalize request
            // clears unreferenced translog files, refreshes the engine now that
            // new segments are available, and enables garbage collection of
            // tombstone files. The shard is also moved to the POST_RECOVERY phase
            // during this time
            transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.FINALIZE,
                    new RecoveryFinalizeRecoveryRequest(request.recoveryId(), request.shardId()),
                    TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionLongTimeout()).build(),
                    EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
        }
    });
    if (request.markAsRelocated()) {
        // TODO what happens if the recovery process fails afterwards, we need to mark this back to started
        try {
            shard.relocated("to " + request.targetNode());
        } catch (IllegalIndexShardStateException e) {
            // we can ignore this exception since, on the other node, when it moved to phase3
            // it will also send shard started, which might cause the index shard we work against
            // to be closed by the time we get to the relocated method
        }
    }
    stopWatch.stop();
    logger.trace("[{}][{}] finalizing recovery to {}: took [{}]", indexName, shardId, request.targetNode(), stopWatch.totalTime());
}
Example #13
Source File: BlobRecoveryHandler.java From Elasticsearch with Apache License 2.0
public void phase1() throws Exception {
    logger.debug("[{}][{}] recovery [phase1] to {}: start",
            request.shardId().index().name(), request.shardId().id(), request.targetNode().getName());
    StopWatch stopWatch = new StopWatch().start();
    blobTransferTarget.startRecovery();
    blobTransferTarget.createActiveTransfersSnapshot();
    sendStartRecoveryRequest();
    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
    try {
        syncVarFiles(lastException);
    } catch (InterruptedException ex) {
        throw new ElasticsearchException("blob recovery phase1 failed", ex);
    }
    Exception exception = lastException.get();
    if (exception != null) {
        throw exception;
    }
    /**
     * as soon as the recovery starts the target node will receive PutChunkReplicaRequests
     * the target node will then request the bytes it is missing from the source node
     * (it is missing bytes from PutChunk/StartBlob requests that happened before the recovery)
     * here we need to block so that the target node has enough time to request the head chunks
     *
     * e.g.
     * Target Node receives Chunk X with bytes 10-19
     * Target Node requests bytes 0-9 from Source Node
     * Source Node sends bytes 0-9
     * Source Node sets transferTakenOver
     */
    blobTransferTarget.waitForGetHeadRequests(GET_HEAD_TIMEOUT, TimeUnit.SECONDS);
    blobTransferTarget.createActivePutHeadChunkTransfersSnapshot();
    /**
     * After receiving a getHeadRequest the source node starts to send HeadChunks to the target
     * wait for all PutHeadChunk-Runnables to finish before ending the recovery.
     */
    blobTransferTarget.waitUntilPutHeadChunksAreFinished();
    sendFinalizeRecoveryRequest();
    blobTransferTarget.stopRecovery();
    stopWatch.stop();
    logger.debug("[{}][{}] recovery [phase1] to {}: took [{}]",
            request.shardId().index().name(), request.shardId().id(), request.targetNode().getName(), stopWatch.totalTime());
}
Example #14
Source File: BlobRecoveryHandler.java From crate with Apache License 2.0
@Override
protected void blobRecoveryHook() throws Exception {
    LOGGER.debug("[{}][{}] recovery [phase1] to {}: start",
            request.shardId().getIndexName(), request.shardId().id(), request.targetNode().getName());
    final StopWatch stopWatch = new StopWatch().start();
    blobTransferTarget.startRecovery();
    blobTransferTarget.createActiveTransfersSnapshot();
    sendStartRecoveryRequest();
    final AtomicReference<Exception> lastException = new AtomicReference<>();
    try {
        syncVarFiles(lastException);
    } catch (InterruptedException ex) {
        throw new ElasticsearchException("blob recovery phase1 failed", ex);
    }
    Exception exception = lastException.get();
    if (exception != null) {
        throw exception;
    }
    /*
     as soon as the recovery starts the target node will receive PutChunkReplicaRequests
     the target node will then request the bytes it is missing from the source node
     (it is missing bytes from PutChunk/StartBlob requests that happened before the recovery)
     here we need to block so that the target node has enough time to request the head chunks

     e.g.
     Target Node receives Chunk X with bytes 10-19
     Target Node requests bytes 0-9 from Source Node
     Source Node sends bytes 0-9
     Source Node sets transferTakenOver
    */
    blobTransferTarget.waitForGetHeadRequests(GET_HEAD_TIMEOUT, TimeUnit.SECONDS);
    blobTransferTarget.createActivePutHeadChunkTransfersSnapshot();
    /*
     After receiving a getHeadRequest the source node starts to send HeadChunks to the target
     wait for all PutHeadChunk-Runnables to finish before ending the recovery.
    */
    blobTransferTarget.waitUntilPutHeadChunksAreFinished();
    sendFinalizeRecoveryRequest();
    blobTransferTarget.stopRecovery();
    stopWatch.stop();
    LOGGER.debug("[{}][{}] recovery [phase1] to {}: took [{}]",
            request.shardId().getIndexName(), request.shardId().id(), request.targetNode().getName(), stopWatch.totalTime());
}
Example #15
Source File: Node.java From crate with Apache License 2.0
@Override
public synchronized void close() throws IOException {
    if (lifecycle.started()) {
        stop();
    }
    if (!lifecycle.moveToClosed()) {
        return;
    }
    logger.info("closing ...");
    List<Closeable> toClose = new ArrayList<>();
    StopWatch stopWatch = new StopWatch("node_close");
    toClose.add(() -> stopWatch.start("node_service"));
    toClose.add(nodeService);
    toClose.add(() -> stopWatch.stop().start("http"));
    toClose.add(injector.getInstance(HttpServerTransport.class));
    toClose.add(() -> stopWatch.stop().start("snapshot_service"));
    toClose.add(injector.getInstance(SnapshotsService.class));
    toClose.add(injector.getInstance(SnapshotShardsService.class));
    toClose.add(() -> stopWatch.stop().start("client"));
    Releasables.close(injector.getInstance(Client.class));
    toClose.add(() -> stopWatch.stop().start("indices_cluster"));
    toClose.add(injector.getInstance(IndicesClusterStateService.class));
    toClose.add(() -> stopWatch.stop().start("indices"));
    toClose.add(injector.getInstance(IndicesService.class));
    // close filter/fielddata caches after indices
    toClose.add(injector.getInstance(IndicesStore.class));
    toClose.add(() -> stopWatch.stop().start("routing"));
    toClose.add(injector.getInstance(RoutingService.class));
    toClose.add(() -> stopWatch.stop().start("cluster"));
    toClose.add(injector.getInstance(ClusterService.class));
    toClose.add(() -> stopWatch.stop().start("node_connections_service"));
    toClose.add(injector.getInstance(NodeConnectionsService.class));
    toClose.add(() -> stopWatch.stop().start("discovery"));
    toClose.add(injector.getInstance(Discovery.class));
    toClose.add(() -> stopWatch.stop().start("monitor"));
    toClose.add(nodeService.getMonitorService());
    toClose.add(() -> stopWatch.stop().start("gateway"));
    toClose.add(injector.getInstance(GatewayService.class));
    toClose.add(() -> stopWatch.stop().start("transport"));
    toClose.add(injector.getInstance(TransportService.class));
    for (LifecycleComponent plugin : pluginLifecycleComponents) {
        toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getClass().getName() + ")"));
        toClose.add(plugin);
    }
    toClose.addAll(pluginsService.filterPlugins(Plugin.class));
    toClose.add(() -> stopWatch.stop().start("thread_pool"));
    // TODO this should really use ThreadPool.terminate()
    toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
    toClose.add(() -> {
        try {
            injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // ignore
        }
    });
    toClose.add(() -> stopWatch.stop().start("thread_pool_force_shutdown"));
    toClose.add(() -> injector.getInstance(ThreadPool.class).shutdownNow());
    toClose.add(() -> stopWatch.stop());
    toClose.add(injector.getInstance(NodeEnvironment.class));
    toClose.add(injector.getInstance(PageCacheRecycler.class));
    if (logger.isTraceEnabled()) {
        logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }
    IOUtils.close(toClose);
    logger.info("closed");
}
Example #16
Source File: TermsByQueryBenchmark.java From siren-join with GNU Affero General Public License v3.0
public void setupIndex() {
    log("==== INDEX SETUP ====");
    try {
        client.admin().indices().create(createIndexRequest(PARENT_INDEX)).actionGet();
        client.admin().indices().create(createIndexRequest(CHILD_INDEX)).actionGet();
        Thread.sleep(5000);
        StopWatch stopWatch = new StopWatch().start();
        log("Indexing [" + NUM_PARENTS + "] parent documents into [" + PARENT_INDEX + "]");
        log("Indexing [" + (NUM_PARENTS * NUM_CHILDREN_PER_PARENT) + "] child documents into [" + CHILD_INDEX + "]");
        int ITERS = NUM_PARENTS / BATCH_SIZE;
        int i = 1;
        int counter = 0;
        for (; i <= ITERS; i++) {
            BulkRequestBuilder request = client.prepareBulk();
            for (int j = 0; j < BATCH_SIZE; j++) {
                String parentId = Integer.toString(counter);
                counter++;
                request.add(Requests.indexRequest(PARENT_INDEX)
                        .type(PARENT_TYPE)
                        .id(parentId)
                        .source(parentSource(counter, "test" + counter)));
                for (int k = 0; k < NUM_CHILDREN_PER_PARENT; k++) {
                    String childId = parentId + "_" + k;
                    request.add(Requests.indexRequest(CHILD_INDEX)
                            .type(CHILD_TYPE)
                            .id(childId)
                            .source(childSource(childId, counter, "tag" + k)));
                }
            }
            BulkResponse response = request.execute().actionGet();
            if (response.hasFailures()) {
                log("Index Failures...");
            }
            if (((i * BATCH_SIZE) % 10000) == 0) {
                log("Indexed [" + (i * BATCH_SIZE) * (1 + NUM_CHILDREN_PER_PARENT) + "] took [" + stopWatch.stop().lastTaskTime() + "]");
                stopWatch.start();
            }
        }
        log("Indexing took [" + stopWatch.totalTime() + "]");
        log("TPS [" + (((double) (NUM_PARENTS * (1 + NUM_CHILDREN_PER_PARENT))) / stopWatch.totalTime().secondsFrac()) + "]");
    } catch (Exception e) {
        log("Indices exist, wait for green");
        waitForGreen();
    }
    client.admin().indices().prepareRefresh().execute().actionGet();
    log("Number of docs in index: " + client.prepareCount(PARENT_INDEX, CHILD_INDEX).setQuery(matchAllQuery()).execute().actionGet().getCount());
    log("");
}
Example #17
Source File: FilterJoinBenchmark.java From siren-join with GNU Affero General Public License v3.0
public void setupIndex() {
    log("==== INDEX SETUP ====");
    try {
        client.admin().indices().create(createIndexRequest(PARENT_INDEX).mapping(PARENT_TYPE,
                "id", "type=string,index=not_analyzed,doc_values=true",
                "num", "type=integer,doc_values=true")).actionGet();
        client.admin().indices().create(createIndexRequest(CHILD_INDEX).mapping(CHILD_TYPE,
                "id", "type=string,index=not_analyzed,doc_values=true",
                "pid", "type=string,index=not_analyzed,doc_values=true",
                "num", "type=integer,doc_values=true")).actionGet();
        Thread.sleep(5000);
        StopWatch stopWatch = new StopWatch().start();
        log("Indexing [" + NUM_PARENTS + "] parent documents into [" + PARENT_INDEX + "]");
        log("Indexing [" + (NUM_PARENTS * NUM_CHILDREN_PER_PARENT) + "] child documents into [" + CHILD_INDEX + "]");
        int ITERS = NUM_PARENTS / BATCH_SIZE;
        int i = 1;
        int counter = 0;
        for (; i <= ITERS; i++) {
            BulkRequestBuilder request = client.prepareBulk();
            for (int j = 0; j < BATCH_SIZE; j++) {
                String parentId = Integer.toString(counter);
                counter++;
                request.add(Requests.indexRequest(PARENT_INDEX)
                        .type(PARENT_TYPE)
                        .id(parentId)
                        .source(parentSource(counter, "test" + counter)));
                for (int k = 0; k < NUM_CHILDREN_PER_PARENT; k++) {
                    String childId = parentId + "_" + k;
                    request.add(Requests.indexRequest(CHILD_INDEX)
                            .type(CHILD_TYPE)
                            .id(childId)
                            .source(childSource(childId, counter, "tag" + k)));
                }
            }
            BulkResponse response = request.execute().actionGet();
            if (response.hasFailures()) {
                log("Index Failures...");
            }
            if (((i * BATCH_SIZE) % 10000) == 0) {
                log("Indexed [" + (i * BATCH_SIZE) * (1 + NUM_CHILDREN_PER_PARENT) + "] took [" + stopWatch.stop().lastTaskTime() + "]");
                stopWatch.start();
            }
        }
        log("Indexing took [" + stopWatch.totalTime() + "]");
        log("TPS [" + (((double) (NUM_PARENTS * (1 + NUM_CHILDREN_PER_PARENT))) / stopWatch.totalTime().secondsFrac()) + "]");
    } catch (Exception e) {
        log("Indices exist, wait for green");
        waitForGreen();
    }
    client.admin().indices().prepareRefresh().execute().actionGet();
    log("Number of docs in index: " + client.prepareCount(PARENT_INDEX, CHILD_INDEX).setQuery(matchAllQuery()).execute().actionGet().getCount());
    log("");
}
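Both benchmarks above rely on the same interval-timing idiom: every 10,000 parents, stop() closes the current interval, lastTaskTime() reports just that interval, and start() reopens the watch for the next one, while totalTime() keeps accumulating across all intervals. A minimal sketch of that idiom in isolation (the loop body, batch count, and sleep are placeholders, not code from either benchmark):

import org.elasticsearch.common.StopWatch;

public class BatchTimingDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch stopWatch = new StopWatch().start();
        for (int batch = 1; batch <= 5; batch++) {
            Thread.sleep(20); // stand-in for indexing one batch
            // stop() closes the interval so lastTaskTime() covers only this batch;
            // start() immediately opens the next interval.
            System.out.println("batch " + batch + " took [" + stopWatch.stop().lastTaskTime() + "]");
            stopWatch.start();
        }
        System.out.println("total [" + stopWatch.stop().totalTime() + "]");
    }
}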
Example #18
Source File: FlsPerfTest.java From deprecated-security-advanced-modules with Apache License 2.0
@Test
public void testFlsPerfNamed() throws Exception {
    setup();
    HttpResponse res;
    StopWatch sw = new StopWatch("testFlsPerfNamed");

    sw.start("non fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty",
            encodeBasicHeader("admin", "admin"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_named_only", "password"))).getStatusCode());
    sw.stop();
    Assert.assertFalse(res.getBody().contains("field1\""));
    Assert.assertFalse(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls 2 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_named_only", "password"))).getStatusCode());
    sw.stop();
    Assert.assertFalse(res.getBody().contains("field1\""));
    Assert.assertFalse(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls 3 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_named_only", "password"))).getStatusCode());
    sw.stop();
    Assert.assertFalse(res.getBody().contains("field1\""));
    Assert.assertFalse(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    System.out.println(sw.prettyPrint());
}
Example #19
Source File: FlsPerfTest.java From deprecated-security-advanced-modules with Apache License 2.0
@Test
public void testFlsWcIn() throws Exception {
    setup();
    HttpResponse res;
    StopWatch sw = new StopWatch("testFlsWcIn");

    sw.start("non fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty",
            encodeBasicHeader("admin", "admin"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_wc_in", "password"))).getStatusCode());
    sw.stop();
    Assert.assertFalse(res.getBody().contains("field0\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls 2 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_wc_in", "password"))).getStatusCode());
    sw.stop();
    Assert.assertFalse(res.getBody().contains("field0\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls 3 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_wc_in", "password"))).getStatusCode());
    sw.stop();
    Assert.assertFalse(res.getBody().contains("field0\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    System.out.println(sw.prettyPrint());
}
Example #20
Source File: FlsPerfTest.java From deprecated-security-advanced-modules with Apache License 2.0
@Test
public void testFlsPerfNamedEx() throws Exception {
    setup();
    HttpResponse res;
    StopWatch sw = new StopWatch("testFlsPerfNamedEx");

    sw.start("non fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty",
            encodeBasicHeader("admin", "admin"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_named_ex", "password"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertFalse(res.getBody().contains("field50\""));
    Assert.assertFalse(res.getBody().contains("field997\""));

    sw.start("with fls 2 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_named_ex", "password"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertFalse(res.getBody().contains("field50\""));
    Assert.assertFalse(res.getBody().contains("field997\""));

    sw.start("with fls 3 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_named_ex", "password"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertFalse(res.getBody().contains("field50\""));
    Assert.assertFalse(res.getBody().contains("field997\""));

    System.out.println(sw.prettyPrint());
}
Example #21
Source File: FlsPerfTest.java From deprecated-security-advanced-modules with Apache License 2.0
@Test
public void testFlsPerfWcEx() throws Exception {
    setup();
    HttpResponse res;
    StopWatch sw = new StopWatch("testFlsPerfWcEx");

    sw.start("non fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty",
            encodeBasicHeader("admin", "admin"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertTrue(res.getBody().contains("field50\""));
    Assert.assertTrue(res.getBody().contains("field997\""));

    sw.start("with fls");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_wc_ex", "password"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertFalse(res.getBody().contains("field50\""));
    Assert.assertFalse(res.getBody().contains("field997\""));

    sw.start("with fls 2 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_wc_ex", "password"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertFalse(res.getBody().contains("field50\""));
    Assert.assertFalse(res.getBody().contains("field997\""));

    sw.start("with fls 3 after warmup");
    Assert.assertEquals(HttpStatus.SC_OK, (res = rh.executeGetRequest("/deals/_search?pretty&size=1000",
            encodeBasicHeader("perf_wc_ex", "password"))).getStatusCode());
    sw.stop();
    Assert.assertTrue(res.getBody().contains("field1\""));
    Assert.assertTrue(res.getBody().contains("field2\""));
    Assert.assertFalse(res.getBody().contains("field50\""));
    Assert.assertFalse(res.getBody().contains("field997\""));

    System.out.println(sw.prettyPrint());
}