Java Code Examples for org.elasticsearch.action.bulk.BulkRequestBuilder#numberOfActions()
The following examples show how to use org.elasticsearch.action.bulk.BulkRequestBuilder#numberOfActions().
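Most of the examples below follow the same pattern: add requests to a BulkRequestBuilder, use numberOfActions() to decide when a batch is full enough to flush, and skip the network round trip entirely when the builder is empty. The sketch below condenses that pattern; it assumes a pre-7.x transport Client like the projects in these examples use, and the class name, method, constants, and the jsonById map are hypothetical placeholders rather than code from any of those projects.

    import java.util.Map;

    import org.elasticsearch.action.bulk.BulkRequestBuilder;
    import org.elasticsearch.action.bulk.BulkResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.xcontent.XContentType;

    // Hypothetical helper, not taken from any of the projects below.
    public class BulkIndexSketch {

        private static final int BATCH_SIZE = 1000;

        /**
         * Indexes the given JSON documents in batches of BATCH_SIZE.
         * numberOfActions() drives both the flush decision and the final "anything left?" check.
         */
        public static void indexAll(Client client, String index, String type, Map<String, String> jsonById) {
            BulkRequestBuilder bulk = client.prepareBulk();
            for (Map.Entry<String, String> doc : jsonById.entrySet()) {
                bulk.add(client.prepareIndex(index, type, doc.getKey())
                        .setSource(doc.getValue(), XContentType.JSON));
                // Flush once the batch is full, then start a fresh builder.
                if (bulk.numberOfActions() >= BATCH_SIZE) {
                    execute(bulk);
                    bulk = client.prepareBulk();
                }
            }
            // Flush the remainder; skip the round trip entirely if nothing was added.
            if (bulk.numberOfActions() > 0) {
                execute(bulk);
            }
        }

        private static void execute(BulkRequestBuilder bulk) {
            BulkResponse response = bulk.execute().actionGet();
            if (response.hasFailures()) {
                throw new IllegalStateException("bulk indexing failed: " + response.buildFailureMessage());
            }
        }
    }

The examples that follow show the same checks in real project code, with different flush thresholds and error handling.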
Example 1
Source File: CassandraRiver.java, from cassandra-river (Apache License 2.0)
@Override
public void run() {
    logger.info("Starting thread with {} keys", this.keys.rowColumnMap.size());
    if (closed) {
        return;
    }
    BulkRequestBuilder bulk = client.prepareBulk();
    for (String key : this.keys.rowColumnMap.keySet()) {
        try {
            String id = UUID.nameUUIDFromBytes(key.getBytes()).toString();
            bulk.add(indexRequest(this.indexName).type(this.typeName)
                    .id(id)
                    .source(this.keys.rowColumnMap.get(key)));
        } catch (Exception e) {
            logger.error("failed to add entry to bulk indexing", e);
        }
        if (bulk.numberOfActions() >= this.batchSize) {
            saveToEs(bulk);
            bulk = client.prepareBulk();
        }
    }
}
Example 2
Source File: ESIndexState.java, from storm-trident-elasticsearch (Apache License 2.0)
public void bulkUpdateIndices(List<TridentTuple> inputs, TridentTupleMapper<Document<T>> mapper, BulkResponseHandler handler) {
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (TridentTuple input : inputs) {
        Document<T> doc = mapper.map(input);
        byte[] source = serializeSourceOrFail(doc);
        IndexRequestBuilder request = client.prepareIndex(doc.getName(), doc.getType(), doc.getId()).setSource(source);
        if (doc.getParentId() != null) {
            request.setParent(doc.getParentId());
        }
        bulkRequest.add(request);
    }

    if (bulkRequest.numberOfActions() > 0) {
        try {
            handler.handle(bulkRequest.execute().actionGet());
        } catch (ElasticsearchException e) {
            LOGGER.error("error while executing bulk request to elasticsearch");
            throw new FailedException("Failed to store data into elasticsearch", e);
        }
    }
}
Example 3
Source File: ESTest.java, from canal-1.1.3 (Apache License 2.0)
private void commit(BulkRequestBuilder bulkRequestBuilder) {
    if (bulkRequestBuilder.numberOfActions() > 0) {
        BulkResponse response = bulkRequestBuilder.execute().actionGet();
        if (response.hasFailures()) {
            for (BulkItemResponse itemResponse : response.getItems()) {
                if (!itemResponse.isFailed()) {
                    continue;
                }
                if (itemResponse.getFailure().getStatus() == RestStatus.NOT_FOUND) {
                    System.out.println(itemResponse.getFailureMessage());
                } else {
                    System.out.println("ES bulk commit error: " + itemResponse.getFailureMessage());
                }
            }
        }
    }
}
Example 4
Source File: ESUpdateState.java, from sql4es (Apache License 2.0)
/**
 * Executes the list with requests as a bulk with maximum number of requests per bulk
 * @param requests
 * @param maxRequestsPerBulk
 * @return the total number of bulk items executed
 * @throws SQLException
 */
private int execute(List<?> requests, int maxRequestsPerBulk) throws SQLException {
    int result = 0;
    BulkRequestBuilder bulkReq = client.prepareBulk();
    for (Object req : requests) {
        if (req instanceof IndexRequest) bulkReq.add((IndexRequest) req);
        else if (req instanceof UpdateRequest) bulkReq.add((UpdateRequest) req);
        else if (req instanceof DeleteRequest) bulkReq.add((DeleteRequest) req);
        else if (req instanceof IndexRequestBuilder) bulkReq.add((IndexRequestBuilder) req);
        else if (req instanceof UpdateRequestBuilder) bulkReq.add((UpdateRequestBuilder) req);
        else if (req instanceof DeleteRequestBuilder) bulkReq.add((DeleteRequestBuilder) req);
        else throw new SQLException("Type " + req.getClass() + " cannot be added to a bulk request");

        if (bulkReq.numberOfActions() > maxRequestsPerBulk) {
            result += bulkReq.get().getItems().length;
            bulkReq = client.prepareBulk();
        }
    }
    if (bulkReq.numberOfActions() > 0) {
        result += bulkReq.get().getItems().length;
    }
    return result;
}
Example 5
Source File: KafkaRiver.java, from elasticsearch-river-kafka (Apache License 2.0)
void executeBuilder(BulkRequestBuilder bulkRequestBuilder) {
    if (bulkRequestBuilder.numberOfActions() == 0) {
        return;
    }
    ++stats.flushes;
    BulkResponse response = bulkRequestBuilder.execute().actionGet();
    if (response.hasFailures()) {
        logger.warn("failed to execute: " + response.buildFailureMessage());
    }
    for (BulkItemResponse resp : response) {
        if (resp.isFailed()) {
            stats.failed++;
        } else {
            stats.succeeded++;
        }
    }
}
Example 6
Source File: IndexingComponent.java, from elasticsearch-reindex-tool (Apache License 2.0)
private Optional<BulkResult> executeBulk(int indexedCount, BulkRequestBuilder bulkRequest) {
    if (bulkRequest.numberOfActions() > 0) {
        BulkResponse bulkItemResponses = bulkRequest.execute().actionGet();
        Set<String> failedIds = Stream.of(bulkItemResponses.getItems())
                .filter(BulkItemResponse::isFailed)
                .map(BulkItemResponse::getId)
                .collect(Collectors.toSet());
        return Optional.of(new BulkResult(indexedCount, failedIds));
    }
    return Optional.empty();
}
Example 7
Source File: SearchUpdateJobImpl.java, from stash-codesearch-plugin (Apache License 2.0)
@Override
public void doReindex(Client client, GitScm gitScm, GlobalSettings globalSettings) {
    if (!globalSettings.getIndexingEnabled()) {
        return;
    }
    deleteLatestIndexedNote(client);
    while (true) {
        try {
            SearchRequestBuilder esReq = client.prepareSearch(ES_UPDATEALIAS)
                    .setSize(1000)
                    .setFetchSource(false)
                    .setRouting(getRepoDesc())
                    .setQuery(filteredQuery(matchAllQuery(), andFilter(
                            sfu.projectRepositoryFilter(
                                    repository.getProject().getKey(), repository.getSlug()),
                            sfu.exactRefFilter(ref))));
            BulkRequestBuilder bulkDelete = client.prepareBulk().setRefresh(true);
            for (SearchHit hit : esReq.get().getHits().getHits()) {
                bulkDelete.add(buildDeleteFromRef(client, hit.getType(), hit.getId()));
            }
            if (bulkDelete.numberOfActions() == 0) {
                break;
            }
            bulkDelete.get();
        } catch (Exception e) {
            log.error("Could not delete documents for {}, aborting", toString(), e);
            return;
        }
    }
    doUpdate(client, gitScm, globalSettings);
}
Example 8
Source File: IndexBatchBolt.java, from storm-trident-elasticsearch (Apache License 2.0)
protected void bulkUpdateIndexes() {
    List<Tuple> inputs = new ArrayList<>(queue.size());
    queue.drainTo(inputs);

    BulkRequestBuilder bulkRequest = client.prepareBulk();
    for (Tuple input : inputs) {
        Document<T> doc = mapper.map(input);
        IndexRequestBuilder request = client.prepareIndex(doc.getName(), doc.getType(), doc.getId())
                .setSource((String) doc.getSource());
        if (doc.getParentId() != null) {
            request.setParent(doc.getParentId());
        }
        bulkRequest.add(request);
    }

    try {
        if (bulkRequest.numberOfActions() > 0) {
            BulkResponse bulkItemResponses = bulkRequest.execute().actionGet();
            if (bulkItemResponses.hasFailures()) {
                BulkItemResponse[] items = bulkItemResponses.getItems();
                for (int i = 0; i < items.length; i++) {
                    ackOrFail(items[i], inputs.get(i));
                }
            } else {
                ackAll(inputs);
            }
        }
    } catch (ElasticsearchException e) {
        LOGGER.error("Unable to process bulk request, " + inputs.size() + " tuples are in failure", e);
        outputCollector.reportError(e.getRootCause());
        failAll(inputs);
    }
}
Example 9
Source File: ElasticsearchTemplate.java, from summerframework (Apache License 2.0)
public int updateBatchData(ESBasicInfo esBasicInfo, Object object) throws IOException {
    BulkRequestBuilder bulkRequest = esClient.prepareBulk();
    for (String id : esBasicInfo.getIds()) {
        bulkRequest.add(esClient.prepareUpdate(esBasicInfo.getIndex(), esBasicInfo.getType(), id)
                .setDoc(mapper.writeValueAsString(object), XContentType.JSON));
    }
    bulkRequest.execute().actionGet();
    return bulkRequest.numberOfActions();
}
Example 10
Source File: ElasticsearchTemplate.java, from summerframework (Apache License 2.0)
public int deleteBatchData(ESBasicInfo esBasicInfo) {
    BulkRequestBuilder bulkRequest = esClient.prepareBulk();
    for (String id : esBasicInfo.getIds()) {
        bulkRequest.add(esClient.prepareDelete(esBasicInfo.getIndex(), esBasicInfo.getType(), id));
    }
    BulkResponse response = bulkRequest.execute().actionGet();
    log.info("status is:{}", response.status().getStatus());
    return bulkRequest.numberOfActions();
}
Example 11
Source File: ElasticsearchTemplate.java, from summerframework (Apache License 2.0)
public int addBatchData(ESBasicInfo esBasicInfo, Object object) throws IOException {
    BulkRequestBuilder bulkRequest = esClient.prepareBulk();
    for (String id : esBasicInfo.getIds()) {
        bulkRequest.add(esClient.prepareIndex(esBasicInfo.getIndex(), esBasicInfo.getType(), id)
                .setSource(mapper.writeValueAsString(object), XContentType.JSON));
    }
    bulkRequest.execute().actionGet();
    return bulkRequest.numberOfActions();
}
Example 12
Source File: IDAO.java, from spider (GNU General Public License v3.0)
/**
 * Deletes data matching the given query.
 *
 * @param queryBuilder the query
 * @param task         the task entity
 * @return whether all matching data was deleted successfully
 */
protected boolean deleteByQuery(QueryBuilder queryBuilder, Task task) {
    BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
    SearchRequestBuilder searchRequestBuilder = client.prepareSearch(INDEX_NAME)
            .setTypes(TYPE_NAME)
            .setQuery(queryBuilder)
            .setSize(100)
            .setScroll(TimeValue.timeValueMinutes(SCROLL_TIMEOUT));
    SearchResponse response = searchRequestBuilder.execute().actionGet();
    while (true) {
        for (SearchHit hit : response.getHits()) {
            bulkRequestBuilder.add(new DeleteRequest(INDEX_NAME, TYPE_NAME, hit.id()));
            if (task != null) {
                task.increaseCount();
            }
        }
        response = client.prepareSearchScroll(response.getScrollId())
                .setScroll(TimeValue.timeValueMinutes(SCROLL_TIMEOUT))
                .execute().actionGet();
        if (response.getHits().getHits().length == 0) {
            if (task != null) {
                task.setDescription("Finished collecting document IDs to delete for query %s, %s collected, executing delete",
                        queryBuilder.toString(), bulkRequestBuilder.numberOfActions());
            }
            LOG.debug("Finished collecting document IDs to delete for query {}, executing delete", queryBuilder.toString());
            break;
        } else {
            if (task != null) {
                task.setDescription("Delete by query %s: %s documents collected so far",
                        queryBuilder.toString(), bulkRequestBuilder.numberOfActions());
            }
            LOG.debug("Delete by query {}: {} documents collected so far",
                    queryBuilder.toString(), bulkRequestBuilder.numberOfActions());
        }
    }
    if (bulkRequestBuilder.numberOfActions() <= 0) {
        if (task != null) {
            task.setDescription("No data found to delete for query %s, please check the parameters", queryBuilder.toString());
        }
        LOG.debug("No data found to delete for query {}, please check the parameters", queryBuilder.toString());
        return false;
    }
    BulkResponse bulkResponse = bulkRequestBuilder.get();
    if (bulkResponse.hasFailures()) {
        if (task != null) {
            task.setDescription("Delete by query %s partially failed: %s", queryBuilder.toString(), bulkResponse.buildFailureMessage());
        }
        LOG.error("Delete by query {} partially failed: {}", queryBuilder.toString(), bulkResponse.buildFailureMessage());
    } else {
        if (task != null) {
            task.setDescription("Delete by query %s succeeded, took %s ms", queryBuilder.toString(), bulkResponse.getTookInMillis());
        }
        LOG.info("Delete by query {} succeeded, took {} ms", queryBuilder.toString(), bulkResponse.getTookInMillis());
    }
    return !bulkResponse.hasFailures();
}
Example 13
Source File: ElasticSearchIndexer.java, from attic-polygene-java (Apache License 2.0)
@Override
public void notifyChanges( Iterable<EntityState> changedStates ) {
    // All updated or new states
    Map<String, EntityState> newStates = new HashMap<>();
    for( EntityState eState : changedStates ) {
        if( eState.status() == EntityStatus.UPDATED || eState.status() == EntityStatus.NEW ) {
            newStates.put( eState.entityReference().identity().toString(), eState );
        }
    }

    EntityStoreUnitOfWork uow = entityStore.newUnitOfWork( module,
            UsecaseBuilder.newUsecase( "Load associations for indexing" ),
            SystemTime.now() );

    // Bulk index request builder
    BulkRequestBuilder bulkBuilder = support.client().prepareBulk();

    // Handle changed entity states
    for( EntityState changedState : changedStates ) {
        if( changedState.entityDescriptor().queryable() ) {
            switch( changedState.status() ) {
                case REMOVED:
                    LOGGER.trace( "Removing Entity State from Index: {}", changedState );
                    remove( bulkBuilder, changedState.entityReference().identity().toString() );
                    break;
                case UPDATED:
                    LOGGER.trace( "Updating Entity State in Index: {}", changedState );
                    remove( bulkBuilder, changedState.entityReference().identity().toString() );
                    String updatedJson = toJSON( changedState, newStates, uow ).toString();
                    LOGGER.trace( "Will index: {}", updatedJson );
                    index( bulkBuilder, changedState.entityReference().identity().toString(), updatedJson );
                    break;
                case NEW:
                    LOGGER.trace( "Creating Entity State in Index: {}", changedState );
                    String newJson = toJSON( changedState, newStates, uow ).toString();
                    LOGGER.trace( "Will index: {}", newJson );
                    index( bulkBuilder, changedState.entityReference().identity().toString(), newJson );
                    break;
                case LOADED:
                default:
                    // Ignored
                    break;
            }
        }
    }

    uow.discard();

    if( bulkBuilder.numberOfActions() > 0 ) {
        // Execute bulk actions
        BulkResponse bulkResponse = bulkBuilder.execute().actionGet();

        // Handle errors
        if( bulkResponse.hasFailures() ) {
            throw new ElasticSearchIndexingException( bulkResponse.buildFailureMessage() );
        }

        LOGGER.debug( "Indexing changed Entity states took {}ms", bulkResponse.getTookInMillis() );

        // Refresh index
        support.client().admin().indices().prepareRefresh( support.index() ).execute().actionGet();
    }
}
Example 14
Source File: QuestionElasticSearchIndexBuilder.java, from sakai (Educational Community License v2.0)
@Override
protected void processContentQueue() {
    startTime = System.currentTimeMillis();

    // If there are a lot of docs queued up this could take awhile; we don't want
    // to eat up all the CPU cycles.
    Thread.currentThread().setPriority(Thread.NORM_PRIORITY - 1);

    if (getPendingDocuments() == 0) {
        getLog().trace("No pending docs for index builder [" + getName() + "]");
        return;
    }

    SearchResponse response = findContentQueue();
    SearchHit[] hits = response.getHits().hits();
    List<NoContentException> noContentExceptions = new ArrayList<>();

    getLog().trace(getPendingDocuments() + " pending docs for index builder [" + getName() + "]");

    BulkRequestBuilder bulkRequest = newContentQueueBulkUpdateRequestBuilder();
    for (SearchHit hit : hits) {
        if (bulkRequest.numberOfActions() < bulkRequestSize) {
            try {
                processContentQueueEntry(hit, bulkRequest);
            } catch (NoContentException e) {
                noContentExceptions.add(e);
            }
        } else {
            executeBulkRequest(bulkRequest);
            bulkRequest = newContentQueueBulkUpdateRequestBuilder();
        }
    }

    // execute any remaining bulk requests not executed yet
    if (bulkRequest.numberOfActions() > 0) {
        executeBulkRequest(bulkRequest);
    }

    // remove any docs without content, so we don't try to index them again
    if (!noContentExceptions.isEmpty()) {
        for (NoContentException noContentException : noContentExceptions) {
            deleteDocument(noContentException);
        }
    }

    lastLoad = System.currentTimeMillis();

    if (hits.length > 0) {
        getLog().trace("Finished indexing " + hits.length + " docs in " + (lastLoad - startTime)
                + " ms for index builder " + getName());
    }
}
Example 15
Source File: BaseElasticSearchIndexBuilder.java, from sakai (Educational Community License v2.0)
/**
 * Searches for any docs in the search index that have not been indexed yet,
 * digests the content and loads it into the index. Any docs with empty content will be removed from
 * the index.
 */
protected void processContentQueue() {
    startTime = System.currentTimeMillis();

    // If there are a lot of docs queued up this could take awhile; we don't want
    // to eat up all the CPU cycles.
    Thread.currentThread().setPriority(Thread.NORM_PRIORITY - 1);

    if (getPendingDocuments() == 0) {
        getLog().trace("No pending docs for index builder [" + getName() + "]");
        return;
    }

    SearchResponse response = findContentQueue();
    SearchHit[] hits = response.getHits().hits();
    List<NoContentException> noContentExceptions = new ArrayList<>();

    getLog().debug(getPendingDocuments() + " pending docs for index builder [" + getName() + "]");

    BulkRequestBuilder bulkRequest = newContentQueueBulkUpdateRequestBuilder();
    for (SearchHit hit : hits) {
        if (bulkRequest.numberOfActions() < bulkRequestSize) {
            try {
                processContentQueueEntry(hit, bulkRequest);
            } catch (NoContentException e) {
                noContentExceptions.add(e);
            }
        } else {
            executeBulkRequest(bulkRequest);
            bulkRequest = newContentQueueBulkUpdateRequestBuilder();
        }
    }

    // execute any remaining bulk requests not executed yet
    if (bulkRequest.numberOfActions() > 0) {
        executeBulkRequest(bulkRequest);
    }

    // remove any docs without content, so we don't try to index them again
    if (!noContentExceptions.isEmpty()) {
        for (NoContentException noContentException : noContentExceptions) {
            deleteDocument(noContentException);
        }
    }

    lastLoad = System.currentTimeMillis();

    if (hits.length > 0) {
        getLog().info("Finished indexing " + hits.length + " docs in " + (lastLoad - startTime)
                + " ms for index builder " + getName());
    }
}