Java Code Examples for org.apache.nifi.util.StopWatch#start()

The following examples show how to use org.apache.nifi.util.StopWatch#start(). The project and source file each snippet was taken from are noted above the example.
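All of the examples share the same pattern: create a StopWatch, call start() just before the operation being measured, stop() afterwards, and read the result with getDuration(TimeUnit) or calculateDataRate(long). A minimal standalone sketch of that pattern (the sleep and the byte count are placeholders, not taken from any example):

import java.util.concurrent.TimeUnit;
import org.apache.nifi.util.StopWatch;

public class StopWatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final StopWatch watch = new StopWatch(false); // created stopped; start() must be called explicitly
        watch.start();
        Thread.sleep(250); // stand-in for the work being timed
        watch.stop();

        final long millis = watch.getDuration(TimeUnit.MILLISECONDS);
        final String rate = watch.calculateDataRate(1024 * 1024); // throughput for a hypothetical 1 MiB of data
        System.out.println("Took " + millis + " ms at " + rate);
    }
}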
Example 1
Source File: ElasticSearchClientServiceImpl.java    From nifi with Apache License 2.0
@Override
public DeleteOperationResponse deleteById(String index, String type, List<String> ids) {
    try {
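        // One newline-terminated action line per document id, as the _bulk endpoint expects.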
        StringBuilder sb = new StringBuilder();
        for (int idx = 0; idx < ids.size(); idx++) {
            String header = buildBulkHeader("delete", index, type, ids.get(idx));
            sb.append(header).append("\n");
        }
        HttpEntity entity = new NStringEntity(sb.toString(), ContentType.APPLICATION_JSON);
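        // Time only the HTTP round-trip, not the payload construction above.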
        StopWatch watch = new StopWatch();
        watch.start();
        Response response = client.performRequest("POST", "/_bulk", Collections.emptyMap(), entity);
        watch.stop();

        if (getLogger().isDebugEnabled()) {
            getLogger().debug(String.format("Response for bulk delete: %s",
                    IOUtils.toString(response.getEntity().getContent(), StandardCharsets.UTF_8)));
        }

        DeleteOperationResponse dor = new DeleteOperationResponse(watch.getDuration(TimeUnit.MILLISECONDS));

        return dor;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
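The buildBulkHeader(...) helper is defined elsewhere in the service and not shown here. As an illustration only (an assumption about its shape, not the project's actual code), a bulk delete action line in the Elasticsearch _bulk NDJSON format pairs the operation name with the index, type, and document id:

// Hypothetical sketch of a bulk action header; a real implementation would also JSON-escape the values.
private static String buildBulkHeader(String operation, String index, String type, String id) {
    return String.format("{ \"%s\": { \"_index\": \"%s\", \"_type\": \"%s\", \"_id\": \"%s\" } }",
            operation, index, type, id);
}
// buildBulkHeader("delete", "messages", "_doc", "1") yields:
// { "delete": { "_index": "messages", "_type": "_doc", "_id": "1" } }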
 
Example 2
Source File: ElasticSearchClientServiceImpl.java    From nifi with Apache License 2.0
@Override
public IndexOperationResponse bulk(List<IndexOperationRequest> operations) {
    try {
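        // buildRequest appends each operation's NDJSON line(s) to the shared payload buffer.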
        StringBuilder payload = new StringBuilder();
        for (int index = 0; index < operations.size(); index++) {
            IndexOperationRequest or = operations.get(index);
            buildRequest(or, payload);
        }

        if (getLogger().isDebugEnabled()) {
            getLogger().debug(payload.toString());
        }
        HttpEntity entity = new NStringEntity(payload.toString(), ContentType.APPLICATION_JSON);
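        // As in Example 1, only the HTTP round-trip of the _bulk request is timed.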
        StopWatch watch = new StopWatch();
        watch.start();
        Response response = client.performRequest("POST", "/_bulk", Collections.emptyMap(), entity);
        watch.stop();

        String rawResponse = IOUtils.toString(response.getEntity().getContent(), StandardCharsets.UTF_8);

        if (getLogger().isDebugEnabled()) {
            getLogger().debug(String.format("Response was: %s", rawResponse));
        }

        IndexOperationResponse retVal = IndexOperationResponse.fromJsonResponse(rawResponse);

        return retVal;
    } catch (Exception ex) {
        throw new ElasticsearchError(ex);
    }
}
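buildRequest(...) is likewise defined elsewhere in the service. For index operations the Elasticsearch _bulk format requires two newline-terminated lines per document, the action header and then the document source, so the assembled payload would look roughly like the following (index and field names invented for illustration):

String payload =
        "{ \"index\": { \"_index\": \"messages\", \"_id\": \"1\" } }\n" +
        "{ \"message\": \"hello\" }\n" +
        "{ \"index\": { \"_index\": \"messages\", \"_id\": \"2\" } }\n" +
        "{ \"message\": \"world\" }\n";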
 
Example 3
Source File: GetHDFSSequenceFile.java    From nifi with Apache License 2.0 (an identical copy appears in the localization_nifi project)
@Override
protected void processBatchOfFiles(final List<Path> files, final ProcessContext context, final ProcessSession session) {
    final Configuration conf = getConfiguration();
    final FileSystem hdfs = getFileSystem();
    final String flowFileContentValue = context.getProperty(FLOWFILE_CONTENT).getValue();
    final boolean keepSourceFiles = context.getProperty(KEEP_SOURCE_FILE).asBoolean();
    final Double bufferSizeProp = context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B);
    if (bufferSizeProp != null) {
        int bufferSize = bufferSizeProp.intValue();
        conf.setInt(BUFFER_SIZE_KEY, bufferSize);
    }
    ComponentLog logger = getLogger();
    final SequenceFileReader<Set<FlowFile>> reader;
    if (flowFileContentValue.equalsIgnoreCase(VALUE_ONLY)) {
        reader = new ValueReader(session);
    } else {
        reader = new KeyValueReader(session);
    }
    Set<FlowFile> flowFiles = Collections.emptySet();
    for (final Path file : files) {
        if (!this.isScheduled()) {
            break; // This processor should stop running immediately.
        }

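        // Create the watch unstarted (autoStart = false); it is started explicitly once work begins.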
        final StopWatch stopWatch = new StopWatch(false);
        try {
            stopWatch.start();
            if (!hdfs.exists(file)) {
                continue; // If file is no longer here move on.
            }
            logger.debug("Reading file");
            flowFiles = getFlowFiles(conf, hdfs, reader, file);
            if (!keepSourceFiles && !hdfs.delete(file, false)) {
                logger.warn("Unable to delete path " + file.toString() + " from HDFS. Will likely be picked up over and over...");
            }
        } catch (Throwable t) {
            logger.error("Error retrieving file {} from HDFS due to {}", new Object[]{file, t}, t);
            session.rollback();
            context.yield();
        } finally {
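            // Always stop the watch, then report provenance and throughput for whatever was read.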
            stopWatch.stop();
            long totalSize = 0;
            for (FlowFile flowFile : flowFiles) {
                totalSize += flowFile.getSize();
                session.getProvenanceReporter().receive(flowFile, file.toString());
            }
            if (totalSize > 0) {
                final String dataRate = stopWatch.calculateDataRate(totalSize);
                final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                logger.info("Created {} flowFiles from SequenceFile {}. Ingested in {} milliseconds at a rate of {}", new Object[]{
                    flowFiles.size(), file.toUri().toASCIIString(), millis, dataRate});
                logger.info("Transferred flowFiles {}  to success", new Object[]{flowFiles});
                session.transfer(flowFiles, REL_SUCCESS);
            }
        }
    }
}
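A detail worth noting in this example: start() is the first statement of the try block and stop() is in the finally block, so a duration is always recorded even when the loop continues early or the read fails. Examples 1 and 2 instead stop the watch inline, which works there because any exception abandons the timing result altogether.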
 