Java Code Examples for org.apache.nifi.processor.ProcessSession#adjustCounter()
The following examples show how to use org.apache.nifi.processor.ProcessSession#adjustCounter().
Each example notes its source file, originating project, and license.
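Before the examples, a quick note on the method itself: adjustCounter(String name, long delta, boolean immediate) adds delta to the named counter, registering the counter if it does not yet exist. With immediate set to false, the adjustment is applied only if and when the session is committed; with immediate set to true, it is applied right away, even if the session is later rolled back. The minimal sketch below illustrates both modes; the processor class, counter names, and success relationship are illustrative and do not come from the examples that follow.

import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

public class CountingProcessor extends AbstractProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        // immediate = true: counted as soon as the call is made, even if the
        // session is later rolled back. Useful for "how often did I run" counters.
        session.adjustCounter("Invocations", 1L, true);

        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            return;
        }

        // immediate = false: the adjustment is applied only if and when this
        // session commits, so a rollback will not inflate the count.
        session.adjustCounter("FlowFiles Processed", 1L, false);
        session.transfer(flowFile, REL_SUCCESS);
    }
}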
Example 1
Source File: AbstractListenEventProcessor.java From localization_nifi with Apache License 2.0
/**
 * If pollErrorQueue is true, the error queue will be checked first and an event will be
 * returned from the error queue if available.
 *
 * If pollErrorQueue is false, or no data is in the error queue, the regular queue is polled.
 *
 * If longPoll is true, the regular queue will be polled with a short timeout, otherwise it will
 * poll with no timeout which will return immediately.
 *
 * @param longPoll whether or not to poll the main queue with a small timeout
 * @param pollErrorQueue whether or not to poll the error queue first
 *
 * @return an event from one of the queues, or null if none are available
 */
protected E getMessage(final boolean longPoll, final boolean pollErrorQueue, final ProcessSession session) {
    E event = null;
    if (pollErrorQueue) {
        event = errorEvents.poll();
    }

    if (event == null) {
        try {
            if (longPoll) {
                event = events.poll(POLL_TIMEOUT_MS, TimeUnit.MILLISECONDS);
            } else {
                event = events.poll();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
    }

    if (event != null) {
        session.adjustCounter("Messages Received", 1L, false);
    }

    return event;
}
Example 2
Source File: ListenSyslog.java From localization_nifi with Apache License 2.0
protected RawSyslogEvent getMessage(final boolean longPoll, final boolean pollErrorQueue, final ProcessSession session) {
    RawSyslogEvent rawSyslogEvent = null;
    if (pollErrorQueue) {
        rawSyslogEvent = errorEvents.poll();
    }

    if (rawSyslogEvent == null) {
        try {
            if (longPoll) {
                rawSyslogEvent = syslogEvents.poll(20, TimeUnit.MILLISECONDS);
            } else {
                rawSyslogEvent = syslogEvents.poll();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
    }

    if (rawSyslogEvent != null) {
        session.adjustCounter("Messages Received", 1L, false);
    }

    return rawSyslogEvent;
}
Example 3
Source File: ListenUDPRecord.java From nifi with Apache License 2.0
private void handleParseFailure(final StandardEvent event, final ProcessSession session, final Exception cause, final String message) {
    // If we are unable to parse the data, we need to transfer it to 'parse failure' relationship
    final Map<String, String> attributes = getAttributes(event.getSender());

    FlowFile failureFlowFile = session.create();
    failureFlowFile = session.write(failureFlowFile, out -> out.write(event.getData()));
    failureFlowFile = session.putAllAttributes(failureFlowFile, attributes);

    final String transitUri = getTransitUri(event.getSender());
    session.getProvenanceReporter().receive(failureFlowFile, transitUri);

    session.transfer(failureFlowFile, REL_PARSE_FAILURE);

    if (cause == null) {
        getLogger().error(message);
    } else {
        getLogger().error(message, cause);
    }

    session.adjustCounter("Parse Failures", 1, false);
}
Example 4
Source File: ListenSyslog.java From nifi with Apache License 2.0
protected RawSyslogEvent getMessage(final boolean longPoll, final boolean pollErrorQueue, final ProcessSession session) {
    RawSyslogEvent rawSyslogEvent = null;
    if (pollErrorQueue) {
        rawSyslogEvent = errorEvents.poll();
    }

    if (rawSyslogEvent == null) {
        try {
            if (longPoll) {
                rawSyslogEvent = syslogEvents.poll(20, TimeUnit.MILLISECONDS);
            } else {
                rawSyslogEvent = syslogEvents.poll();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return null;
        }
    }

    if (rawSyslogEvent != null) {
        session.adjustCounter("Messages Received", 1L, false);
    }

    return rawSyslogEvent;
}
Example 5
Source File: CountEvents.java From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    this.sessionFactory = sessionFactory;

    final ProcessSession session = sessionFactory.createSession();
    if (!firstScheduleCounted.getAndSet(true)) {
        session.adjustCounter("Scheduled", 1, true);
    }

    session.adjustCounter("Triggered", 1, true);
}
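Note that CountEvents obtains its session from a ProcessSessionFactory and never commits it in the code shown; both calls therefore pass immediate = true, since with immediate = false the adjustments would take effect only if the session were committed.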
Example 6
Source File: ValidateFileExists.java From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    session.adjustCounter("Triggered", 1, true);

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    session.transfer(flowFile, REL_SUCCESS);
}
Example 7
Source File: ConsumerLease.java From nifi with Apache License 2.0
private void handleParseFailure(final ConsumerRecord<byte[], byte[]> consumerRecord, final ProcessSession session, final Exception cause, final String message) {
    // If we are unable to parse the data, we need to transfer it to 'parse failure' relationship
    final Map<String, String> attributes = getAttributes(consumerRecord);
    attributes.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(consumerRecord.offset()));
    attributes.put(KafkaProcessorUtils.KAFKA_TIMESTAMP, String.valueOf(consumerRecord.timestamp()));
    attributes.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(consumerRecord.partition()));
    attributes.put(KafkaProcessorUtils.KAFKA_TOPIC, consumerRecord.topic());

    FlowFile failureFlowFile = session.create();

    final byte[] value = consumerRecord.value();
    if (value != null) {
        failureFlowFile = session.write(failureFlowFile, out -> out.write(value));
    }
    failureFlowFile = session.putAllAttributes(failureFlowFile, attributes);

    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, consumerRecord.topic());
    session.getProvenanceReporter().receive(failureFlowFile, transitUri);

    session.transfer(failureFlowFile, REL_PARSE_FAILURE);

    if (cause == null) {
        logger.error(message);
    } else {
        logger.error(message, cause);
    }

    session.adjustCounter("Parse Failures", 1, false);
}
Example 8
Source File: TerminateAll.java From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile;
    while ((flowFile = session.get()) != null) {
        session.remove(flowFile);
        session.adjustCounter("Removed", 1, false);
    }
}
Example 9
Source File: RecordSqlWriter.java From nifi with Apache License 2.0
@Override
public void updateCounters(ProcessSession session) {
    final WriteResult result = writeResultRef.get();
    if (result != null) {
        session.adjustCounter("Records Written", result.getRecordCount(), false);
    }
}
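The delta does not have to be 1: here the writer adds the full record count from the WriteResult in a single call.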
Example 10
Source File: UpdateCounter.java From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    session.adjustCounter(context.getProperty(COUNTER_NAME).evaluateAttributeExpressions(flowFile).getValue(),
            Long.parseLong(context.getProperty(DELTA).evaluateAttributeExpressions(flowFile).getValue()),
            false);
    session.transfer(flowFile, SUCCESS);
}
Example 11
Source File: AbstractListenEventBatchingProcessor.java From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final int maxBatchSize = context.getProperty(MAX_BATCH_SIZE).asInteger();
    final Map<String, FlowFileEventBatch> batches = getBatches(session, maxBatchSize, messageDemarcatorBytes);

    // if the size is 0 then there was nothing to process so return
    // we don't need to yield here because we have a long poll inside of getBatches
    if (batches.size() == 0) {
        return;
    }

    final List<E> allEvents = new ArrayList<>();

    for (Map.Entry<String, FlowFileEventBatch> entry : batches.entrySet()) {
        FlowFile flowFile = entry.getValue().getFlowFile();
        final List<E> events = entry.getValue().getEvents();

        if (flowFile.getSize() == 0L || events.size() == 0) {
            session.remove(flowFile);
            getLogger().debug("No data written to FlowFile from batch {}; removing FlowFile", new Object[] {entry.getKey()});
            continue;
        }

        final Map<String, String> attributes = getAttributes(entry.getValue());
        flowFile = session.putAllAttributes(flowFile, attributes);

        getLogger().debug("Transferring {} to success", new Object[] {flowFile});
        session.transfer(flowFile, REL_SUCCESS);
        session.adjustCounter("FlowFiles Transferred to Success", 1L, false);

        // the sender and command will be the same for all events based on the batch key
        final String transitUri = getTransitUri(entry.getValue());
        session.getProvenanceReporter().receive(flowFile, transitUri);

        allEvents.addAll(events);
    }

    // let sub-classes take any additional actions
    postProcess(context, session, allEvents);
}
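Unlike the per-message "Messages Received" counter in the earlier listen-processor examples, this batching processor increments its counter once per FlowFile transferred, however many events were batched into that FlowFile.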
Example 12
Source File: AbstractListenEventProcessor.java From nifi with Apache License 2.0
/**
 * If pollErrorQueue is true, the error queue will be checked first and an event will be
 * returned from the error queue if available.
 *
 * If pollErrorQueue is false, or no data is in the error queue, the regular queue is polled.
 *
 * If longPoll is true, the regular queue will be polled with a short timeout, otherwise it will
 * poll with no timeout which will return immediately.
 *
 * @param longPoll whether or not to poll the main queue with a small timeout
 * @param pollErrorQueue whether or not to poll the error queue first
 *
 * @return an event from one of the queues, or null if none are available
 */
protected E getMessage(final boolean longPoll, final boolean pollErrorQueue, final ProcessSession session) {
    E event = null;
    if (pollErrorQueue) {
        event = errorEvents.poll();
    }

    if (event != null) {
        return event;
    }

    try {
        if (longPoll) {
            event = events.poll(getLongPollTimeout(), TimeUnit.MILLISECONDS);
        } else {
            event = events.poll();
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return null;
    }

    if (event != null) {
        session.adjustCounter("Messages Received", 1L, false);
    }

    return event;
}
Example 13
Source File: UpdateCounter.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    session.adjustCounter(context.getProperty(COUNTER_NAME).evaluateAttributeExpressions(flowFile).getValue(),
            Long.parseLong(context.getProperty(DELTA).evaluateAttributeExpressions(flowFile).getValue()),
            false);
    session.transfer(flowFile, SUCCESS);
}
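Unlike the newer version of this processor shown in Example 10, this variant does not guard against session.get() returning null before using the FlowFile.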
Example 14
Source File: AbstractListenEventBatchingProcessor.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final int maxBatchSize = context.getProperty(MAX_BATCH_SIZE).asInteger();
    final Map<String, FlowFileEventBatch> batches = getBatches(session, maxBatchSize, messageDemarcatorBytes);

    // if the size is 0 then there was nothing to process so return
    // we don't need to yield here because we have a long poll inside of getBatches
    if (batches.size() == 0) {
        return;
    }

    final List<E> allEvents = new ArrayList<>();

    for (Map.Entry<String, FlowFileEventBatch> entry : batches.entrySet()) {
        FlowFile flowFile = entry.getValue().getFlowFile();
        final List<E> events = entry.getValue().getEvents();

        if (flowFile.getSize() == 0L || events.size() == 0) {
            session.remove(flowFile);
            getLogger().debug("No data written to FlowFile from batch {}; removing FlowFile", new Object[] {entry.getKey()});
            continue;
        }

        final Map<String, String> attributes = getAttributes(entry.getValue());
        flowFile = session.putAllAttributes(flowFile, attributes);

        getLogger().debug("Transferring {} to success", new Object[] {flowFile});
        session.transfer(flowFile, REL_SUCCESS);
        session.adjustCounter("FlowFiles Transferred to Success", 1L, false);

        // the sender and command will be the same for all events based on the batch key
        final String transitUri = getTransitUri(entry.getValue());
        session.getProvenanceReporter().receive(flowFile, transitUri);

        allEvents.addAll(events);
    }

    // let sub-classes take any additional actions
    postProcess(context, session, allEvents);
}
Example 15
Source File: DetectDuplicate.java From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[]{flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
    final Long durationMS = context.getProperty(AGE_OFF_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
    final long now = System.currentTimeMillis();

    try {
        final String flowFileDescription = context.getProperty(FLOWFILE_DESCRIPTION).evaluateAttributeExpressions(flowFile).getValue();
        final CacheValue cacheValue = new CacheValue(flowFileDescription, now);
        final CacheValue originalCacheValue;

        final boolean shouldCacheIdentifier = context.getProperty(CACHE_IDENTIFIER).asBoolean();
        if (shouldCacheIdentifier) {
            originalCacheValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
        } else {
            originalCacheValue = cache.get(cacheKey, keySerializer, valueDeserializer);
        }

        boolean duplicate = originalCacheValue != null;
        if (duplicate && durationMS != null && (now >= originalCacheValue.getEntryTimeMS() + durationMS)) {
            boolean status = cache.remove(cacheKey, keySerializer);
            logger.debug("Removal of expired cached entry with key {} returned {}", new Object[]{cacheKey, status});

            // both should typically result in duplicate being false...but, better safe than sorry
            if (shouldCacheIdentifier) {
                duplicate = !cache.putIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer);
            } else {
                duplicate = cache.containsKey(cacheKey, keySerializer);
            }
        }

        if (duplicate) {
            session.getProvenanceReporter().route(flowFile, REL_DUPLICATE, "Duplicate of: " + ORIGINAL_DESCRIPTION_ATTRIBUTE_NAME);
            String originalFlowFileDescription = originalCacheValue.getDescription();
            flowFile = session.putAttribute(flowFile, ORIGINAL_DESCRIPTION_ATTRIBUTE_NAME, originalFlowFileDescription);
            session.transfer(flowFile, REL_DUPLICATE);
            logger.info("Found {} to be a duplicate of FlowFile with description {}", new Object[]{flowFile, originalFlowFileDescription});
            session.adjustCounter("Duplicates Detected", 1L, false);
        } else {
            session.getProvenanceReporter().route(flowFile, REL_NON_DUPLICATE);
            session.transfer(flowFile, REL_NON_DUPLICATE);
            logger.info("Could not find a duplicate entry in cache for {}; routing to non-duplicate", new Object[]{flowFile});
            session.adjustCounter("Non-Duplicate Files Processed", 1L, false);
        }
    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[]{flowFile, e});
    }
}
Example 16
Source File: DetectDuplicate.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[]{flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
    final Long durationMS = context.getProperty(AGE_OFF_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
    final long now = System.currentTimeMillis();

    try {
        final String flowFileDescription = context.getProperty(FLOWFILE_DESCRIPTION).evaluateAttributeExpressions(flowFile).getValue();
        final CacheValue cacheValue = new CacheValue(flowFileDescription, now);
        final CacheValue originalCacheValue;

        final boolean shouldCacheIdentifier = context.getProperty(CACHE_IDENTIFIER).asBoolean();
        if (shouldCacheIdentifier) {
            originalCacheValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
        } else {
            originalCacheValue = cache.get(cacheKey, keySerializer, valueDeserializer);
        }

        boolean duplicate = originalCacheValue != null;
        if (duplicate && durationMS != null && (now >= originalCacheValue.getEntryTimeMS() + durationMS)) {
            boolean status = cache.remove(cacheKey, keySerializer);
            logger.debug("Removal of expired cached entry with key {} returned {}", new Object[]{cacheKey, status});

            // both should typically result in duplicate being false...but, better safe than sorry
            if (shouldCacheIdentifier) {
                duplicate = !cache.putIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer);
            } else {
                duplicate = cache.containsKey(cacheKey, keySerializer);
            }
        }

        if (duplicate) {
            session.getProvenanceReporter().route(flowFile, REL_DUPLICATE, "Duplicate of: " + ORIGINAL_DESCRIPTION_ATTRIBUTE_NAME);
            String originalFlowFileDescription = originalCacheValue.getDescription();
            flowFile = session.putAttribute(flowFile, ORIGINAL_DESCRIPTION_ATTRIBUTE_NAME, originalFlowFileDescription);
            session.transfer(flowFile, REL_DUPLICATE);
            logger.info("Found {} to be a duplicate of FlowFile with description {}", new Object[]{flowFile, originalFlowFileDescription});
            session.adjustCounter("Duplicates Detected", 1L, false);
        } else {
            session.getProvenanceReporter().route(flowFile, REL_NON_DUPLICATE);
            session.transfer(flowFile, REL_NON_DUPLICATE);
            logger.info("Could not find a duplicate entry in cache for {}; routing to non-duplicate", new Object[]{flowFile});
            session.adjustCounter("Non-Duplicate Files Processed", 1L, false);
        }
    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[]{flowFile, e});
    }
}
Example 17
Source File: ScanAccumulo.java From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext processContext, ProcessSession processSession) throws ProcessException {
    FlowFile flowFile = processSession.get();

    final RecordSetWriterFactory writerFactory = processContext.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);

    long recordCount = scanAccumulo(writerFactory, processContext, processSession, Optional.ofNullable(flowFile));

    processSession.adjustCounter("Records Processed", recordCount, false);
}