Java Code Examples for org.apache.nifi.flowfile.FlowFile#getAttribute()
The following examples show how to use org.apache.nifi.flowfile.FlowFile#getAttribute(). Each example is taken from an open-source project; the header above each snippet names the original source file, project, and license.
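Before diving into the examples, note the core contract of the method: getAttribute(String key) returns the attribute's value as a String, or null when the FlowFile does not carry that attribute, so callers are expected to handle the null case. Below is a minimal illustrative sketch of that pattern; it is not taken from any of the projects listed, and the helper class and the "priority" attribute name are hypothetical.

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.flowfile.attributes.CoreAttributes;

public class AttributeLookupSketch {

    // Hypothetical helper: summarize a FlowFile using one core attribute
    // and one optional custom attribute.
    public static String describe(final FlowFile flowFile) {
        // Core attributes such as "filename" and "uuid" are set on every FlowFile.
        final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key());

        // Custom attributes may be absent, in which case getAttribute returns null.
        final String priority = flowFile.getAttribute("priority"); // "priority" is a made-up attribute name

        return filename + " (priority=" + (priority == null ? "none" : priority) + ")";
    }
}

Most of the examples that follow apply exactly this pattern: read an attribute, null-check it, and fall back to a processor property or a default when it is missing.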
Example 1
Source File: VisibilityUtil.java From nifi with Apache License 2.0
public static String pickVisibilityString(String columnFamily, String columnQualifier, FlowFile flowFile, ProcessContext context) {
    if (StringUtils.isBlank(columnFamily)) {
        return null;
    }

    // Look for a visibility attribute scoped to "family.qualifier" first
    String lookupKey = String.format("visibility.%s%s%s", columnFamily, !StringUtils.isBlank(columnQualifier) ? "." : "", columnQualifier);
    String fromAttribute = flowFile.getAttribute(lookupKey);

    // Fall back to an attribute scoped to the column family alone
    if (fromAttribute == null && !StringUtils.isBlank(columnQualifier)) {
        String lookupKeyFam = String.format("visibility.%s", columnFamily);
        fromAttribute = flowFile.getAttribute(lookupKeyFam);
    }

    if (fromAttribute != null) {
        return fromAttribute;
    } else {
        // No attribute found; fall back to dynamic processor properties
        PropertyValue descriptor = context.getProperty(lookupKey);
        if (descriptor == null || !descriptor.isSet()) {
            descriptor = context.getProperty(String.format("visibility.%s", columnFamily));
        }

        String retVal = descriptor != null ? descriptor.evaluateAttributeExpressions(flowFile).getValue() : null;
        return retVal;
    }
}
Example 2
Source File: ControlRate.java From nifi with Apache License 2.0
private long getFlowFileAccrual(FlowFile flowFile) {
    long rateValue;
    switch (rateControlCriteria) {
        case DATA_RATE:
            rateValue = flowFile.getSize();
            break;
        case FLOWFILE_RATE:
            rateValue = 1;
            break;
        case ATTRIBUTE_RATE:
            final String attributeValue = flowFile.getAttribute(rateControlAttribute);
            if (attributeValue == null) {
                return -1L;
            }
            if (!POSITIVE_LONG_PATTERN.matcher(attributeValue).matches()) {
                return -1L;
            }
            rateValue = Long.parseLong(attributeValue);
            break;
        default:
            throw new AssertionError("<Rate Control Criteria> property set to illegal value of " + rateControlCriteria);
    }
    return rateValue;
}
Example 3
Source File: PublishKafka_0_10.java From nifi with Apache License 2.0
private byte[] getMessageKey(final FlowFile flowFile, final ProcessContext context) {
    final String uninterpretedKey;
    if (context.getProperty(KEY).isSet()) {
        uninterpretedKey = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    } else {
        uninterpretedKey = flowFile.getAttribute(KafkaProcessorUtils.KAFKA_KEY);
    }

    if (uninterpretedKey == null) {
        return null;
    }

    final String keyEncoding = context.getProperty(KEY_ATTRIBUTE_ENCODING).getValue();
    if (UTF8_ENCODING.getValue().equals(keyEncoding)) {
        return uninterpretedKey.getBytes(StandardCharsets.UTF_8);
    }

    return DatatypeConverter.parseHexBinary(uninterpretedKey);
}
Example 4
Source File: PublishKafka_0_11.java From nifi with Apache License 2.0
private byte[] getMessageKey(final FlowFile flowFile, final ProcessContext context) {
    final String uninterpretedKey;
    if (context.getProperty(KEY).isSet()) {
        uninterpretedKey = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    } else {
        uninterpretedKey = flowFile.getAttribute(KafkaProcessorUtils.KAFKA_KEY);
    }

    if (uninterpretedKey == null) {
        return null;
    }

    final String keyEncoding = context.getProperty(KEY_ATTRIBUTE_ENCODING).getValue();
    if (UTF8_ENCODING.getValue().equals(keyEncoding)) {
        return uninterpretedKey.getBytes(StandardCharsets.UTF_8);
    }

    return DatatypeConverter.parseHexBinary(uninterpretedKey);
}
Example 5
Source File: MergeContent.java From nifi with Apache License 2.0
@Override
protected FlowFile preprocessFlowFile(final ProcessContext context, final ProcessSession session, final FlowFile flowFile) {
    FlowFile processed = flowFile;

    // handle backward compatibility with old segment attributes
    if (processed.getAttribute(FRAGMENT_COUNT_ATTRIBUTE) == null && processed.getAttribute(SEGMENT_COUNT_ATTRIBUTE) != null) {
        processed = session.putAttribute(processed, FRAGMENT_COUNT_ATTRIBUTE, processed.getAttribute(SEGMENT_COUNT_ATTRIBUTE));
    }

    if (processed.getAttribute(FRAGMENT_INDEX_ATTRIBUTE) == null && processed.getAttribute(SEGMENT_INDEX_ATTRIBUTE) != null) {
        processed = session.putAttribute(processed, FRAGMENT_INDEX_ATTRIBUTE, processed.getAttribute(SEGMENT_INDEX_ATTRIBUTE));
    }

    if (processed.getAttribute(FRAGMENT_ID_ATTRIBUTE) == null && processed.getAttribute(SEGMENT_ID_ATTRIBUTE) != null) {
        processed = session.putAttribute(processed, FRAGMENT_ID_ATTRIBUTE, processed.getAttribute(SEGMENT_ID_ATTRIBUTE));
    }

    return processed;
}
Example 6
Source File: HandleHttpRequest.java From nifi with Apache License 2.0
private boolean registerRequest(final ProcessContext context, final ProcessSession session,
                                HttpRequestContainer container, final long start,
                                final HttpServletRequest request, FlowFile flowFile) {
    final HttpContextMap contextMap = context.getProperty(HTTP_CONTEXT_MAP).asControllerService(HttpContextMap.class);
    String contextIdentifier = flowFile.getAttribute(HTTPUtils.HTTP_CONTEXT_ID);
    final boolean registered = contextMap.register(contextIdentifier, request, container.getResponse(), container.getContext());
    if (registered) {
        return true;
    }

    getLogger().warn("Received request from {} but could not process it because too many requests are already outstanding; responding with SERVICE_UNAVAILABLE",
            new Object[]{request.getRemoteAddr()});
    try {
        container.getResponse().sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, "HttpContextMap is full");
        container.getContext().complete();
    } catch (final Exception e) {
        getLogger().warn("Failed to respond with SERVICE_UNAVAILABLE message to {} due to {}",
                new Object[]{request.getRemoteAddr(), e});
    }

    session.remove(flowFile);
    return false;
}
Example 7
Source File: Bin.java From localization_nifi with Apache License 2.0
/**
 * If this bin has enough room for the size of the given FlowFile, it is added; otherwise it is not.
 *
 * @param flowFile flowfile to offer
 * @param session the ProcessSession to which the FlowFile belongs
 * @return true if added; false otherwise
 */
public boolean offer(final FlowFile flowFile, final ProcessSession session) {
    if (((size + flowFile.getSize()) > maximumSizeBytes) || (binContents.size() >= maximumEntries)) {
        successiveFailedOfferings++;
        return false;
    }

    if (fileCountAttribute != null) {
        final String countValue = flowFile.getAttribute(fileCountAttribute);
        final Integer count = toInteger(countValue);
        if (count != null) {
            int currentMaxEntries = this.maximumEntries;
            this.maximumEntries = Math.min(count, currentMaxEntries);
            this.minimumEntries = currentMaxEntries;
        }
    }

    size += flowFile.getSize();
    session.migrate(getSession(), Collections.singleton(flowFile));
    binContents.add(flowFile);
    successiveFailedOfferings = 0;
    return true;
}
Example 8
Source File: MergeContent.java From localization_nifi with Apache License 2.0
@Override
protected String getGroupId(final ProcessContext context, final FlowFile flowFile) {
    final String correlationAttributeName = context.getProperty(CORRELATION_ATTRIBUTE_NAME)
            .evaluateAttributeExpressions(flowFile).getValue();
    String groupId = correlationAttributeName == null ? null : flowFile.getAttribute(correlationAttributeName);

    // when MERGE_STRATEGY is Defragment and correlationAttributeName is null then bin by fragment.identifier
    if (groupId == null && MERGE_STRATEGY_DEFRAGMENT.equals(context.getProperty(MERGE_STRATEGY).getValue())) {
        groupId = flowFile.getAttribute(FRAGMENT_ID_ATTRIBUTE);
    }

    return groupId;
}
Example 9
Source File: PutKafka.java From nifi with Apache License 2.0
/**
 * Builds the {@link PublishingContext} for message(s) to be sent to Kafka.
 * {@link PublishingContext} contains all contextual information required by
 * {@link KafkaPublisher} to publish to Kafka. Such information includes
 * things like topic name, content stream, delimiter, key and the last ACKed
 * message for cases where the provided FlowFile is being retried (failed in
 * the past). <br>
 * For a clean FlowFile (one being sent for the first time), the
 * PublishingContext will be built from the {@link ProcessContext} associated
 * with this invocation. <br>
 * For a failed FlowFile, the {@link PublishingContext} will be built from
 * attributes of that FlowFile, which by then will already contain the required
 * information (e.g., topic, key, delimiter etc.). This is required to
 * ensure the affinity of the retry in the event where the processor
 * configuration has changed. However, keep in mind that a failed FlowFile is
 * only considered a failed FlowFile if it is being re-processed by the same
 * processor (determined via {@link #FAILED_PROC_ID_ATTR}, see
 * {@link #isFailedFlowFile(FlowFile)}). If a failed FlowFile is being sent to
 * another PublishKafka processor, it is treated as a fresh FlowFile
 * regardless of whether it has #FAILED* attributes set.
 */
private PublishingContext buildPublishingContext(FlowFile flowFile, ProcessContext context, InputStream contentStream) {
    String topicName;
    byte[] keyBytes;
    byte[] delimiterBytes = null;
    int lastAckedMessageIndex = -1;

    if (this.isFailedFlowFile(flowFile)) {
        // Retry: rebuild the context from the attributes written on the earlier failure
        lastAckedMessageIndex = Integer.valueOf(flowFile.getAttribute(FAILED_LAST_ACK_IDX));
        topicName = flowFile.getAttribute(FAILED_TOPIC_ATTR);
        keyBytes = flowFile.getAttribute(FAILED_KEY_ATTR) != null
                ? flowFile.getAttribute(FAILED_KEY_ATTR).getBytes(StandardCharsets.UTF_8) : null;
        delimiterBytes = flowFile.getAttribute(FAILED_DELIMITER_ATTR) != null
                ? flowFile.getAttribute(FAILED_DELIMITER_ATTR).getBytes(StandardCharsets.UTF_8) : null;
    } else {
        // First attempt: build the context from the processor configuration
        topicName = context.getProperty(TOPIC).evaluateAttributeExpressions(flowFile).getValue();
        String _key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
        keyBytes = _key == null ? null : _key.getBytes(StandardCharsets.UTF_8);
        delimiterBytes = context.getProperty(MESSAGE_DELIMITER).isSet()
                ? context.getProperty(MESSAGE_DELIMITER).evaluateAttributeExpressions(flowFile).getValue().getBytes(StandardCharsets.UTF_8) : null;
    }

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName, lastAckedMessageIndex);
    publishingContext.setKeyBytes(keyBytes);
    publishingContext.setDelimiterBytes(delimiterBytes);
    publishingContext.setPartitionId(this.determinePartition(context, flowFile));
    return publishingContext;
}
Example 10
Source File: ParseEvtx.java From localization_nifi with Apache License 2.0
protected String getBasename(FlowFile flowFile, ComponentLog logger) {
    String basename = flowFile.getAttribute(CoreAttributes.FILENAME.key());
    if (basename.endsWith(EVTX_EXTENSION)) {
        return basename.substring(0, basename.length() - EVTX_EXTENSION.length());
    } else {
        logger.warn("Trying to parse file without .evtx extension {} from flowfile {}", new Object[]{basename, flowFile});
        return basename;
    }
}
Example 11
Source File: MockProcessSession.java From nifi with Apache License 2.0
/**
 * Checks if a FlowFile is known in this session.
 *
 * @param flowFile
 *            the FlowFile to check
 * @return <code>true</code> if the FlowFile is known in this session,
 *         <code>false</code> otherwise.
 */
boolean isFlowFileKnown(final FlowFile flowFile) {
    final FlowFile curFlowFile = currentVersions.get(flowFile.getId());
    if (curFlowFile == null) {
        return false;
    }

    final String curUuid = curFlowFile.getAttribute(CoreAttributes.UUID.key());
    final String providedUuid = flowFile.getAttribute(CoreAttributes.UUID.key());
    if (!curUuid.equals(providedUuid)) {
        return false;
    }

    return true;
}
Example 12
Source File: RecordBin.java From nifi with Apache License 2.0
/**
 * Ensure that at least one FlowFile has a fragment.count attribute and that they all have the same value, if they have a value.
 */
private void validateFragmentCount(String countAttributeName) {
    Integer expectedFragmentCount = thresholds.getFragmentCount();
    for (final FlowFile flowFile : flowFiles) {
        final String countVal = flowFile.getAttribute(countAttributeName);
        if (countVal == null) {
            continue;
        }

        final int count;
        try {
            count = Integer.parseInt(countVal);
        } catch (final NumberFormatException nfe) {
            logger.error("Could not merge bin with {} FlowFiles because the '{}' attribute had a value of '{}' for {} but expected a number",
                    new Object[] {flowFiles.size(), countAttributeName, countVal, flowFile});
            fail();
            return;
        }

        if (expectedFragmentCount != null && count != expectedFragmentCount) {
            logger.error("Could not merge bin with {} FlowFiles because the '{}' attribute had a value of '{}' for {} but another FlowFile in the bin had a value of {}",
                    new Object[] {flowFiles.size(), countAttributeName, countVal, flowFile, expectedFragmentCount});
            fail();
            return;
        }

        if (expectedFragmentCount == null) {
            expectedFragmentCount = count;
            thresholds.setFragmentCount(count);
        }
    }

    if (expectedFragmentCount == null) {
        logger.error("Could not merge bin with {} FlowFiles because the '{}' attribute was not present on any of the FlowFiles",
                new Object[] {flowFiles.size(), countAttributeName});
        fail();
        return;
    }
}
Example 13
Source File: MockProcessSession.java From localization_nifi with Apache License 2.0
/**
 * Checks if a FlowFile is known in this session.
 *
 * @param flowFile
 *            the FlowFile to check
 * @return <code>true</code> if the FlowFile is known in this session,
 *         <code>false</code> otherwise.
 */
boolean isFlowFileKnown(final FlowFile flowFile) {
    final FlowFile curFlowFile = currentVersions.get(flowFile.getId());
    if (curFlowFile == null) {
        return false;
    }

    final String curUuid = curFlowFile.getAttribute(CoreAttributes.UUID.key());
    final String providedUuid = flowFile.getAttribute(CoreAttributes.UUID.key());
    if (!curUuid.equals(providedUuid)) {
        return false;
    }

    return true;
}
Example 14
Source File: MergeContent.java From nifi with Apache License 2.0
private String getDefragmentValidationError(final List<FlowFile> binContents) {
    if (binContents.isEmpty()) {
        return null;
    }

    // If we are defragmenting, all fragments must have the appropriate attributes.
    String decidedFragmentCount = null;
    String fragmentIdentifier = null;
    for (final FlowFile flowFile : binContents) {
        final String fragmentIndex = flowFile.getAttribute(FRAGMENT_INDEX_ATTRIBUTE);
        if (!isNumber(fragmentIndex)) {
            return "Cannot Defragment " + flowFile + " because it does not have an integer value for the " + FRAGMENT_INDEX_ATTRIBUTE + " attribute";
        }

        fragmentIdentifier = flowFile.getAttribute(FRAGMENT_ID_ATTRIBUTE);

        final String fragmentCount = flowFile.getAttribute(FRAGMENT_COUNT_ATTRIBUTE);
        if (!isNumber(fragmentCount)) {
            return "Cannot Defragment " + flowFile + " because it does not have an integer value for the " + FRAGMENT_COUNT_ATTRIBUTE + " attribute";
        } else if (decidedFragmentCount == null) {
            decidedFragmentCount = fragmentCount;
        } else if (!decidedFragmentCount.equals(fragmentCount)) {
            return "Cannot Defragment " + flowFile + " because it is grouped with another FlowFile, and the two have differing values for the "
                    + FRAGMENT_COUNT_ATTRIBUTE + " attribute: " + decidedFragmentCount + " and " + fragmentCount;
        }
    }

    final int numericFragmentCount;
    try {
        numericFragmentCount = Integer.parseInt(decidedFragmentCount);
    } catch (final NumberFormatException nfe) {
        return "Cannot Defragment FlowFiles with Fragment Identifier " + fragmentIdentifier
                + " because the " + FRAGMENT_COUNT_ATTRIBUTE + " has a non-integer value of " + decidedFragmentCount;
    }

    if (binContents.size() < numericFragmentCount) {
        return "Cannot Defragment FlowFiles with Fragment Identifier " + fragmentIdentifier
                + " because the expected number of fragments is " + decidedFragmentCount + " but found only " + binContents.size() + " fragments";
    }

    if (binContents.size() > numericFragmentCount) {
        return "Cannot Defragment FlowFiles with Fragment Identifier " + fragmentIdentifier
                + " because the expected number of fragments is " + decidedFragmentCount + " but found " + binContents.size() + " fragments for this identifier";
    }

    return null;
}
Example 15
Source File: ControlRate.java From localization_nifi with Apache License 2.0
@Override
public FlowFileFilterResult filter(FlowFile flowFile) {
    long accrual = getFlowFileAccrual(flowFile);
    if (accrual < 0) {
        // this FlowFile is invalid for this configuration so let the processor deal with it
        return FlowFileFilterResult.ACCEPT_AND_TERMINATE;
    }

    final String groupName = (groupingAttributeName == null) ? DEFAULT_GROUP_ATTRIBUTE
            : flowFile.getAttribute(groupingAttributeName);
    Throttle throttle = throttleMap.get(groupName);
    if (throttle == null) {
        throttle = new Throttle(timePeriodSeconds, TimeUnit.SECONDS, getLogger());

        final long newRate;
        if (DataUnit.DATA_SIZE_PATTERN.matcher(maximumRateStr).matches()) {
            newRate = DataUnit.parseDataSize(maximumRateStr, DataUnit.B).longValue();
        } else {
            newRate = Long.parseLong(maximumRateStr);
        }
        throttle.setMaxRate(newRate);

        throttleMap.put(groupName, throttle);
    }

    throttle.lock();
    try {
        if (throttle.tryAdd(accrual)) {
            flowFilesInBatch += 1;
            if (flowFilesInBatch >= flowFilesPerBatch) {
                flowFilesInBatch = 0;
                return FlowFileFilterResult.ACCEPT_AND_TERMINATE;
            } else {
                return FlowFileFilterResult.ACCEPT_AND_CONTINUE;
            }
        }
    } finally {
        throttle.unlock();
    }

    return FlowFileFilterResult.REJECT_AND_TERMINATE;
}
Example 16
Source File: MergeContent.java From nifi with Apache License 2.0
@Override
protected BinProcessingResult processBin(final Bin bin, final ProcessContext context) throws ProcessException {
    final BinProcessingResult binProcessingResult = new BinProcessingResult(true);
    final String mergeFormat = context.getProperty(MERGE_FORMAT).getValue();

    MergeBin merger;
    switch (mergeFormat) {
        case MERGE_FORMAT_TAR_VALUE:
            merger = new TarMerge();
            break;
        case MERGE_FORMAT_ZIP_VALUE:
            merger = new ZipMerge(context.getProperty(COMPRESSION_LEVEL).asInteger());
            break;
        case MERGE_FORMAT_FLOWFILE_STREAM_V3_VALUE:
            merger = new FlowFileStreamMerger(new FlowFilePackagerV3(), "application/flowfile-v3");
            break;
        case MERGE_FORMAT_FLOWFILE_STREAM_V2_VALUE:
            merger = new FlowFileStreamMerger(new FlowFilePackagerV2(), "application/flowfile-v2");
            break;
        case MERGE_FORMAT_FLOWFILE_TAR_V1_VALUE:
            merger = new FlowFileStreamMerger(new FlowFilePackagerV1(), "application/flowfile-v1");
            break;
        case MERGE_FORMAT_CONCAT_VALUE:
            merger = new BinaryConcatenationMerge();
            break;
        case MERGE_FORMAT_AVRO_VALUE:
            merger = new AvroMerge();
            break;
        default:
            throw new AssertionError();
    }

    final AttributeStrategy attributeStrategy = AttributeStrategyUtil.strategyFor(context);

    final List<FlowFile> contents = bin.getContents();
    final ProcessSession binSession = bin.getSession();

    if (MERGE_STRATEGY_DEFRAGMENT.equals(context.getProperty(MERGE_STRATEGY).getValue())) {
        final String error = getDefragmentValidationError(bin.getContents());

        // Fail the flow files and commit them
        if (error != null) {
            final String binDescription = contents.size() <= 10 ? contents.toString() : contents.size() + " FlowFiles";
            getLogger().error(error + "; routing {} to failure", new Object[]{binDescription});
            binSession.transfer(contents, REL_FAILURE);
            binSession.commit();
            return binProcessingResult;
        }

        Collections.sort(contents, new FragmentComparator());
    }

    FlowFile bundle = merger.merge(bin, context);

    // keep the filename, as it is added to the bundle.
    final String filename = bundle.getAttribute(CoreAttributes.FILENAME.key());

    // merge all of the attributes
    final Map<String, String> bundleAttributes = attributeStrategy.getMergedAttributes(contents);
    bundleAttributes.put(CoreAttributes.MIME_TYPE.key(), merger.getMergedContentType());

    // restore the filename of the bundle
    bundleAttributes.put(CoreAttributes.FILENAME.key(), filename);
    bundleAttributes.put(MERGE_COUNT_ATTRIBUTE, Integer.toString(contents.size()));
    bundleAttributes.put(MERGE_BIN_AGE_ATTRIBUTE, Long.toString(bin.getBinAge()));

    bundle = binSession.putAllAttributes(bundle, bundleAttributes);

    final String inputDescription = contents.size() < 10 ? contents.toString() : contents.size() + " FlowFiles";
    getLogger().info("Merged {} into {}", new Object[]{inputDescription, bundle});
    binSession.transfer(bundle, REL_MERGED);
    binProcessingResult.getAttributes().put(MERGE_UUID_ATTRIBUTE, bundle.getAttribute(CoreAttributes.UUID.key()));

    for (final FlowFile unmerged : merger.getUnmergedFlowFiles()) {
        final FlowFile unmergedCopy = binSession.clone(unmerged);
        binSession.transfer(unmergedCopy, REL_FAILURE);
    }

    // We haven't committed anything, parent will take care of it
    binProcessingResult.setCommitted(false);
    return binProcessingResult;
}
Example 17
Source File: DeleteMongo.java From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        // the queue may be empty; session.get() can return null
        return;
    }

    final WriteConcern writeConcern = getWriteConcern(context);
    final String deleteMode = context.getProperty(DELETE_MODE).getValue();
    final String deleteAttr = flowFile.getAttribute("mongodb.delete.mode");
    final Boolean failMode = context.getProperty(FAIL_ON_NO_DELETE).asBoolean();

    if (deleteMode.equals(DELETE_ATTR.getValue())
            && (StringUtils.isEmpty(deleteAttr) || !ALLOWED_DELETE_VALUES.contains(deleteAttr.toLowerCase()))) {
        getLogger().error(String.format("%s is not an allowed value for mongodb.delete.mode", deleteAttr));
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    try {
        final MongoCollection<Document> collection = getCollection(context, flowFile).withWriteConcern(writeConcern);

        // the FlowFile content is the delete query
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        session.exportTo(flowFile, bos);
        bos.close();

        String json = new String(bos.toByteArray());
        Document query = Document.parse(json);
        DeleteResult result;

        if (deleteMode.equals(DELETE_ONE.getValue())
                || (deleteMode.equals(DELETE_ATTR.getValue()) && deleteAttr.toLowerCase().equals("one"))) {
            result = collection.deleteOne(query);
        } else {
            result = collection.deleteMany(query);
        }

        if (failMode && result.getDeletedCount() == 0) {
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (Exception ex) {
        getLogger().error("Could not send a delete to MongoDB, failing...", ex);
        session.transfer(flowFile, REL_FAILURE);
    }
}
Example 18
Source File: PutCouchbaseKey.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final byte[] content = new byte[(int) flowFile.getSize()];
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, content, true);
        }
    });

    String docId = flowFile.getAttribute(CoreAttributes.UUID.key());
    if (!StringUtils.isEmpty(context.getProperty(DOC_ID).getValue())) {
        docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    }

    try {
        Document<?> doc = null;
        final DocumentType documentType = DocumentType.valueOf(context.getProperty(DOCUMENT_TYPE).getValue());
        switch (documentType) {
            case Json: {
                doc = RawJsonDocument.create(docId, new String(content, StandardCharsets.UTF_8));
                break;
            }
            case Binary: {
                final ByteBuf buf = Unpooled.copiedBuffer(content);
                doc = BinaryDocument.create(docId, buf);
                break;
            }
        }

        final PersistTo persistTo = PersistTo.valueOf(context.getProperty(PERSIST_TO).getValue());
        final ReplicateTo replicateTo = ReplicateTo.valueOf(context.getProperty(REPLICATE_TO).getValue());
        doc = openBucket(context).upsert(doc, persistTo, replicateTo);

        final Map<String, String> updatedAttrs = new HashMap<>();
        updatedAttrs.put(CouchbaseAttributes.Cluster.key(), context.getProperty(COUCHBASE_CLUSTER_SERVICE).getValue());
        updatedAttrs.put(CouchbaseAttributes.Bucket.key(), context.getProperty(BUCKET_NAME).getValue());
        updatedAttrs.put(CouchbaseAttributes.DocId.key(), docId);
        updatedAttrs.put(CouchbaseAttributes.Cas.key(), String.valueOf(doc.cas()));
        updatedAttrs.put(CouchbaseAttributes.Expiry.key(), String.valueOf(doc.expiry()));
        flowFile = session.putAllAttributes(flowFile, updatedAttrs);

        session.getProvenanceReporter().send(flowFile, getTransitUrl(context, docId));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final CouchbaseException e) {
        String errMsg = String.format("Writing document %s to Couchbase Server using %s failed due to %s", docId, flowFile, e);
        handleCouchbaseException(context, session, logger, flowFile, e, errMsg);
    }
}
Example 19
Source File: GeoEnrichIP.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final DatabaseReader dbReader = databaseReaderRef.get();
    final String ipAttributeName = context.getProperty(IP_ADDRESS_ATTRIBUTE).evaluateAttributeExpressions(flowFile).getValue();
    final String ipAttributeValue = flowFile.getAttribute(ipAttributeName);
    if (StringUtils.isEmpty(ipAttributeValue)) {
        //TODO need to add additional validation - should look like an IPv4 or IPv6 addr for instance
        session.transfer(flowFile, REL_NOT_FOUND);
        getLogger().warn("Unable to find ip address for {}", new Object[]{flowFile});
        return;
    }

    InetAddress inetAddress = null;
    CityResponse response = null;

    try {
        inetAddress = InetAddress.getByName(ipAttributeValue);
    } catch (final IOException ioe) {
        session.transfer(flowFile, REL_NOT_FOUND);
        getLogger().warn("Could not resolve {} to ip address for {}", new Object[]{ipAttributeValue, flowFile}, ioe);
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    try {
        response = dbReader.city(inetAddress);
        stopWatch.stop();
    } catch (final IOException | GeoIp2Exception ex) {
        session.transfer(flowFile, REL_NOT_FOUND);
        getLogger().warn("Failure while trying to find enrichment data for {} due to {}", new Object[]{flowFile, ex}, ex);
        return;
    }

    if (response == null) {
        session.transfer(flowFile, REL_NOT_FOUND);
        return;
    }

    final Map<String, String> attrs = new HashMap<>();
    attrs.put(new StringBuilder(ipAttributeName).append(".geo.lookup.micros").toString(), String.valueOf(stopWatch.getDuration(TimeUnit.MICROSECONDS)));
    attrs.put(new StringBuilder(ipAttributeName).append(".geo.city").toString(), response.getCity().getName());

    final Double latitude = response.getLocation().getLatitude();
    if (latitude != null) {
        attrs.put(new StringBuilder(ipAttributeName).append(".geo.latitude").toString(), latitude.toString());
    }

    final Double longitude = response.getLocation().getLongitude();
    if (longitude != null) {
        attrs.put(new StringBuilder(ipAttributeName).append(".geo.longitude").toString(), longitude.toString());
    }

    int i = 0;
    for (final Subdivision subd : response.getSubdivisions()) {
        attrs.put(new StringBuilder(ipAttributeName).append(".geo.subdivision.").append(i).toString(), subd.getName());
        attrs.put(new StringBuilder(ipAttributeName).append(".geo.subdivision.isocode.").append(i).toString(), subd.getIsoCode());
        i++;
    }

    attrs.put(new StringBuilder(ipAttributeName).append(".geo.country").toString(), response.getCountry().getName());
    attrs.put(new StringBuilder(ipAttributeName).append(".geo.country.isocode").toString(), response.getCountry().getIsoCode());
    attrs.put(new StringBuilder(ipAttributeName).append(".geo.postalcode").toString(), response.getPostal().getCode());
    flowFile = session.putAllAttributes(flowFile, attrs);

    session.transfer(flowFile, REL_FOUND);
}
Example 20
Source File: SequenceFileWriterImpl.java From localization_nifi with Apache License 2.0
protected void processInputStream(InputStream stream, FlowFile flowFile, final Writer writer) throws IOException {
    int fileSize = (int) flowFile.getSize();
    final InputStreamWritable inStreamWritable = new InputStreamWritable(new BufferedInputStream(stream), fileSize);
    String key = flowFile.getAttribute(CoreAttributes.FILENAME.key());
    writer.append(new Text(key), inStreamWritable);
}