Java Code Examples for org.apache.nifi.logging.ComponentLog#warn()
The following examples show how to use org.apache.nifi.logging.ComponentLog#warn().
Each example notes its source file, the open-source project it comes from, and its license.
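Before the examples, here is a minimal sketch of the three warn() overloads they exercise: a plain message, a message with {} placeholders filled from an Object[], and placeholders plus a Throwable so the stack trace is logged as well. The processor class, attribute values, and exception here are hypothetical; only the ComponentLog calls mirror the snippets below.

// Minimal sketch of the ComponentLog#warn() overloads used throughout the examples.
// HypotheticalProcessor is illustrative only and not part of any project below.
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;

public class HypotheticalProcessor extends AbstractProcessor {

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        final ComponentLog logger = getLogger();

        // 1. Plain message
        logger.warn("Disk usage exceeds configured threshold");

        // 2. Message with {} placeholders, filled from an Object[]
        logger.warn("Rejecting {} because it is zero bytes", new Object[]{"example.txt"});

        try {
            throw new IllegalStateException("simulated failure");
        } catch (final Exception e) {
            // 3. Placeholders plus a Throwable, so the stack trace is also logged
            logger.warn("Failed to process {} due to {}", new Object[]{"example.txt", e}, e);
        }
    }
}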
Example 1
Source File: ProvenanceEnumerator.java From nifi with Apache License 2.0
public ProvenanceEnumerator(final ReportingContext context, final ComponentLog logger, final int[] fields) {
    this.logger = logger;
    this.fields = fields;
    final EventAccess eventAccess = context.getEventAccess();
    this.provenanceEventRepository = eventAccess.getProvenanceRepository();
    final ProcessGroupStatus procGroupStatus = eventAccess.getControllerStatus();
    this.componentMapHolder = ComponentMapHolder.createComponentMap(procGroupStatus);

    final boolean isClustered = context.isClustered();
    nodeIdentifier = context.getClusterNodeIdentifier();
    if (nodeIdentifier == null && isClustered) {
        logger.warn("This instance of NiFi is configured for clustering, but the Cluster Node Identifier is not yet available. "
            + "The contentPath and previousContentPath fields will be null for all rows in this query");
    }

    try {
        this.provenanceEvents = provenanceEventRepository.getEvents(0, FETCH_SIZE);
    } catch (IOException ioe) {
        logger.error("Error retrieving provenance events, queries will return no rows");
    }

    reset();
}
Example 2
Source File: MonitorDiskUsage.java From localization_nifi with Apache License 2.0
static void checkThreshold(final String pathName, final Path path, final int threshold, final ComponentLog logger) {
    final File file = path.toFile();
    final long totalBytes = file.getTotalSpace();
    final long freeBytes = file.getFreeSpace();
    final long usedBytes = totalBytes - freeBytes;

    final double usedPercent = (double) usedBytes / (double) totalBytes * 100D;

    if (usedPercent >= threshold) {
        final String usedSpace = FormatUtils.formatDataSize(usedBytes);
        final String totalSpace = FormatUtils.formatDataSize(totalBytes);
        final String freeSpace = FormatUtils.formatDataSize(freeBytes);

        final double freePercent = (double) freeBytes / (double) totalBytes * 100D;

        final String message = String.format("%1$s exceeds configured threshold of %2$s%%, having %3$s / %4$s (%5$.2f%%) used and %6$s (%7$.2f%%) free",
            pathName, threshold, usedSpace, totalSpace, usedPercent, freeSpace, freePercent);
        logger.warn(message);
    }
}
Example 3
Source File: ConsumeKafka.java From nifi with Apache License 2.0
@OnUnscheduled
public void stopConnectionRetainer() {
    if (connectionRetainer != null) {
        final ComponentLog logger = getLogger();
        logger.debug("Canceling connectionRetainer... {}", new Object[]{connectionRetainer});
        try {
            connectionRetainer.shutdownNow();
        } catch (final Exception e) {
            logger.warn("Failed to shutdown connection retainer {} due to {}", new Object[]{connectionRetainer, e}, e);
        }
        connectionRetainer = null;
    }
}
Example 4
Source File: SimpleCsvFileLookupService.java From nifi with Apache License 2.0
private void loadCache() throws IllegalStateException, IOException {
    if (lock.tryLock()) {
        try {
            final ComponentLog logger = getLogger();
            if (logger.isDebugEnabled()) {
                logger.debug("Loading lookup table from file: " + csvFile);
            }
            final Map<String, String> properties = new HashMap<>();
            try (final InputStream is = new FileInputStream(csvFile)) {
                try (final InputStreamReader reader = new InputStreamReader(is, charset)) {
                    final Iterable<CSVRecord> records = csvFormat.withFirstRecordAsHeader().parse(reader);
                    for (final CSVRecord record : records) {
                        final String key = record.get(lookupKeyColumn);
                        final String value = record.get(lookupValueColumn);
                        if (StringUtils.isBlank(key)) {
                            throw new IllegalStateException("Empty lookup key encountered in: " + csvFile);
                        } else if (!ignoreDuplicates && properties.containsKey(key)) {
                            throw new IllegalStateException("Duplicate lookup key encountered: " + key + " in " + csvFile);
                        } else if (ignoreDuplicates && properties.containsKey(key)) {
                            logger.warn("Duplicate lookup key encountered: {} in {}", new Object[]{key, csvFile});
                        }
                        properties.put(key, value);
                    }
                }
            }
            this.cache = new ConcurrentHashMap<>(properties);
            if (cache.isEmpty()) {
                logger.warn("Lookup table is empty after reading file: " + csvFile);
            }
        } finally {
            lock.unlock();
        }
    }
}
Example 5
Source File: AlertHandler.java From nifi with Apache License 2.0
@Override
protected void executeAction(PropertyContext propertyContext, Action action, Map<String, Object> facts) {
    ComponentLog logger = getLogger();
    if (propertyContext instanceof ReportingContext) {
        ReportingContext context = (ReportingContext) propertyContext;
        Map<String, String> attributes = action.getAttributes();
        if (context.getBulletinRepository() != null) {
            final String category = attributes.getOrDefault("category", defaultCategory);
            final String message = getMessage(attributes.getOrDefault("message", defaultMessage), facts);
            final String level = attributes.getOrDefault("severity", attributes.getOrDefault("logLevel", defaultLogLevel));
            Severity severity;
            try {
                severity = Severity.valueOf(level.toUpperCase());
            } catch (IllegalArgumentException iae) {
                severity = Severity.INFO;
            }
            BulletinRepository bulletinRepository = context.getBulletinRepository();
            bulletinRepository.addBulletin(context.createBulletin(category, severity, message));
        } else {
            logger.warn("Bulletin Repository is not available which is unusual. Cannot send a bulletin.");
        }
    } else {
        logger.warn("Reporting context was not provided to create bulletins.");
    }
}
Example 6
Source File: ParseEvtx.java From localization_nifi with Apache License 2.0
protected String getBasename(FlowFile flowFile, ComponentLog logger) {
    String basename = flowFile.getAttribute(CoreAttributes.FILENAME.key());
    if (basename.endsWith(EVTX_EXTENSION)) {
        return basename.substring(0, basename.length() - EVTX_EXTENSION.length());
    } else {
        logger.warn("Trying to parse file without .evtx extension {} from flowfile {}", new Object[]{basename, flowFile});
        return basename;
    }
}
Example 7
Source File: PutHiveStreaming.java From localization_nifi with Apache License 2.0
@OnStopped
public void cleanup() {
    ComponentLog log = getLogger();
    sendHeartBeat.set(false);
    for (Map.Entry<HiveEndPoint, HiveWriter> entry : allWriters.entrySet()) {
        try {
            HiveWriter w = entry.getValue();
            w.flushAndClose();
        } catch (Exception ex) {
            log.warn("Error while closing writer to " + entry.getKey() + ". Exception follows.", ex);
            if (ex instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
        }
    }

    callTimeoutPool.shutdown();
    try {
        while (!callTimeoutPool.isTerminated()) {
            callTimeoutPool.awaitTermination(options.getCallTimeOut(), TimeUnit.MILLISECONDS);
        }
    } catch (Throwable t) {
        log.warn("shutdown interrupted on " + callTimeoutPool, t);
    }

    callTimeoutPool = null;
    ugi = null;
    hiveConfigurator.stopRenewer();
}
Example 8
Source File: LogMessage.java From nifi with Apache License 2.0
private void processFlowFile(
        final ComponentLog logger,
        final MessageLogLevel logLevel,
        final FlowFile flowFile,
        final ProcessContext context) {

    String logPrefix = context.getProperty(LOG_PREFIX).evaluateAttributeExpressions(flowFile).getValue();
    String logMessage = context.getProperty(LOG_MESSAGE).evaluateAttributeExpressions(flowFile).getValue();

    String messageToWrite;
    if (StringUtil.isBlank(logPrefix)) {
        messageToWrite = logMessage;
    } else {
        messageToWrite = String.format("%s%s", logPrefix, logMessage);
    }

    // Uses optional property to specify logging level
    switch (logLevel) {
        case info:
            logger.info(messageToWrite);
            break;
        case debug:
            logger.debug(messageToWrite);
            break;
        case warn:
            logger.warn(messageToWrite);
            break;
        case trace:
            logger.trace(messageToWrite);
            break;
        case error:
            logger.error(messageToWrite);
            break;
        default:
            logger.debug(messageToWrite);
    }
}
Example 9
Source File: PutFileTransfer.java From localization_nifi with Apache License 2.0
private ConflictResult identifyAndResolveConflictFile(
        final String conflictResolutionType,
        final T transfer,
        final String path,
        final FlowFile flowFile,
        final boolean rejectZeroByteFiles,
        final ComponentLog logger) throws IOException {

    Relationship destinationRelationship = REL_SUCCESS;
    String fileName = flowFile.getAttribute(CoreAttributes.FILENAME.key());
    boolean transferFile = true;
    boolean penalizeFile = false;

    // First, check if the file is empty
    // Reject files that are zero bytes or less
    if (rejectZeroByteFiles) {
        final long sizeInBytes = flowFile.getSize();
        if (sizeInBytes == 0) {
            logger.warn("Rejecting {} because it is zero bytes", new Object[]{flowFile});
            return new ConflictResult(REL_REJECT, false, fileName, true);
        }
    }

    // Second, check if the user doesn't care about detecting naming conflicts ahead of time
    if (conflictResolutionType.equalsIgnoreCase(FileTransfer.CONFLICT_RESOLUTION_NONE)) {
        return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
    }

    final FileInfo remoteFileInfo = transfer.getRemoteFileInfo(flowFile, path, fileName);
    if (remoteFileInfo == null) {
        return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
    }

    if (remoteFileInfo.isDirectory()) {
        logger.warn("Resolving conflict by rejecting {} due to conflicting filename with a directory or file already on remote server", new Object[]{flowFile});
        return new ConflictResult(REL_REJECT, false, fileName, false);
    }

    logger.info("Discovered a filename conflict on the remote server for {} so handling using configured Conflict Resolution of {}",
        new Object[]{flowFile, conflictResolutionType});

    switch (conflictResolutionType.toUpperCase()) {
        case FileTransfer.CONFLICT_RESOLUTION_REJECT:
            destinationRelationship = REL_REJECT;
            transferFile = false;
            penalizeFile = false;
            logger.warn("Resolving conflict by rejecting {} due to conflicting filename with a directory or file already on remote server", new Object[]{flowFile});
            break;
        case FileTransfer.CONFLICT_RESOLUTION_REPLACE:
            transfer.deleteFile(path, fileName);
            destinationRelationship = REL_SUCCESS;
            transferFile = true;
            penalizeFile = false;
            logger.info("Resolving filename conflict for {} with remote server by deleting remote file and replacing with flow file", new Object[]{flowFile});
            break;
        case FileTransfer.CONFLICT_RESOLUTION_RENAME:
            boolean uniqueNameGenerated = false;
            for (int i = 1; i < 100 && !uniqueNameGenerated; i++) {
                String possibleFileName = i + "." + fileName;
                final FileInfo renamedFileInfo = transfer.getRemoteFileInfo(flowFile, path, possibleFileName);
                uniqueNameGenerated = (renamedFileInfo == null);
                if (uniqueNameGenerated) {
                    fileName = possibleFileName;
                    logger.info("Attempting to resolve filename conflict for {} on the remote server by using a newly generated filename of: {}",
                        new Object[]{flowFile, fileName});
                    destinationRelationship = REL_SUCCESS;
                    transferFile = true;
                    penalizeFile = false;
                    break;
                }
            }
            if (!uniqueNameGenerated) {
                destinationRelationship = REL_REJECT;
                transferFile = false;
                penalizeFile = false;
                logger.warn("Could not determine a unique name after 99 attempts. Switching resolution mode to REJECT for " + flowFile);
            }
            break;
        case FileTransfer.CONFLICT_RESOLUTION_IGNORE:
            destinationRelationship = REL_SUCCESS;
            transferFile = false;
            penalizeFile = false;
            logger.info("Resolving conflict for {} by not transferring file and still considering the process a success.", new Object[]{flowFile});
            break;
        case FileTransfer.CONFLICT_RESOLUTION_FAIL:
            destinationRelationship = REL_FAILURE;
            transferFile = false;
            penalizeFile = true;
            logger.warn("Resolved filename conflict for {} as configured by routing to FAILURE relationship.", new Object[]{flowFile});
        default:
            break;
    }

    return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
}
Example 10
Source File: ScrollElasticsearchHttp.java From nifi with Apache License 2.0
private void getPage(final Response getResponse, final URL url, final ProcessContext context,
        final ProcessSession session, FlowFile flowFile, final ComponentLog logger, final long startNanos,
        Charset charset) throws IOException {

    final int statusCode = getResponse.code();

    if (isSuccess(statusCode)) {
        ResponseBody body = getResponse.body();
        final byte[] bodyBytes = body.bytes();
        JsonNode responseJson = parseJsonResponse(new ByteArrayInputStream(bodyBytes));
        String scrollId = responseJson.get("_scroll_id").asText();

        StringBuilder builder = new StringBuilder();
        builder.append("{ \"hits\" : [");

        JsonNode hits = responseJson.get("hits").get("hits");
        if (hits.size() == 0) {
            finishQuery(context.getStateManager());
            session.remove(flowFile);
            return;
        }

        for (int i = 0; i < hits.size(); i++) {
            JsonNode hit = hits.get(i);
            String retrievedIndex = hit.get("_index").asText();
            String retrievedType = hit.get("_type").asText();

            JsonNode source = hit.get("_source");
            flowFile = session.putAttribute(flowFile, "es.index", retrievedIndex);
            flowFile = session.putAttribute(flowFile, "es.type", retrievedType);
            flowFile = session.putAttribute(flowFile, "mime.type", "application/json");

            builder.append(source.toString());
            if (i < hits.size() - 1) {
                builder.append(", ");
            }
        }
        builder.append("] }");
        logger.debug("Elasticsearch retrieved " + responseJson.size() + " documents, routing to success");

        flowFile = session.write(flowFile, out -> {
            out.write(builder.toString().getBytes(charset));
        });
        session.transfer(flowFile, REL_SUCCESS);
        saveScrollId(context.getStateManager(), scrollId);

        // emit provenance event
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().receive(flowFile, url.toExternalForm(), millis);
    } else {
        // 5xx -> RETRY, but a server error might last a while, so yield
        if (statusCode / 100 == 5) {
            logger.warn("Elasticsearch returned code {} with message {}, removing the flow file. This is likely a server problem, yielding...",
                new Object[]{statusCode, getResponse.message()});
            session.remove(flowFile);
            context.yield();
        } else {
            logger.warn("Elasticsearch returned code {} with message {}", new Object[]{statusCode, getResponse.message()});
            session.remove(flowFile);
        }
    }
}
Example 11
Source File: LogAttribute.java From localization_nifi with Apache License 2.0
protected String processFlowFile(final ComponentLog logger, final DebugLevels logLevel, final FlowFile flowFile,
        final ProcessSession session, final ProcessContext context) {
    final Set<String> attributeKeys = getAttributesToLog(flowFile.getAttributes().keySet(), context);
    final ComponentLog LOG = getLogger();
    final String dashedLine;

    String logPrefix = context.getProperty(LOG_PREFIX).evaluateAttributeExpressions(flowFile).getValue();

    if (StringUtil.isBlank(logPrefix)) {
        dashedLine = StringUtils.repeat('-', 50);
    } else {
        // abbreviate long lines
        logPrefix = StringUtils.abbreviate(logPrefix, 40);
        // center the logPrefix and pad with dashes
        logPrefix = StringUtils.center(logPrefix, 40, '-');
        // five dashes on the left and right side, plus the dashed logPrefix
        dashedLine = StringUtils.repeat('-', 5) + logPrefix + StringUtils.repeat('-', 5);
    }

    // Pretty print metadata
    final StringBuilder message = new StringBuilder();
    message.append("logging for flow file ").append(flowFile);
    message.append("\n");
    message.append(dashedLine);
    message.append("\nStandard FlowFile Attributes");
    message.append(String.format("\nKey: '%1$s'\n\tValue: '%2$s'", "entryDate", new Date(flowFile.getEntryDate())));
    message.append(String.format("\nKey: '%1$s'\n\tValue: '%2$s'", "lineageStartDate", new Date(flowFile.getLineageStartDate())));
    message.append(String.format("\nKey: '%1$s'\n\tValue: '%2$s'", "fileSize", flowFile.getSize()));
    message.append("\nFlowFile Attribute Map Content");
    for (final String key : attributeKeys) {
        message.append(String.format("\nKey: '%1$s'\n\tValue: '%2$s'", key, flowFile.getAttribute(key)));
    }
    message.append("\n");
    message.append(dashedLine);

    // The user can request to log the payload
    final boolean logPayload = context.getProperty(LOG_PAYLOAD).asBoolean();
    if (logPayload) {
        message.append("\n");
        if (flowFile.getSize() < ONE_MB) {
            final FlowFilePayloadCallback callback = new FlowFilePayloadCallback();
            session.read(flowFile, callback);
            message.append(callback.getContents());
        } else {
            message.append("\n Not including payload since it is larger than one mb.");
        }
    }
    final String outputMessage = message.toString().trim();

    // Uses optional property to specify logging level
    switch (logLevel) {
        case info:
            LOG.info(outputMessage);
            break;
        case debug:
            LOG.debug(outputMessage);
            break;
        case warn:
            LOG.warn(outputMessage);
            break;
        case trace:
            LOG.trace(outputMessage);
            break;
        case error:
            LOG.error(outputMessage);
            break;
        default:
            LOG.debug(outputMessage);
    }

    return outputMessage;
}
Example 12
Source File: KerberosProperties.java From nifi with Apache License 2.0
public static List<ValidationResult> validatePrincipalWithKeytabOrPassword(final String subject, final Configuration config,
        final String principal, final String keytab, final String password, final ComponentLog logger) {
    final List<ValidationResult> results = new ArrayList<>();

    // if security is enabled then the keytab and principal are required
    final boolean isSecurityEnabled = SecurityUtil.isSecurityEnabled(config);

    final boolean blankPrincipal = (principal == null || principal.isEmpty());
    if (isSecurityEnabled && blankPrincipal) {
        results.add(new ValidationResult.Builder()
            .valid(false)
            .subject(subject)
            .explanation("Kerberos Principal must be provided when using a secure configuration")
            .build());
    }

    final boolean blankKeytab = (keytab == null || keytab.isEmpty());
    final boolean blankPassword = (password == null || password.isEmpty());

    if (isSecurityEnabled && blankKeytab && blankPassword) {
        results.add(new ValidationResult.Builder()
            .valid(false)
            .subject(subject)
            .explanation("Kerberos Keytab or Kerberos Password must be provided when using a secure configuration")
            .build());
    }

    if (isSecurityEnabled && !blankKeytab && !blankPassword) {
        results.add(new ValidationResult.Builder()
            .valid(false)
            .subject(subject)
            .explanation("Cannot specify both a Kerberos Keytab and a Kerberos Password")
            .build());
    }

    if (!isSecurityEnabled && (!blankPrincipal || !blankKeytab)) {
        logger.warn("Configuration does not have security enabled, Keytab and Principal will be ignored");
    }

    return results;
}
Example 13
Source File: EncodeContent.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    boolean encode = context.getProperty(MODE).getValue().equalsIgnoreCase(ENCODE_MODE);
    String encoding = context.getProperty(ENCODING).getValue();
    StreamCallback encoder = null;

    // Select the encoder/decoder to use
    if (encode) {
        if (encoding.equalsIgnoreCase(BASE64_ENCODING)) {
            encoder = new EncodeBase64();
        } else if (encoding.equalsIgnoreCase(BASE32_ENCODING)) {
            encoder = new EncodeBase32();
        } else if (encoding.equalsIgnoreCase(HEX_ENCODING)) {
            encoder = new EncodeHex();
        }
    } else {
        if (encoding.equalsIgnoreCase(BASE64_ENCODING)) {
            encoder = new DecodeBase64();
        } else if (encoding.equalsIgnoreCase(BASE32_ENCODING)) {
            encoder = new DecodeBase32();
        } else if (encoding.equalsIgnoreCase(HEX_ENCODING)) {
            encoder = new DecodeHex();
        }
    }

    if (encoder == null) {
        logger.warn("Unknown operation: {} {}", new Object[]{encode ? "encode" : "decode", encoding});
        return;
    }

    try {
        final StopWatch stopWatch = new StopWatch(true);
        flowFile = session.write(flowFile, encoder);
        logger.info("Successfully {} {}", new Object[]{encode ? "encoded" : "decoded", flowFile});
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (Exception e) {
        logger.error("Failed to {} {} due to {}", new Object[]{encode ? "encode" : "decode", flowFile, e});
        session.transfer(flowFile, REL_FAILURE);
    }
}
Example 14
Source File: StandardProcessorNode.java From localization_nifi with Apache License 2.0
/**
 * Will idempotently start the processor using the following sequence: <i>
 * <ul>
 * <li>Validate Processor's state (e.g., PropertyDescriptors,
 * ControllerServices etc.)</li>
 * <li>Transition (atomically) Processor's scheduled state from STOPPED to
 * STARTING. If the above state transition succeeds, then execute the start
 * task (asynchronously) which will be re-tried until @OnScheduled is
 * executed successfully and 'schedulingAgentCallback' is invoked, or until
 * STOP operation is initiated on this processor. If the state transition fails,
 * it means the processor is already being started and a WARN message will be
 * logged explaining it.</li>
 * </ul>
 * </i>
 * <p>
 * Any exception thrown while invoking operations annotated with @OnScheduled
 * will be caught and logged, after which the @OnUnscheduled operation will be
 * invoked (quietly) and the start sequence will be repeated (re-tried) after
 * the delay provided by 'administrativeYieldMillis'.
 * </p>
 * <p>
 * Upon successful completion of the start sequence (@OnScheduled ->
 * 'schedulingAgentCallback') an attempt will be made to transition the
 * processor's scheduling state to RUNNING, at which point the processor is
 * considered to be fully started and functioning. If upon successful
 * invocation of the @OnScheduled operation the processor can not be
 * transitioned to the RUNNING state (e.g., a STOP operation was invoked on the
 * processor while its @OnScheduled operation was executing), the
 * processor's @OnUnscheduled operation will be invoked and its scheduling
 * state will be set to STOPPED, at which point the processor is considered
 * to be fully stopped.
 * </p>
 */
@Override
public <T extends ProcessContext & ControllerServiceLookup> void start(final ScheduledExecutorService taskScheduler,
        final long administrativeYieldMillis, final T processContext, final SchedulingAgentCallback schedulingAgentCallback) {
    if (!this.isValid()) {
        throw new IllegalStateException(
            "Processor " + this.getName() + " is not in a valid state due to " + this.getValidationErrors());
    }
    final ComponentLog procLog = new SimpleProcessLogger(StandardProcessorNode.this.getIdentifier(), processor);
    if (this.scheduledState.compareAndSet(ScheduledState.STOPPED, ScheduledState.STARTING)) {
        // will ensure that the Processor represented by this node can only be started once
        final Runnable startProcRunnable = new Runnable() {
            @Override
            public void run() {
                try {
                    invokeTaskAsCancelableFuture(schedulingAgentCallback, new Callable<Void>() {
                        @Override
                        public Void call() throws Exception {
                            try (final NarCloseable nc = NarCloseable.withComponentNarLoader(processor.getClass(), processor.getIdentifier())) {
                                ReflectionUtils.invokeMethodsWithAnnotation(OnScheduled.class, processor, processContext);
                                return null;
                            }
                        }
                    });

                    if (scheduledState.compareAndSet(ScheduledState.STARTING, ScheduledState.RUNNING)) {
                        schedulingAgentCallback.trigger(); // callback provided by StandardProcessScheduler to essentially initiate component's onTrigger() cycle
                    } else { // can only happen if stopProcessor was called before service was transitioned to RUNNING state
                        try (final NarCloseable nc = NarCloseable.withComponentNarLoader(processor.getClass(), processor.getIdentifier())) {
                            ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnUnscheduled.class, processor, processContext);
                        }
                        scheduledState.set(ScheduledState.STOPPED);
                    }
                } catch (final Exception e) {
                    final Throwable cause = e instanceof InvocationTargetException ? e.getCause() : e;
                    procLog.error("{} failed to invoke @OnScheduled method due to {}; processor will not be scheduled to run for {} seconds",
                        new Object[] {StandardProcessorNode.this.getProcessor(), cause, administrativeYieldMillis / 1000L}, cause);
                    LOG.error("Failed to invoke @OnScheduled method due to {}", cause.toString(), cause);

                    ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnUnscheduled.class, processor, processContext);
                    ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnStopped.class, processor, processContext);

                    if (scheduledState.get() != ScheduledState.STOPPING) { // make sure we only continue retry loop if STOP action wasn't initiated
                        taskScheduler.schedule(this, administrativeYieldMillis, TimeUnit.MILLISECONDS);
                    } else {
                        scheduledState.set(ScheduledState.STOPPED);
                    }
                }
            }
        };
        taskScheduler.execute(startProcRunnable);
    } else {
        final String procName = this.processor.getClass().getSimpleName();
        LOG.warn("Can not start '" + procName + "' since it's already in the process of being started or it is DISABLED - " + scheduledState.get());
        procLog.warn("Can not start '" + procName + "' since it's already in the process of being started or it is DISABLED - " + scheduledState.get());
    }
}
Example 15
Source File: InvokeGRPC.java From nifi with Apache License 2.0
/**
 * Whenever this processor is triggered, we need to construct a client in order to communicate
 * with the configured gRPC service.
 *
 * @param context the processor context
 */
@OnScheduled
public void initializeClient(final ProcessContext context) throws Exception {
    channelReference.set(null);
    blockingStubReference.set(null);
    final ComponentLog logger = getLogger();

    final String host = context.getProperty(PROP_SERVICE_HOST).getValue();
    final int port = context.getProperty(PROP_SERVICE_PORT).asInteger();
    final Integer maxMessageSize = context.getProperty(PROP_MAX_MESSAGE_SIZE).asDataSize(DataUnit.B).intValue();
    String userAgent = USER_AGENT_PREFIX;
    try {
        userAgent += "_" + InetAddress.getLocalHost().getHostName();
    } catch (final UnknownHostException e) {
        logger.warn("Unable to determine local hostname. Defaulting gRPC user agent to {}.", new Object[]{USER_AGENT_PREFIX}, e);
    }

    final NettyChannelBuilder nettyChannelBuilder = NettyChannelBuilder.forAddress(host, port)
        // supports both gzip and plaintext, but will compress by default.
        .compressorRegistry(CompressorRegistry.getDefaultInstance())
        .decompressorRegistry(DecompressorRegistry.getDefaultInstance())
        .maxInboundMessageSize(maxMessageSize)
        .userAgent(userAgent);

    // configure whether or not we're using secure comms
    final boolean useSecure = context.getProperty(PROP_USE_SECURE).asBoolean();
    final SSLContextService sslContextService = context.getProperty(PROP_SSL_CONTEXT_SERVICE).asControllerService(SSLContextService.class);
    final SSLContext sslContext = sslContextService == null ? null : sslContextService.createSSLContext(SslContextFactory.ClientAuth.NONE);

    if (useSecure && sslContext != null) {
        SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
        if (StringUtils.isNotBlank(sslContextService.getKeyStoreFile())) {
            final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm(), sslContext.getProvider());
            final KeyStore keyStore = KeyStore.getInstance(sslContextService.getKeyStoreType());
            try (final InputStream is = new FileInputStream(sslContextService.getKeyStoreFile())) {
                keyStore.load(is, sslContextService.getKeyStorePassword().toCharArray());
            }
            keyManagerFactory.init(keyStore, sslContextService.getKeyStorePassword().toCharArray());
            sslContextBuilder.keyManager(keyManagerFactory);
        }

        if (StringUtils.isNotBlank(sslContextService.getTrustStoreFile())) {
            final TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm(), sslContext.getProvider());
            final KeyStore trustStore = KeyStore.getInstance(sslContextService.getTrustStoreType());
            try (final InputStream is = new FileInputStream(sslContextService.getTrustStoreFile())) {
                trustStore.load(is, sslContextService.getTrustStorePassword().toCharArray());
            }
            trustManagerFactory.init(trustStore);
            sslContextBuilder.trustManager(trustManagerFactory);
        }
        nettyChannelBuilder.sslContext(sslContextBuilder.build());
    } else {
        nettyChannelBuilder.usePlaintext(true);
    }

    final ManagedChannel channel = nettyChannelBuilder.build();
    final FlowFileServiceGrpc.FlowFileServiceBlockingStub blockingStub = FlowFileServiceGrpc.newBlockingStub(channel);
    channelReference.set(channel);
    blockingStubReference.set(blockingStub);
}
Example 16
Source File: FetchElasticsearch.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String index = context.getProperty(INDEX).evaluateAttributeExpressions(flowFile).getValue();
    final String docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String docType = context.getProperty(TYPE).evaluateAttributeExpressions(flowFile).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    final ComponentLog logger = getLogger();

    try {
        logger.debug("Fetching {}/{}/{} from Elasticsearch", new Object[]{index, docType, docId});
        final long startNanos = System.nanoTime();

        GetRequestBuilder getRequestBuilder = esClient.get().prepareGet(index, docType, docId);
        if (authToken != null) {
            getRequestBuilder.putHeader("Authorization", authToken);
        }
        final GetResponse getResponse = getRequestBuilder.execute().actionGet();

        if (getResponse == null || !getResponse.isExists()) {
            logger.warn("Failed to read {}/{}/{} from Elasticsearch: Document not found", new Object[]{index, docType, docId});

            // We couldn't find the document, so penalize it and send it to "not found"
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            flowFile = session.putAttribute(flowFile, "filename", docId);
            flowFile = session.putAttribute(flowFile, "es.index", index);
            flowFile = session.putAttribute(flowFile, "es.type", docType);
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(OutputStream out) throws IOException {
                    out.write(getResponse.getSourceAsString().getBytes(charset));
                }
            });
            logger.debug("Elasticsearch document " + docId + " fetched, routing to success");
            final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            final String uri = context.getProperty(HOSTS).evaluateAttributeExpressions().getValue() + "/" + index + "/" + docType + "/" + docId;
            session.getProvenanceReporter().fetch(flowFile, uri, millis);
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (NoNodeAvailableException | ElasticsearchTimeoutException | ReceiveTimeoutTransportException | NodeClosedException exceptionToRetry) {
        logger.error("Failed to read into Elasticsearch due to {}, this may indicate an error in configuration "
                + "(hosts, username/password, etc.). Routing to retry",
            new Object[]{exceptionToRetry.getLocalizedMessage()}, exceptionToRetry);
        session.transfer(flowFile, REL_RETRY);
        context.yield();
    } catch (Exception e) {
        logger.error("Failed to read {} from Elasticsearch due to {}", new Object[]{flowFile, e.getLocalizedMessage()}, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
Example 17
Source File: ScrollElasticsearchHttp.java From localization_nifi with Apache License 2.0
private void getPage(final Response getResponse, final URL url, final ProcessContext context,
        final ProcessSession session, FlowFile flowFile, final ComponentLog logger, final long startNanos)
        throws IOException {

    final int statusCode = getResponse.code();

    if (isSuccess(statusCode)) {
        ResponseBody body = getResponse.body();
        final byte[] bodyBytes = body.bytes();
        JsonNode responseJson = parseJsonResponse(new ByteArrayInputStream(bodyBytes));
        String scrollId = responseJson.get("_scroll_id").asText();

        StringBuilder builder = new StringBuilder();
        builder.append("{ \"hits\" : [");

        JsonNode hits = responseJson.get("hits").get("hits");
        if (hits.size() == 0) {
            finishQuery(context.getStateManager());
            session.remove(flowFile);
            return;
        }

        for (int i = 0; i < hits.size(); i++) {
            JsonNode hit = hits.get(i);
            String retrievedIndex = hit.get("_index").asText();
            String retrievedType = hit.get("_type").asText();

            JsonNode source = hit.get("_source");
            flowFile = session.putAttribute(flowFile, "es.index", retrievedIndex);
            flowFile = session.putAttribute(flowFile, "es.type", retrievedType);
            flowFile = session.putAttribute(flowFile, "mime.type", "application/json");

            builder.append(source.toString());
            if (i < hits.size() - 1) {
                builder.append(", ");
            }
        }
        builder.append("] }");
        logger.debug("Elasticsearch retrieved " + responseJson.size() + " documents, routing to success");

        flowFile = session.write(flowFile, out -> {
            out.write(builder.toString().getBytes());
        });
        session.transfer(flowFile, REL_SUCCESS);
        saveScrollId(context.getStateManager(), scrollId);

        // emit provenance event
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().receive(flowFile, url.toExternalForm(), millis);
    } else {
        // 5xx -> RETRY, but a server error might last a while, so yield
        if (statusCode / 100 == 5) {
            logger.warn("Elasticsearch returned code {} with message {}, removing the flow file. This is likely a server problem, yielding...",
                new Object[]{statusCode, getResponse.message()});
            session.remove(flowFile);
            context.yield();
        } else {
            logger.warn("Elasticsearch returned code {} with message {}", new Object[]{statusCode, getResponse.message()});
            session.remove(flowFile);
        }
    }
}
Example 18
Source File: IdentifyMimeType.java From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final AtomicReference<String> mimeTypeRef = new AtomicReference<>(null);
    final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key());

    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream stream) throws IOException {
            try (final InputStream in = new BufferedInputStream(stream)) {
                TikaInputStream tikaStream = TikaInputStream.get(in);
                Metadata metadata = new Metadata();

                if (filename != null && context.getProperty(USE_FILENAME_IN_DETECTION).asBoolean()) {
                    metadata.add(TikaMetadataKeys.RESOURCE_NAME_KEY, filename);
                }
                // Get mime type
                MediaType mediatype = detector.detect(tikaStream, metadata);
                mimeTypeRef.set(mediatype.toString());
            }
        }
    });

    String mimeType = mimeTypeRef.get();
    String extension = "";
    try {
        MimeType mimetype;
        mimetype = mimeTypes.forName(mimeType);
        extension = mimetype.getExtension();
    } catch (MimeTypeException ex) {
        logger.warn("MIME type extension lookup failed: {}", new Object[]{ex});
    }

    // Workaround for bug in Tika - https://issues.apache.org/jira/browse/TIKA-1563
    if (mimeType != null && mimeType.equals("application/gzip") && extension.equals(".tgz")) {
        extension = ".gz";
    }

    if (mimeType == null) {
        flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), "application/octet-stream");
        flowFile = session.putAttribute(flowFile, "mime.extension", "");
        logger.info("Unable to identify MIME Type for {}; setting to application/octet-stream", new Object[]{flowFile});
    } else {
        flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), mimeType);
        flowFile = session.putAttribute(flowFile, "mime.extension", extension);
        logger.info("Identified {} as having MIME Type {}", new Object[]{flowFile, mimeType});
    }

    session.getProvenanceReporter().modifyAttributes(flowFile);
    session.transfer(flowFile, REL_SUCCESS);
}
Example 19
Source File: PutDistributedMapCache.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();

    // cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language support
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();

    // if the computed value is null, or empty, we transfer the flow file to failure relationship
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] {flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    // the cache client used to interact with the distributed cache
    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {
        final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
        long flowFileSize = flowFile.getSize();

        // too big flow file
        if (flowFileSize > maxCacheEntrySize) {
            logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] {flowFile, flowFileSize, maxCacheEntrySize});
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        if (flowFileSize == 0) {
            logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] {flowFile});
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        // get flow file content
        final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        session.exportTo(flowFile, byteStream);
        byte[] cacheValue = byteStream.toByteArray();

        final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
        boolean cached = false;

        if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
            cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
            cached = true;
        } else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
            final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
            if (oldValue == null) {
                cached = true;
            }
        }

        // set 'cached' attribute
        flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));

        if (cached) {
            session.transfer(flowFile, REL_SUCCESS);
        } else {
            session.transfer(flowFile, REL_FAILURE);
        }

    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] {flowFile, e});
    }
}
Example 20
Source File: CSVRecordLookupService.java From nifi with Apache License 2.0
private void loadCache() throws IllegalStateException, IOException {
    if (lock.tryLock()) {
        try {
            final ComponentLog logger = getLogger();
            if (logger.isDebugEnabled()) {
                logger.debug("Loading lookup table from file: " + csvFile);
            }
            ConcurrentHashMap<String, Record> cache = new ConcurrentHashMap<>();
            try (final InputStream is = new FileInputStream(csvFile)) {
                try (final InputStreamReader reader = new InputStreamReader(is, charset)) {
                    final CSVParser records = csvFormat.withFirstRecordAsHeader().parse(reader);
                    RecordSchema lookupRecordSchema = null;
                    for (final CSVRecord record : records) {
                        final String key = record.get(lookupKeyColumn);

                        if (StringUtils.isBlank(key)) {
                            throw new IllegalStateException("Empty lookup key encountered in: " + csvFile);
                        } else if (!ignoreDuplicates && cache.containsKey(key)) {
                            throw new IllegalStateException("Duplicate lookup key encountered: " + key + " in " + csvFile);
                        } else if (ignoreDuplicates && cache.containsKey(key)) {
                            logger.warn("Duplicate lookup key encountered: {} in {}", new Object[]{key, csvFile});
                        }

                        // Put each key/value pair (except the lookup) into the properties
                        final Map<String, Object> properties = new HashMap<>();
                        record.toMap().forEach((k, v) -> {
                            if (!lookupKeyColumn.equals(k)) {
                                properties.put(k, v);
                            }
                        });

                        if (lookupRecordSchema == null) {
                            List<RecordField> recordFields = new ArrayList<>(properties.size());
                            properties.forEach((k, v) -> recordFields.add(new RecordField(k, RecordFieldType.STRING.getDataType())));
                            lookupRecordSchema = new SimpleRecordSchema(recordFields);
                        }

                        cache.put(key, new MapRecord(lookupRecordSchema, properties));
                    }
                }
            }

            this.cache = cache;

            if (cache.isEmpty()) {
                logger.warn("Lookup table is empty after reading file: " + csvFile);
            }
        } finally {
            lock.unlock();
        }
    }
}
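Finally, if you want to assert on these warnings in a unit test, the nifi-mock module's TestRunner exposes a MockComponentLog that records every message a component logs. A rough sketch, assuming the nifi-mock TestRunner/MockComponentLog API and reusing the hypothetical processor from the sketch near the top of this page:

// Rough sketch: inspecting warn() output via nifi-mock (API names assumed from nifi-mock).
// HypotheticalProcessor is the illustrative processor defined earlier, not a real NiFi class.
import org.apache.nifi.util.LogMessage;
import org.apache.nifi.util.MockComponentLog;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;

public class HypotheticalProcessorTest {
    public static void main(String[] args) {
        final TestRunner runner = TestRunners.newTestRunner(new HypotheticalProcessor());
        runner.enqueue(new byte[0]);
        runner.run();

        // MockComponentLog captures everything the component logged during the run
        final MockComponentLog logger = runner.getLogger();
        for (final LogMessage warning : logger.getWarnMessages()) {
            System.out.println("WARN: " + warning.getMsg());
        }
    }
}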