Java Code Examples for org.apache.nifi.processor.ProcessSession#rollback()
The following examples show how to use org.apache.nifi.processor.ProcessSession#rollback(). Each example is taken from an open-source project; the source file and originating project are noted above each snippet.
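Before the examples, here is a minimal sketch of the pattern most of them share: create a session, do the work, commit() on success, and rollback() on failure so that any FlowFiles the session touched are returned to their incoming queues. This sketch is illustrative only; the class name, relationship, and error handling are assumptions, not code from any project below.

    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.ProcessSession;
    import org.apache.nifi.processor.ProcessSessionFactory;
    import org.apache.nifi.processor.Relationship;
    import org.apache.nifi.processor.exception.ProcessException;

    // Hypothetical helper showing the commit/rollback contract the examples below share.
    public final class SessionRollbackSketch {

        // Illustrative relationship; real processors declare their own.
        private static final Relationship REL_SUCCESS = new Relationship.Builder()
                .name("success")
                .build();

        public static void runOnce(final ProcessSessionFactory sessionFactory) {
            final ProcessSession session = sessionFactory.createSession();
            try {
                final FlowFile flowFile = session.get();
                if (flowFile != null) {
                    // ... transform or route the FlowFile here ...
                    session.transfer(flowFile, REL_SUCCESS);
                }
                session.commit(); // persist every change made through this session
            } catch (final Throwable t) {
                // Return any FlowFiles this session touched to their incoming queues.
                // rollback(true) would also penalize them so they are not retried immediately.
                session.rollback();
                throw new ProcessException("Processing failed; session rolled back", t);
            }
        }
    }

The overload session.rollback(true) additionally penalizes the rolled-back FlowFiles so they are not retried immediately; Examples 3, 4, and 19 below use it for exactly that purpose.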
Example 1
Source File: ConsumeAzureEventHub.java From nifi with Apache License 2.0
@Override
public void onEvents(PartitionContext context, Iterable<EventData> messages) throws Exception {
    final ProcessSession session = processSessionFactory.createSession();
    try {
        final StopWatch stopWatch = new StopWatch(true);

        if (readerFactory != null && writerFactory != null) {
            writeRecords(context, messages, session, stopWatch);
        } else {
            writeFlowFiles(context, messages, session, stopWatch);
        }

        // Commit NiFi first.
        session.commit();
        // If creating an Event Hub checkpoint failed, then the same message can be retrieved again.
        context.checkpoint();
    } catch (Exception e) {
        getLogger().error("Unable to fully process received message due to " + e, e);
        // FlowFiles that have already been committed will not be rolled back.
        session.rollback();
    }
}
Example 2
Source File: DistributeLoad.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final DistributionStrategy strategy = strategyRef.get();
    final Set<Relationship> available = context.getAvailableRelationships();
    final int numRelationships = context.getProperty(NUM_RELATIONSHIPS).asInteger();
    final boolean allDestinationsAvailable = (available.size() == numRelationships);
    if (!allDestinationsAvailable && strategy.requiresAllDestinationsAvailable()) {
        // can't transfer the FlowFiles. Roll back and yield
        session.rollback();
        context.yield();
        return;
    }

    final Relationship relationship = strategy.mapToRelationship(context, flowFile);
    if (relationship == null) {
        // can't transfer the FlowFiles. Roll back and yield
        session.rollback();
        context.yield();
        return;
    }

    session.transfer(flowFile, relationship);
    session.getProvenanceReporter().route(flowFile, relationship);
}
Example 3
Source File: AbstractFlumeProcessor.java From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    final ProcessSession session = sessionFactory.createSession();
    try {
        onTrigger(context, session);
        session.commit();
    } catch (final Throwable t) {
        getLogger().error("{} failed to process due to {}; rolling back session", new Object[]{this, t});
        session.rollback(true);
        throw t;
    }
}
Example 4
Source File: AbstractFlumeProcessor.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    final ProcessSession session = sessionFactory.createSession();
    try {
        onTrigger(context, session);
        session.commit();
    } catch (final Throwable t) {
        getLogger().error("{} failed to process due to {}; rolling back session", new Object[]{this, t});
        session.rollback(true);
        throw t;
    }
}
Example 5
Source File: AbstractPort.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    final ProcessSession session = sessionFactory.createSession();
    try {
        onTrigger(context, session);
        session.commit();
    } catch (final ProcessException e) {
        session.rollback();
        throw e;
    } catch (final Throwable t) {
        session.rollback();
        throw new RuntimeException(t);
    }
}
Example 6
Source File: StandardFunnel.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    final ProcessSession session = sessionFactory.createSession();
    try {
        onTrigger(context, session);
        session.commit();
    } catch (final ProcessException e) {
        session.rollback();
        throw e;
    } catch (final Throwable t) {
        session.rollback();
        throw new RuntimeException(t);
    }
}
Example 7
Source File: WeakHashMapProcessSessionFactory.java From nifi with Apache License 2.0
@Override
public synchronized void terminateActiveSessions() {
    terminated = true;
    for (final ProcessSession session : sessionMap.keySet()) {
        try {
            session.rollback();
        } catch (final TerminatedTaskException tte) {
            // ignore
        }
    }
    sessionMap.clear();
}
Example 8
Source File: GetHDFSSequenceFile.java From nifi with Apache License 2.0
@Override
protected void processBatchOfFiles(final List<Path> files, final ProcessContext context, final ProcessSession session) {
    final Configuration conf = getConfiguration();
    final FileSystem hdfs = getFileSystem();
    final String flowFileContentValue = context.getProperty(FLOWFILE_CONTENT).getValue();
    final boolean keepSourceFiles = context.getProperty(KEEP_SOURCE_FILE).asBoolean();
    final Double bufferSizeProp = context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B);
    if (bufferSizeProp != null) {
        int bufferSize = bufferSizeProp.intValue();
        conf.setInt(BUFFER_SIZE_KEY, bufferSize);
    }
    ComponentLog logger = getLogger();

    final SequenceFileReader<Set<FlowFile>> reader;
    if (flowFileContentValue.equalsIgnoreCase(VALUE_ONLY)) {
        reader = new ValueReader(session);
    } else {
        reader = new KeyValueReader(session);
    }

    Set<FlowFile> flowFiles = Collections.emptySet();
    for (final Path file : files) {
        if (!this.isScheduled()) {
            break; // This processor should stop running immediately.
        }

        final StopWatch stopWatch = new StopWatch(false);
        try {
            stopWatch.start();
            if (!hdfs.exists(file)) {
                continue; // If file is no longer here move on.
            }
            logger.debug("Reading file");
            flowFiles = getFlowFiles(conf, hdfs, reader, file);
            if (!keepSourceFiles && !hdfs.delete(file, false)) {
                logger.warn("Unable to delete path " + file.toString() + " from HDFS. Will likely be picked up over and over...");
            }
        } catch (Throwable t) {
            logger.error("Error retrieving file {} from HDFS due to {}", new Object[]{file, t});
            session.rollback();
            context.yield();
        } finally {
            stopWatch.stop();
            long totalSize = 0;
            for (FlowFile flowFile : flowFiles) {
                totalSize += flowFile.getSize();
                session.getProvenanceReporter().receive(flowFile, file.toString());
            }

            if (totalSize > 0) {
                final String dataRate = stopWatch.calculateDataRate(totalSize);
                final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                logger.info("Created {} flowFiles from SequenceFile {}. Ingested in {} milliseconds at a rate of {}", new Object[]{
                        flowFiles.size(), file.toUri().toASCIIString(), millis, dataRate});
                logger.info("Transferred flowFiles {} to success", new Object[]{flowFiles});
                session.transfer(flowFiles, REL_SUCCESS);
            }
        }
    }
}
Example 9
Source File: GetHDFS.java From nifi with Apache License 2.0
protected void processBatchOfFiles(final List<Path> files, final ProcessContext context, final ProcessSession session) {
    // process the batch of files
    InputStream stream = null;
    CompressionCodec codec = null;
    Configuration conf = getConfiguration();
    FileSystem hdfs = getFileSystem();
    final boolean keepSourceFiles = context.getProperty(KEEP_SOURCE_FILE).asBoolean();
    final Double bufferSizeProp = context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B);
    int bufferSize = bufferSizeProp != null ? bufferSizeProp.intValue()
            : conf.getInt(BUFFER_SIZE_KEY, BUFFER_SIZE_DEFAULT);
    final Path rootDir = new Path(context.getProperty(DIRECTORY).evaluateAttributeExpressions().getValue());

    final CompressionType compressionType = CompressionType.valueOf(context.getProperty(COMPRESSION_CODEC).toString());
    final boolean inferCompressionCodec = compressionType == CompressionType.AUTOMATIC;
    if (inferCompressionCodec || compressionType != CompressionType.NONE) {
        codec = getCompressionCodec(context, getConfiguration());
    }
    final CompressionCodecFactory compressionCodecFactory = new CompressionCodecFactory(conf);

    for (final Path file : files) {
        try {
            if (!getUserGroupInformation().doAs((PrivilegedExceptionAction<Boolean>) () -> hdfs.exists(file))) {
                continue; // if file is no longer there then move on
            }
            final String originalFilename = file.getName();
            final String relativePath = getPathDifference(rootDir, file);

            stream = getUserGroupInformation().doAs((PrivilegedExceptionAction<FSDataInputStream>) () -> hdfs.open(file, bufferSize));

            final String outputFilename;
            // Check if we should infer compression codec
            if (inferCompressionCodec) {
                codec = compressionCodecFactory.getCodec(file);
            }
            // Check if compression codec is defined (inferred or otherwise)
            if (codec != null) {
                stream = codec.createInputStream(stream);
                outputFilename = StringUtils.removeEnd(originalFilename, codec.getDefaultExtension());
            } else {
                outputFilename = originalFilename;
            }

            FlowFile flowFile = session.create();

            final StopWatch stopWatch = new StopWatch(true);
            flowFile = session.importFrom(stream, flowFile);
            stopWatch.stop();
            final String dataRate = stopWatch.calculateDataRate(flowFile.getSize());
            final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);

            flowFile = session.putAttribute(flowFile, CoreAttributes.PATH.key(), relativePath.isEmpty() ? "." : relativePath);
            flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), outputFilename);

            if (!keepSourceFiles && !getUserGroupInformation().doAs((PrivilegedExceptionAction<Boolean>) () -> hdfs.delete(file, false))) {
                getLogger().warn("Could not remove {} from HDFS. Not ingesting this file ...", new Object[]{file});
                session.remove(flowFile);
                continue;
            }

            session.getProvenanceReporter().receive(flowFile, file.toString());
            session.transfer(flowFile, REL_SUCCESS);
            getLogger().info("retrieved {} from HDFS {} in {} milliseconds at a rate of {}",
                    new Object[]{flowFile, file, millis, dataRate});
            session.commit();
        } catch (final Throwable t) {
            getLogger().error("Error retrieving file {} from HDFS due to {}", new Object[]{file, t});
            session.rollback();
            context.yield();
        } finally {
            IOUtils.closeQuietly(stream);
            stream = null;
        }
    }
}
Example 10
Source File: ListenHTTPServlet.java From nifi with Apache License 2.0
private void handleException(final HttpServletRequest request, final HttpServletResponse response,
        final ProcessSession session, String foundSubject, final Throwable t) throws IOException {
    session.rollback();
    logger.error("Unable to receive file from Remote Host: [{}] SubjectDN [{}] due to {}",
            new Object[]{request.getRemoteHost(), foundSubject, t});
    response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, t.toString());
}
Example 11
Source File: AbstractWebSocketGatewayProcessor.java From nifi with Apache License 2.0
private void enqueueMessage(final WebSocketMessage incomingMessage) {
    final ProcessSession session = processSessionFactory.createSession();
    try {
        FlowFile messageFlowFile = session.create();
        final Map<String, String> attrs = new HashMap<>();
        attrs.put(ATTR_WS_CS_ID, webSocketService.getIdentifier());
        final WebSocketSessionInfo sessionInfo = incomingMessage.getSessionInfo();
        attrs.put(ATTR_WS_SESSION_ID, sessionInfo.getSessionId());
        attrs.put(ATTR_WS_ENDPOINT_ID, endpointId);
        attrs.put(ATTR_WS_LOCAL_ADDRESS, sessionInfo.getLocalAddress().toString());
        attrs.put(ATTR_WS_REMOTE_ADDRESS, sessionInfo.getRemoteAddress().toString());
        final WebSocketMessage.Type messageType = incomingMessage.getType();
        if (messageType != null) {
            attrs.put(ATTR_WS_MESSAGE_TYPE, messageType.name());
        }

        messageFlowFile = session.putAllAttributes(messageFlowFile, attrs);
        final byte[] payload = incomingMessage.getPayload();
        if (payload != null) {
            messageFlowFile = session.write(messageFlowFile, out -> {
                out.write(payload, incomingMessage.getOffset(), incomingMessage.getLength());
            });
        }

        session.getProvenanceReporter().receive(messageFlowFile, getTransitUri(sessionInfo));

        if (incomingMessage instanceof WebSocketConnectedMessage) {
            session.transfer(messageFlowFile, REL_CONNECTED);
        } else {
            switch (messageType) {
                case TEXT:
                    session.transfer(messageFlowFile, REL_MESSAGE_TEXT);
                    break;
                case BINARY:
                    session.transfer(messageFlowFile, REL_MESSAGE_BINARY);
                    break;
            }
        }
        session.commit();
    } catch (Exception e) {
        logger.error("Unable to fully process input due to " + e, e);
        session.rollback();
    }
}
Example 12
Source File: PutAzureEventHub.java From nifi with Apache License 2.0
/**
 * Joins all the futures so it can determine which flow files from given batch were sent successfully and which were not.
 *
 * @param context of this instance of the processor
 * @param session that handles all flow files sent within the future queue
 * @param stopWatch for time measurements
 * @param futureQueue a list of futures of messages that had been sent within above context and session before this method was called.
 */
protected void waitForAllFutures(
        final ProcessContext context,
        final ProcessSession session,
        final StopWatch stopWatch,
        final BlockingQueue<CompletableFuture<FlowFileResultCarrier<Relationship>>> futureQueue) {

    try {
        for (CompletableFuture<FlowFileResultCarrier<Relationship>> completableFuture : futureQueue) {
            completableFuture.join();

            final FlowFileResultCarrier<Relationship> flowFileResult = completableFuture.get();
            if (flowFileResult == null) {
                continue;
            }

            final FlowFile flowFile = flowFileResult.getFlowFile();

            if (flowFileResult.getResult() == REL_SUCCESS) {
                final String namespace = context.getProperty(NAMESPACE).getValue();
                final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
                session.getProvenanceReporter().send(flowFile,
                        "amqps://" + namespace + ".servicebus.windows.net" + "/" + eventHubName,
                        stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                session.transfer(flowFile, REL_SUCCESS);
            } else {
                final Throwable processException = flowFileResult.getException();
                getLogger().error("Failed to send {} to EventHub due to {}; routing to failure",
                        new Object[]{flowFile, processException}, processException);
                session.transfer(session.penalize(flowFile), REL_FAILURE);
            }
        }
    } catch (InterruptedException | ExecutionException | CancellationException | CompletionException e) {
        getLogger().error("Batch processing failed", e);
        session.rollback();

        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }

        throw new ProcessException("Batch processing failed", e);
    }
}
Example 13
Source File: StandardRemoteGroupPort.java From nifi with Apache License 2.0
private int transferFlowFiles(final Transaction transaction, final ProcessContext context, final ProcessSession session, final FlowFile firstFlowFile) throws IOException, ProtocolException {
    FlowFile flowFile = firstFlowFile;

    try {
        final String userDn = transaction.getCommunicant().getDistinguishedName();
        final long startSendingNanos = System.nanoTime();
        final StopWatch stopWatch = new StopWatch(true);
        long bytesSent = 0L;

        final SiteToSiteClientConfig siteToSiteClientConfig = getSiteToSiteClient().getConfig();
        final long maxBatchBytes = siteToSiteClientConfig.getPreferredBatchSize();
        final int maxBatchCount = siteToSiteClientConfig.getPreferredBatchCount();
        final long preferredBatchDuration = siteToSiteClientConfig.getPreferredBatchDuration(TimeUnit.NANOSECONDS);
        final long maxBatchDuration = preferredBatchDuration > 0 ? preferredBatchDuration : BATCH_SEND_NANOS;

        final Set<FlowFile> flowFilesSent = new HashSet<>();
        boolean continueTransaction = true;
        while (continueTransaction) {
            final long startNanos = System.nanoTime();
            // call codec.encode within a session callback so that we have the InputStream to read the FlowFile
            final FlowFile toWrap = flowFile;
            session.read(flowFile, new InputStreamCallback() {
                @Override
                public void process(final InputStream in) throws IOException {
                    final DataPacket dataPacket = new StandardDataPacket(toWrap.getAttributes(), in, toWrap.getSize());
                    transaction.send(dataPacket);
                }
            });

            final long transferNanos = System.nanoTime() - startNanos;
            final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);

            flowFilesSent.add(flowFile);
            bytesSent += flowFile.getSize();
            logger.debug("{} Sent {} to {}", this, flowFile, transaction.getCommunicant().getUrl());

            final String transitUri = transaction.getCommunicant().createTransitUri(flowFile.getAttribute(CoreAttributes.UUID.key()));
            flowFile = session.putAttribute(flowFile, SiteToSiteAttributes.S2S_PORT_ID.key(), getTargetIdentifier());
            session.getProvenanceReporter().send(flowFile, transitUri, "Remote DN=" + userDn, transferMillis, false);
            session.remove(flowFile);

            final long sendingNanos = System.nanoTime() - startSendingNanos;

            if (maxBatchCount > 0 && flowFilesSent.size() >= maxBatchCount) {
                flowFile = null;
            } else if (maxBatchBytes > 0 && bytesSent >= maxBatchBytes) {
                flowFile = null;
            } else if (sendingNanos >= maxBatchDuration) {
                flowFile = null;
            } else {
                flowFile = session.get();
            }

            continueTransaction = (flowFile != null);
        }

        transaction.confirm();

        // consume input stream entirely, ignoring its contents. If we
        // don't do this, the Connection will not be returned to the pool
        stopWatch.stop();
        final String uploadDataRate = stopWatch.calculateDataRate(bytesSent);
        final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
        final String dataSize = FormatUtils.formatDataSize(bytesSent);

        transaction.complete();
        session.commit();

        final String flowFileDescription = (flowFilesSent.size() < 20) ? flowFilesSent.toString() : flowFilesSent.size() + " FlowFiles";
        logger.info("{} Successfully sent {} ({}) to {} in {} milliseconds at a rate of {}", new Object[]{
                this, flowFileDescription, dataSize, transaction.getCommunicant().getUrl(), uploadMillis, uploadDataRate});

        return flowFilesSent.size();
    } catch (final Exception e) {
        session.rollback();
        throw e;
    }
}
Example 14
Source File: StandardRemoteGroupPort.java From localization_nifi with Apache License 2.0
private int transferFlowFiles(final Transaction transaction, final ProcessContext context, final ProcessSession session, final FlowFile firstFlowFile) throws IOException, ProtocolException {
    FlowFile flowFile = firstFlowFile;

    try {
        final String userDn = transaction.getCommunicant().getDistinguishedName();
        final long startSendingNanos = System.nanoTime();
        final StopWatch stopWatch = new StopWatch(true);
        long bytesSent = 0L;

        final Set<FlowFile> flowFilesSent = new HashSet<>();
        boolean continueTransaction = true;
        while (continueTransaction) {
            final long startNanos = System.nanoTime();
            // call codec.encode within a session callback so that we have the InputStream to read the FlowFile
            final FlowFile toWrap = flowFile;
            session.read(flowFile, new InputStreamCallback() {
                @Override
                public void process(final InputStream in) throws IOException {
                    final DataPacket dataPacket = new StandardDataPacket(toWrap.getAttributes(), in, toWrap.getSize());
                    transaction.send(dataPacket);
                }
            });

            final long transferNanos = System.nanoTime() - startNanos;
            final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);

            flowFilesSent.add(flowFile);
            bytesSent += flowFile.getSize();
            logger.debug("{} Sent {} to {}", this, flowFile, transaction.getCommunicant().getUrl());

            final String transitUri = transaction.getCommunicant().createTransitUri(flowFile.getAttribute(CoreAttributes.UUID.key()));
            session.getProvenanceReporter().send(flowFile, transitUri, "Remote DN=" + userDn, transferMillis, false);
            session.remove(flowFile);

            final long sendingNanos = System.nanoTime() - startSendingNanos;
            if (sendingNanos < BATCH_SEND_NANOS) {
                flowFile = session.get();
            } else {
                flowFile = null;
            }

            continueTransaction = (flowFile != null);
        }

        transaction.confirm();

        // consume input stream entirely, ignoring its contents. If we
        // don't do this, the Connection will not be returned to the pool
        stopWatch.stop();
        final String uploadDataRate = stopWatch.calculateDataRate(bytesSent);
        final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
        final String dataSize = FormatUtils.formatDataSize(bytesSent);

        transaction.complete();
        session.commit();

        final String flowFileDescription = (flowFilesSent.size() < 20) ? flowFilesSent.toString() : flowFilesSent.size() + " FlowFiles";
        logger.info("{} Successfully sent {} ({}) to {} in {} milliseconds at a rate of {}", new Object[]{
                this, flowFileDescription, dataSize, transaction.getCommunicant().getUrl(), uploadMillis, uploadDataRate});

        return flowFilesSent.size();
    } catch (final Exception e) {
        session.rollback();
        throw e;
    }
}
Example 15
Source File: AbstractFlowFileServerProtocol.java From localization_nifi with Apache License 2.0
@Override
public int receiveFlowFiles(final Peer peer, final ProcessContext context, final ProcessSession session, final FlowFileCodec codec) throws IOException, ProtocolException {
    if (!handshakeCompleted) {
        throw new IllegalStateException("Handshake has not been completed");
    }
    if (shutdown) {
        throw new IllegalStateException("Protocol is shutdown");
    }

    logger.debug("{} receiving FlowFiles from {}", this, peer);

    final CommunicationsSession commsSession = peer.getCommunicationsSession();
    final DataInputStream dis = new DataInputStream(commsSession.getInput().getInputStream());
    String remoteDn = commsSession.getUserDn();
    if (remoteDn == null) {
        remoteDn = "none";
    }

    final StopWatch stopWatch = new StopWatch(true);
    final CRC32 crc = new CRC32();

    // Peer has data. Otherwise, we would not have been called, because they would not have sent
    // a SEND_FLOWFILES request to us. Just decode the bytes into FlowFiles until the peer says it is
    // finished sending data.
    final Set<FlowFile> flowFilesReceived = new HashSet<>();
    long bytesReceived = 0L;
    boolean continueTransaction = true;
    while (continueTransaction) {
        final long startNanos = System.nanoTime();
        final InputStream flowFileInputStream = handshakeProperties.isUseGzip() ? new CompressionInputStream(dis) : dis;
        final CheckedInputStream checkedInputStream = new CheckedInputStream(flowFileInputStream, crc);

        final DataPacket dataPacket = codec.decode(checkedInputStream);
        if (dataPacket == null) {
            logger.debug("{} Received null dataPacket indicating the end of transaction from {}", this, peer);
            break;
        }

        FlowFile flowFile = session.create();
        flowFile = session.importFrom(dataPacket.getData(), flowFile);
        flowFile = session.putAllAttributes(flowFile, dataPacket.getAttributes());

        final long transferNanos = System.nanoTime() - startNanos;
        final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);
        final String sourceSystemFlowFileUuid = dataPacket.getAttributes().get(CoreAttributes.UUID.key());

        final String host = StringUtils.isEmpty(peer.getHost()) ? "unknown" : peer.getHost();
        final String port = peer.getPort() <= 0 ? "unknown" : String.valueOf(peer.getPort());

        final Map<String, String> attributes = new HashMap<>(4);
        attributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString());
        attributes.put(SiteToSiteAttributes.S2S_HOST.key(), host);
        attributes.put(SiteToSiteAttributes.S2S_ADDRESS.key(), host + ":" + port);

        flowFile = session.putAllAttributes(flowFile, attributes);

        final String transitUri = createTransitUri(peer, sourceSystemFlowFileUuid);
        session.getProvenanceReporter().receive(flowFile, transitUri,
                sourceSystemFlowFileUuid == null ? null : "urn:nifi:" + sourceSystemFlowFileUuid,
                "Remote Host=" + peer.getHost() + ", Remote DN=" + remoteDn, transferMillis);
        session.transfer(flowFile, Relationship.ANONYMOUS);
        flowFilesReceived.add(flowFile);
        bytesReceived += flowFile.getSize();

        final Response transactionResponse = readTransactionResponse(false, commsSession);
        switch (transactionResponse.getCode()) {
            case CONTINUE_TRANSACTION:
                logger.debug("{} Received ContinueTransaction indicator from {}", this, peer);
                break;
            case FINISH_TRANSACTION:
                logger.debug("{} Received FinishTransaction indicator from {}", this, peer);
                continueTransaction = false;
                break;
            case CANCEL_TRANSACTION:
                logger.info("{} Received CancelTransaction indicator from {} with explanation {}", this, peer, transactionResponse.getMessage());
                session.rollback();
                return 0;
            default:
                throw new ProtocolException("Received unexpected response from peer: when expecting Continue Transaction or Finish Transaction, received " + transactionResponse);
        }
    }

    // we received a FINISH_TRANSACTION indicator. Send back a CONFIRM_TRANSACTION message
    // to peer so that we can verify that the connection is still open. This is a two-phase commit,
    // which helps to prevent the chances of data duplication. Without doing this, we may commit the
    // session and then when we send the response back to the peer, the peer may have timed out and may not
    // be listening. As a result, it will re-send the data. By doing this two-phase commit, we narrow the
    // Critical Section involved in this transaction so that rather than the Critical Section being the
    // time window involved in the entire transaction, it is reduced to a simple round-trip conversation.
    logger.debug("{} Sending CONFIRM_TRANSACTION Response Code to {}", this, peer);
    String calculatedCRC = String.valueOf(crc.getValue());
    writeTransactionResponse(false, ResponseCode.CONFIRM_TRANSACTION, commsSession, calculatedCRC);

    FlowFileTransaction transaction = new FlowFileTransaction(session, context, stopWatch, bytesReceived, flowFilesReceived, calculatedCRC);
    return commitReceiveTransaction(peer, transaction);
}
Example 16
Source File: AbstractFlowFileServerProtocol.java From localization_nifi with Apache License 2.0
protected int commitTransferTransaction(Peer peer, FlowFileTransaction transaction) throws IOException {
    ProcessSession session = transaction.getSession();
    Set<FlowFile> flowFilesSent = transaction.getFlowFilesSent();

    // we've sent a FINISH_TRANSACTION. Now we'll wait for the peer to send a 'Confirm Transaction' response
    CommunicationsSession commsSession = peer.getCommunicationsSession();
    final Response transactionConfirmationResponse = readTransactionResponse(true, commsSession);
    if (transactionConfirmationResponse.getCode() == ResponseCode.CONFIRM_TRANSACTION) {
        // Confirm Checksum and echo back the confirmation.
        logger.debug("{} Received {} from {}", this, transactionConfirmationResponse, peer);
        final String receivedCRC = transactionConfirmationResponse.getMessage();

        if (getVersionNegotiator().getVersion() > 3) {
            String calculatedCRC = transaction.getCalculatedCRC();
            if (!receivedCRC.equals(calculatedCRC)) {
                writeTransactionResponse(true, ResponseCode.BAD_CHECKSUM, commsSession);
                session.rollback();
                throw new IOException(this + " Sent data to peer " + peer + " but calculated CRC32 Checksum as "
                        + calculatedCRC + " while peer calculated CRC32 Checksum as " + receivedCRC
                        + "; canceling transaction and rolling back session");
            }
        }

        writeTransactionResponse(true, ResponseCode.CONFIRM_TRANSACTION, commsSession, "");
    } else {
        throw new ProtocolException("Expected to receive 'Confirm Transaction' response from peer " + peer + " but received " + transactionConfirmationResponse);
    }

    final String flowFileDescription = flowFilesSent.size() < 20 ? flowFilesSent.toString() : flowFilesSent.size() + " FlowFiles";

    final Response transactionResponse;
    try {
        transactionResponse = readTransactionResponse(true, commsSession);
    } catch (final IOException e) {
        logger.error("{} Failed to receive a response from {} when expecting a TransactionFinished Indicator."
                + " It is unknown whether or not the peer successfully received/processed the data."
                + " Therefore, {} will be rolled back, possibly resulting in data duplication of {}",
                this, peer, session, flowFileDescription);
        session.rollback();
        throw e;
    }

    logger.debug("{} received {} from {}", new Object[]{this, transactionResponse, peer});
    if (transactionResponse.getCode() == ResponseCode.TRANSACTION_FINISHED_BUT_DESTINATION_FULL) {
        peer.penalize(port.getIdentifier(), port.getYieldPeriod(TimeUnit.MILLISECONDS));
    } else if (transactionResponse.getCode() != ResponseCode.TRANSACTION_FINISHED) {
        throw new ProtocolException("After sending data, expected TRANSACTION_FINISHED response but got " + transactionResponse);
    }

    session.commit();

    StopWatch stopWatch = transaction.getStopWatch();
    long bytesSent = transaction.getBytesSent();
    stopWatch.stop();
    final String uploadDataRate = stopWatch.calculateDataRate(bytesSent);
    final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
    final String dataSize = FormatUtils.formatDataSize(bytesSent);
    logger.info("{} Successfully sent {} ({}) to {} in {} milliseconds at a rate of {}", new Object[]{
            this, flowFileDescription, dataSize, peer, uploadMillis, uploadDataRate});

    return flowFilesSent.size();
}
Example 17
Source File: GetMongo.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();

    final Document query = context.getProperty(QUERY).isSet() ? Document.parse(context.getProperty(QUERY).getValue()) : null;
    final Document projection = context.getProperty(PROJECTION).isSet() ? Document.parse(context.getProperty(PROJECTION).getValue()) : null;
    final Document sort = context.getProperty(SORT).isSet() ? Document.parse(context.getProperty(SORT).getValue()) : null;

    final MongoCollection<Document> collection = getCollection(context);

    try {
        final FindIterable<Document> it = query != null ? collection.find(query) : collection.find();
        if (projection != null) {
            it.projection(projection);
        }
        if (sort != null) {
            it.sort(sort);
        }
        if (context.getProperty(LIMIT).isSet()) {
            it.limit(context.getProperty(LIMIT).asInteger());
        }
        if (context.getProperty(BATCH_SIZE).isSet()) {
            it.batchSize(context.getProperty(BATCH_SIZE).asInteger());
        }

        final MongoCursor<Document> cursor = it.iterator();
        try {
            FlowFile flowFile = null;
            while (cursor.hasNext()) {
                flowFile = session.create();
                flowFile = session.write(flowFile, new OutputStreamCallback() {
                    @Override
                    public void process(OutputStream out) throws IOException {
                        IOUtils.write(cursor.next().toJson(), out);
                    }
                });

                session.getProvenanceReporter().receive(flowFile, context.getProperty(URI).getValue());
                session.transfer(flowFile, REL_SUCCESS);
            }

            session.commit();
        } finally {
            cursor.close();
        }
    } catch (final RuntimeException e) {
        context.yield();
        session.rollback();
        logger.error("Failed to execute query {} due to {}", new Object[]{query, e}, e);
    }
}
Example 18
Source File: GetHDFSSequenceFile.java From localization_nifi with Apache License 2.0
@Override
protected void processBatchOfFiles(final List<Path> files, final ProcessContext context, final ProcessSession session) {
    final Configuration conf = getConfiguration();
    final FileSystem hdfs = getFileSystem();
    final String flowFileContentValue = context.getProperty(FLOWFILE_CONTENT).getValue();
    final boolean keepSourceFiles = context.getProperty(KEEP_SOURCE_FILE).asBoolean();
    final Double bufferSizeProp = context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B);
    if (bufferSizeProp != null) {
        int bufferSize = bufferSizeProp.intValue();
        conf.setInt(BUFFER_SIZE_KEY, bufferSize);
    }
    ComponentLog logger = getLogger();

    final SequenceFileReader<Set<FlowFile>> reader;
    if (flowFileContentValue.equalsIgnoreCase(VALUE_ONLY)) {
        reader = new ValueReader(session);
    } else {
        reader = new KeyValueReader(session);
    }

    Set<FlowFile> flowFiles = Collections.emptySet();
    for (final Path file : files) {
        if (!this.isScheduled()) {
            break; // This processor should stop running immediately.
        }

        final StopWatch stopWatch = new StopWatch(false);
        try {
            stopWatch.start();
            if (!hdfs.exists(file)) {
                continue; // If file is no longer here move on.
            }
            logger.debug("Reading file");
            flowFiles = getFlowFiles(conf, hdfs, reader, file);
            if (!keepSourceFiles && !hdfs.delete(file, false)) {
                logger.warn("Unable to delete path " + file.toString() + " from HDFS. Will likely be picked up over and over...");
            }
        } catch (Throwable t) {
            logger.error("Error retrieving file {} from HDFS due to {}", new Object[]{file, t});
            session.rollback();
            context.yield();
        } finally {
            stopWatch.stop();
            long totalSize = 0;
            for (FlowFile flowFile : flowFiles) {
                totalSize += flowFile.getSize();
                session.getProvenanceReporter().receive(flowFile, file.toString());
            }

            if (totalSize > 0) {
                final String dataRate = stopWatch.calculateDataRate(totalSize);
                final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                logger.info("Created {} flowFiles from SequenceFile {}. Ingested in {} milliseconds at a rate of {}", new Object[]{
                        flowFiles.size(), file.toUri().toASCIIString(), millis, dataRate});
                logger.info("Transferred flowFiles {} to success", new Object[]{flowFiles});
                session.transfer(flowFiles, REL_SUCCESS);
            }
        }
    }
}
Example 19
Source File: DeleteHDFS.java From localization_nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    String fileOrDirectoryName = null;
    FlowFile flowFile = session.get();

    // If this processor has an incoming connection, then do not run unless a
    // FlowFile is actually sent through
    if (flowFile == null && context.hasIncomingConnection()) {
        context.yield();
        return;
    }

    if (flowFile != null) {
        fileOrDirectoryName = context.getProperty(FILE_OR_DIRECTORY).evaluateAttributeExpressions(flowFile).getValue();
    } else {
        fileOrDirectoryName = context.getProperty(FILE_OR_DIRECTORY).evaluateAttributeExpressions().getValue();
    }

    final FileSystem fileSystem = getFileSystem();
    try {
        // Check if the user has supplied a file or directory pattern
        List<Path> pathList = Lists.newArrayList();
        if (GLOB_MATCHER.reset(fileOrDirectoryName).find()) {
            FileStatus[] fileStatuses = fileSystem.globStatus(new Path(fileOrDirectoryName));
            if (fileStatuses != null) {
                for (FileStatus fileStatus : fileStatuses) {
                    pathList.add(fileStatus.getPath());
                }
            }
        } else {
            pathList.add(new Path(fileOrDirectoryName));
        }

        Map<String, String> attributes = Maps.newHashMapWithExpectedSize(2);
        for (Path path : pathList) {
            attributes.put("filename", path.getName());
            attributes.put("path", path.getParent().toString());
            if (fileSystem.exists(path)) {
                fileSystem.delete(path, context.getProperty(RECURSIVE).asBoolean());
                if (!context.hasIncomingConnection()) {
                    flowFile = session.create();
                }
                session.transfer(session.putAllAttributes(flowFile, attributes), REL_SUCCESS);
            } else {
                getLogger().warn("File (" + path + ") does not exist");
                if (!context.hasIncomingConnection()) {
                    flowFile = session.create();
                }
                session.transfer(session.putAllAttributes(flowFile, attributes), REL_FAILURE);
            }
        }
    } catch (IOException e) {
        getLogger().warn("Error processing delete for file or directory", e);
        if (flowFile != null) {
            session.rollback(true);
        }
    }
}
Example 20
Source File: AbstractFlowFileServerProtocol.java From nifi with Apache License 2.0
protected int commitReceiveTransaction(Peer peer, FlowFileTransaction transaction) throws IOException {
    CommunicationsSession commsSession = peer.getCommunicationsSession();
    ProcessSession session = transaction.getSession();
    final Response confirmTransactionResponse = readTransactionResponse(false, commsSession);
    logger.debug("{} Received {} from {}", this, confirmTransactionResponse, peer);

    switch (confirmTransactionResponse.getCode()) {
        case CONFIRM_TRANSACTION:
            break;
        case BAD_CHECKSUM:
            session.rollback();
            throw new IOException(this + " Received a BadChecksum response from peer " + peer);
        default:
            throw new ProtocolException(this + " Received unexpected Response Code from peer " + peer + " : " + confirmTransactionResponse + "; expected 'Confirm Transaction' Response Code");
    }

    // Commit the session so that we have persisted the data
    session.commit();

    if (transaction.getContext().getAvailableRelationships().isEmpty()) {
        // Confirm that we received the data and the peer can now discard it but that the peer should not
        // send any more data for a bit
        logger.debug("{} Sending TRANSACTION_FINISHED_BUT_DESTINATION_FULL to {}", this, peer);
        writeTransactionResponse(false, ResponseCode.TRANSACTION_FINISHED_BUT_DESTINATION_FULL, commsSession);
    } else {
        // Confirm that we received the data and the peer can now discard it
        logger.debug("{} Sending TRANSACTION_FINISHED to {}", this, peer);
        writeTransactionResponse(false, ResponseCode.TRANSACTION_FINISHED, commsSession);
    }

    Set<FlowFile> flowFilesReceived = transaction.getFlowFilesSent();
    long bytesReceived = transaction.getBytesSent();
    StopWatch stopWatch = transaction.getStopWatch();
    stopWatch.stop();

    final String flowFileDescription = flowFilesReceived.size() < 20 ? flowFilesReceived.toString() : flowFilesReceived.size() + " FlowFiles";
    final String uploadDataRate = stopWatch.calculateDataRate(bytesReceived);
    final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
    final String dataSize = FormatUtils.formatDataSize(bytesReceived);
    logger.info("{} Successfully received {} ({}) from {} in {} milliseconds at a rate of {}", new Object[]{
            this, flowFileDescription, dataSize, peer, uploadMillis, uploadDataRate});

    return flowFilesReceived.size();
}