Java Code Examples for org.apache.flink.configuration.Configuration#getLong()
The following examples show how to use org.apache.flink.configuration.Configuration#getLong().
Each example is taken from an open-source project; the project, source file, and license are noted above each snippet.
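Before the project examples, here is a minimal, self-contained sketch of the two getLong overloads the snippets below rely on: reading through a ConfigOption<Long> (which carries its own default) and reading a raw string key with an explicit fallback. The option name "demo.timeout" and the key "demo.block-size" are made up for illustration, and the ConfigOptions builder style shown here may differ slightly between Flink versions; real code would use predefined options such as RestOptions.CONNECTION_TIMEOUT or WebOptions.TIMEOUT, as in the examples that follow.

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;

public class GetLongSketch {

    // Hypothetical option for illustration only; production code reads the
    // predefined options (RestOptions, WebOptions, ...) shown in the examples below.
    private static final ConfigOption<Long> DEMO_TIMEOUT =
            ConfigOptions.key("demo.timeout").defaultValue(10_000L);

    public static void main(String[] args) {
        Configuration config = new Configuration();

        // Overload 1: read via a ConfigOption; the option's default (10000) is
        // returned because nothing has been set yet.
        long timeout = config.getLong(DEMO_TIMEOUT);

        // Overload 2: read via a raw string key with an explicit fallback value.
        long blockSize = config.getLong("demo.block-size", 64L);

        // Values written with setLong are returned by subsequent getLong calls.
        config.setLong(DEMO_TIMEOUT, 30_000L);
        long updated = config.getLong(DEMO_TIMEOUT);

        System.out.println(timeout + ", " + blockSize + ", " + updated);
    }
}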
Example 1
Source File: RestClientConfiguration.java From flink with Apache License 2.0
/**
 * Creates and returns a new {@link RestClientConfiguration} from the given {@link Configuration}.
 *
 * @param config configuration from which the REST client endpoint configuration should be created from
 * @return REST client endpoint configuration
 * @throws ConfigurationException if SSL was configured incorrectly
 */
public static RestClientConfiguration fromConfiguration(Configuration config) throws ConfigurationException {
    Preconditions.checkNotNull(config);

    final SSLHandlerFactory sslHandlerFactory;
    if (SSLUtils.isRestSSLEnabled(config)) {
        try {
            sslHandlerFactory = SSLUtils.createRestClientSSLEngineFactory(config);
        } catch (Exception e) {
            throw new ConfigurationException("Failed to initialize SSLContext for the REST client", e);
        }
    } else {
        sslHandlerFactory = null;
    }

    final long connectionTimeout = config.getLong(RestOptions.CONNECTION_TIMEOUT);

    final long idlenessTimeout = config.getLong(RestOptions.IDLENESS_TIMEOUT);

    int maxContentLength = config.getInteger(RestOptions.CLIENT_MAX_CONTENT_LENGTH);

    return new RestClientConfiguration(sslHandlerFactory, connectionTimeout, idlenessTimeout, maxContentLength);
}
Example 2
Source File: SessionClusterEntrypoint.java From Flink-CEPplus with Apache License 2.0
@Override
protected ArchivedExecutionGraphStore createSerializableExecutionGraphStore(
        Configuration configuration,
        ScheduledExecutor scheduledExecutor) throws IOException {
    final File tmpDir = new File(ConfigurationUtils.parseTempDirectories(configuration)[0]);

    final Time expirationTime = Time.seconds(configuration.getLong(JobManagerOptions.JOB_STORE_EXPIRATION_TIME));
    final long maximumCacheSizeBytes = configuration.getLong(JobManagerOptions.JOB_STORE_CACHE_SIZE);

    return new FileArchivedExecutionGraphStore(
        tmpDir,
        expirationTime,
        maximumCacheSizeBytes,
        scheduledExecutor,
        Ticker.systemTicker());
}
Example 3
Source File: RestHandlerConfiguration.java From flink with Apache License 2.0
public static RestHandlerConfiguration fromConfiguration(Configuration configuration) {
    final long refreshInterval = configuration.getLong(WebOptions.REFRESH_INTERVAL);

    final int maxCheckpointStatisticCacheEntries = configuration.getInteger(WebOptions.CHECKPOINTS_HISTORY_SIZE);

    final Time timeout = Time.milliseconds(configuration.getLong(WebOptions.TIMEOUT));

    final String rootDir = "flink-web-ui";
    final File webUiDir = new File(configuration.getString(WebOptions.TMP_DIR), rootDir);

    return new RestHandlerConfiguration(
        refreshInterval,
        maxCheckpointStatisticCacheEntries,
        timeout,
        webUiDir);
}
Example 4
Source File: InputProcessorUtil.java From Flink-CEPplus with Apache License 2.0
public static CheckpointBarrierHandler createCheckpointBarrierHandler(
        StreamTask<?, ?> checkpointedTask,
        CheckpointingMode checkpointMode,
        IOManager ioManager,
        InputGate inputGate,
        Configuration taskManagerConfig) throws IOException {

    CheckpointBarrierHandler barrierHandler;
    if (checkpointMode == CheckpointingMode.EXACTLY_ONCE) {
        long maxAlign = taskManagerConfig.getLong(TaskManagerOptions.TASK_CHECKPOINT_ALIGNMENT_BYTES_LIMIT);
        if (!(maxAlign == -1 || maxAlign > 0)) {
            throw new IllegalConfigurationException(
                TaskManagerOptions.TASK_CHECKPOINT_ALIGNMENT_BYTES_LIMIT.key()
                + " must be positive or -1 (infinite)");
        }

        if (taskManagerConfig.getBoolean(TaskManagerOptions.NETWORK_CREDIT_MODEL)) {
            barrierHandler = new BarrierBuffer(inputGate, new CachedBufferBlocker(inputGate.getPageSize()), maxAlign);
        } else {
            barrierHandler = new BarrierBuffer(inputGate, new BufferSpiller(ioManager, inputGate.getPageSize()), maxAlign);
        }
    } else if (checkpointMode == CheckpointingMode.AT_LEAST_ONCE) {
        barrierHandler = new BarrierTracker(inputGate);
    } else {
        throw new IllegalArgumentException("Unrecognized Checkpointing Mode: " + checkpointMode);
    }

    if (checkpointedTask != null) {
        barrierHandler.registerCheckpointEventHandler(checkpointedTask);
    }

    return barrierHandler;
}
Example 5
Source File: TaskManagerHeapSizeCalculationJavaBashTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Calculates the heap size via
 * {@link TaskManagerServices#calculateHeapSizeMB(long, Configuration)} and the shell script
 * and verifies that these are equal.
 *
 * @param config flink configuration
 * @param tolerance tolerate values that are off by this factor (0.01 = 1%)
 */
private void compareNetworkBufJavaVsScript(final Configuration config, final float tolerance) throws IOException {

    final long totalJavaMemorySizeMB = config.getLong(KEY_TASKM_MEM_SIZE, 0L);

    long javaNetworkBufMem = TaskManagerServices.calculateNetworkBufferMemory(totalJavaMemorySizeMB << 20, config);

    String[] command = {"src/test/bin/calcTMNetBufMem.sh",
        totalJavaMemorySizeMB + "m",
        String.valueOf(config.getFloat(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION)),
        config.getString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN),
        config.getString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX)};

    String scriptOutput = executeScript(command);

    long absoluteTolerance = (long) (javaNetworkBufMem * tolerance);
    if (absoluteTolerance < 1) {
        assertEquals(
            "Different network buffer memory sizes with configuration: " + config.toString(),
            String.valueOf(javaNetworkBufMem), scriptOutput);
    } else {
        Long scriptNetworkBufMem = Long.valueOf(scriptOutput);
        assertThat(
            "Different network buffer memory sizes (Java: " + javaNetworkBufMem + ", Script: " + scriptNetworkBufMem +
                ") with configuration: " + config.toString(), scriptNetworkBufMem,
            allOf(greaterThanOrEqualTo(javaNetworkBufMem - absoluteTolerance),
                lessThanOrEqualTo(javaNetworkBufMem + absoluteTolerance)));
    }
}
Example 6
Source File: RestClusterClientConfiguration.java From Flink-CEPplus with Apache License 2.0
public static RestClusterClientConfiguration fromConfiguration(Configuration config) throws ConfigurationException {
    RestClientConfiguration restClientConfiguration = RestClientConfiguration.fromConfiguration(config);

    final long awaitLeaderTimeout = config.getLong(RestOptions.AWAIT_LEADER_TIMEOUT);
    final int retryMaxAttempts = config.getInteger(RestOptions.RETRY_MAX_ATTEMPTS);
    final long retryDelay = config.getLong(RestOptions.RETRY_DELAY);

    return new RestClusterClientConfiguration(restClientConfiguration, awaitLeaderTimeout, retryMaxAttempts, retryDelay);
}
Example 7
Source File: HeartbeatServices.java From flink with Apache License 2.0
/**
 * Creates an HeartbeatServices instance from a {@link Configuration}.
 *
 * @param configuration Configuration to be used for the HeartbeatServices creation
 * @return An HeartbeatServices instance created from the given configuration
 */
public static HeartbeatServices fromConfiguration(Configuration configuration) {
    long heartbeatInterval = configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_INTERVAL);

    long heartbeatTimeout = configuration.getLong(HeartbeatManagerOptions.HEARTBEAT_TIMEOUT);

    return new HeartbeatServices(heartbeatInterval, heartbeatTimeout);
}
Example 8
Source File: SlotManagerConfiguration.java From flink with Apache License 2.0
private static Time getSlotRequestTimeout(final Configuration configuration) {
    final long slotRequestTimeoutMs;
    if (configuration.contains(ResourceManagerOptions.SLOT_REQUEST_TIMEOUT)) {
        LOGGER.warn("Config key {} is deprecated; use {} instead.",
            ResourceManagerOptions.SLOT_REQUEST_TIMEOUT,
            JobManagerOptions.SLOT_REQUEST_TIMEOUT);
        slotRequestTimeoutMs = configuration.getLong(ResourceManagerOptions.SLOT_REQUEST_TIMEOUT);
    } else {
        slotRequestTimeoutMs = configuration.getLong(JobManagerOptions.SLOT_REQUEST_TIMEOUT);
    }
    return Time.milliseconds(slotRequestTimeoutMs);
}
Example 9
Source File: MetricFetcherImpl.java From flink with Apache License 2.0
@Nonnull
public static <T extends RestfulGateway> MetricFetcherImpl<T> fromConfiguration(
        final Configuration configuration,
        final MetricQueryServiceRetriever metricQueryServiceGatewayRetriever,
        final GatewayRetriever<T> dispatcherGatewayRetriever,
        final ExecutorService executor) {
    final Time timeout = Time.milliseconds(configuration.getLong(WebOptions.TIMEOUT));
    final long updateInterval = configuration.getLong(MetricOptions.METRIC_FETCHER_UPDATE_INTERVAL);

    return new MetricFetcherImpl<>(
        dispatcherGatewayRetriever,
        metricQueryServiceGatewayRetriever,
        executor,
        timeout,
        updateInterval);
}
Example 10
Source File: RetryingRegistrationConfiguration.java From flink with Apache License 2.0
public static RetryingRegistrationConfiguration fromConfiguration(final Configuration configuration) {
    long initialRegistrationTimeoutMillis = configuration.getLong(ClusterOptions.INITIAL_REGISTRATION_TIMEOUT);
    long maxRegistrationTimeoutMillis = configuration.getLong(ClusterOptions.MAX_REGISTRATION_TIMEOUT);
    long errorDelayMillis = configuration.getLong(ClusterOptions.ERROR_REGISTRATION_DELAY);
    long refusedDelayMillis = configuration.getLong(ClusterOptions.REFUSED_REGISTRATION_DELAY);

    return new RetryingRegistrationConfiguration(
        initialRegistrationTimeoutMillis,
        maxRegistrationTimeoutMillis,
        errorDelayMillis,
        refusedDelayMillis);
}
Example 11
Source File: PythonOptionsTest.java From flink with Apache License 2.0
@Test
public void testBundleTime() {
    final Configuration configuration = new Configuration();
    final long defaultBundleTime = configuration.getLong(PythonOptions.MAX_BUNDLE_TIME_MILLS);
    assertThat(defaultBundleTime, is(equalTo(PythonOptions.MAX_BUNDLE_TIME_MILLS.defaultValue())));

    final long expectedBundleTime = 100;
    configuration.setLong(PythonOptions.MAX_BUNDLE_TIME_MILLS, expectedBundleTime);
    final long actualBundleSize = configuration.getLong(PythonOptions.MAX_BUNDLE_TIME_MILLS);
    assertThat(actualBundleSize, is(equalTo(expectedBundleTime)));
}
Example 12
Source File: BinaryOutputFormat.java From flink with Apache License 2.0
@Override
public void configure(Configuration parameters) {
    super.configure(parameters);

    // read own parameters
    this.blockSize = parameters.getLong(BLOCK_SIZE_PARAMETER_KEY, NATIVE_BLOCK_SIZE);
    if (this.blockSize < 1 && this.blockSize != NATIVE_BLOCK_SIZE) {
        throw new IllegalArgumentException("The block size parameter must be set and larger than 0.");
    }
    if (this.blockSize > Integer.MAX_VALUE) {
        throw new UnsupportedOperationException("Currently only block size up to Integer.MAX_VALUE are supported");
    }
}
Example 13
Source File: FileInputFormat.java From Flink-CEPplus with Apache License 2.0
/**
 * Initialize defaults for input format. Needs to be a static method because it is configured for local
 * cluster execution.
 * @param configuration The configuration to load defaults from
 */
private static void initDefaultsFromConfiguration(Configuration configuration) {
    final long to = configuration.getLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY,
        ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);

    if (to < 0) {
        LOG.error("Invalid timeout value for filesystem stream opening: " + to + ". Using default value of " +
            ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
        DEFAULT_OPENING_TIMEOUT = ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT;
    } else if (to == 0) {
        DEFAULT_OPENING_TIMEOUT = 300000; // 5 minutes
    } else {
        DEFAULT_OPENING_TIMEOUT = to;
    }
}
Example 14
Source File: TaskManagerHeapSizeCalculationJavaBashTest.java From Flink-CEPplus with Apache License 2.0
/**
 * Calculates the heap size via
 * {@link TaskManagerServices#calculateHeapSizeMB(long, Configuration)} and the shell script
 * and verifies that these are equal.
 *
 * @param config flink configuration
 * @param tolerance tolerate values that are off by this factor (0.01 = 1%)
 */
private void compareHeapSizeJavaVsScript(final Configuration config, float tolerance) throws IOException {
    final long totalJavaMemorySizeMB = config.getLong(KEY_TASKM_MEM_SIZE, 0L);

    long javaHeapSizeMB = TaskManagerServices.calculateHeapSizeMB(totalJavaMemorySizeMB, config);

    String[] command = {"src/test/bin/calcTMHeapSizeMB.sh",
        totalJavaMemorySizeMB + "m",
        String.valueOf(config.getBoolean(TaskManagerOptions.MEMORY_OFF_HEAP)),
        String.valueOf(config.getFloat(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION)),
        config.getString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN),
        config.getString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX),
        config.getString(TaskManagerOptions.MANAGED_MEMORY_SIZE),
        String.valueOf(config.getFloat(TaskManagerOptions.MANAGED_MEMORY_FRACTION))};
    String scriptOutput = executeScript(command);

    long absoluteTolerance = (long) (javaHeapSizeMB * tolerance);
    if (absoluteTolerance < 1) {
        assertEquals("Different heap sizes with configuration: " + config.toString(),
            String.valueOf(javaHeapSizeMB), scriptOutput);
    } else {
        Long scriptHeapSizeMB = Long.valueOf(scriptOutput);
        assertThat(
            "Different heap sizes (Java: " + javaHeapSizeMB + ", Script: " + scriptHeapSizeMB +
                ") with configuration: " + config.toString(), scriptHeapSizeMB,
            allOf(greaterThanOrEqualTo(javaHeapSizeMB - absoluteTolerance),
                lessThanOrEqualTo(javaHeapSizeMB + absoluteTolerance)));
    }
}
Example 15
Source File: LimitedConnectionsFileSystem.java From flink with Apache License 2.0
/**
 * Parses and returns the settings for connection limiting, for the file system with
 * the given file system scheme.
 *
 * @param config The configuration to check.
 * @param fsScheme The file system scheme.
 *
 * @return The parsed configuration, or null, if no connection limiting is configured.
 */
@Nullable
public static ConnectionLimitingSettings fromConfig(Configuration config, String fsScheme) {
    checkNotNull(fsScheme, "fsScheme");
    checkNotNull(config, "config");

    final ConfigOption<Integer> totalLimitOption = CoreOptions.fileSystemConnectionLimit(fsScheme);
    final ConfigOption<Integer> limitInOption = CoreOptions.fileSystemConnectionLimitIn(fsScheme);
    final ConfigOption<Integer> limitOutOption = CoreOptions.fileSystemConnectionLimitOut(fsScheme);

    final int totalLimit = config.getInteger(totalLimitOption);
    final int limitIn = config.getInteger(limitInOption);
    final int limitOut = config.getInteger(limitOutOption);

    checkLimit(totalLimit, totalLimitOption);
    checkLimit(limitIn, limitInOption);
    checkLimit(limitOut, limitOutOption);

    // create the settings only, if at least one limit is configured
    if (totalLimit <= 0 && limitIn <= 0 && limitOut <= 0) {
        // no limit configured
        return null;
    } else {
        final ConfigOption<Long> openTimeoutOption =
            CoreOptions.fileSystemConnectionLimitTimeout(fsScheme);
        final ConfigOption<Long> inactivityTimeoutOption =
            CoreOptions.fileSystemConnectionLimitStreamInactivityTimeout(fsScheme);

        final long openTimeout = config.getLong(openTimeoutOption);
        final long inactivityTimeout = config.getLong(inactivityTimeoutOption);

        checkTimeout(openTimeout, openTimeoutOption);
        checkTimeout(inactivityTimeout, inactivityTimeoutOption);

        return new ConnectionLimitingSettings(
            totalLimit == -1 ? 0 : totalLimit,
            limitIn == -1 ? 0 : limitIn,
            limitOut == -1 ? 0 : limitOut,
            openTimeout,
            inactivityTimeout);
    }
}
Example 16
Source File: RheemFileOutputFormat.java From rheem with Apache License 2.0
@Override
public void configure(Configuration parameters) {
    try {
        // get the output file path, if it was not yet set
        if (this.outputFilePath == null) {
            // get the file parameter
            String filePath = parameters.getString(FILE_PARAMETER_KEY, null);
            if (filePath == null) {
                throw new IllegalArgumentException("The output path has been specified neither via constructor/setters" +
                    ", nor via the Configuration.");
            }

            try {
                this.outputFilePath = new Path(filePath);
            } catch (RuntimeException rex) {
                throw new RuntimeException("Could not create a valid URI from the given file path name: " + rex.getMessage());
            }
        }

        // check if have not been set and use the defaults in that case
        if (this.writeMode == null) {
            this.writeMode = DEFAULT_WRITE_MODE;
        }

        if (this.outputDirectoryMode == null) {
            this.outputDirectoryMode = DEFAULT_OUTPUT_DIRECTORY_MODE;
        }

        // read own parameters
        this.blockSize = parameters.getLong(BLOCK_SIZE_PARAMETER_KEY, NATIVE_BLOCK_SIZE);
        if (this.blockSize < 1 && this.blockSize != NATIVE_BLOCK_SIZE) {
            throw new IllegalArgumentException("The block size parameter must be set and larger than 0.");
        }
        if (this.blockSize > Integer.MAX_VALUE) {
            throw new UnsupportedOperationException("Currently only block size up to Integer.MAX_VALUE are supported");
        }
    } catch (Exception e) {
        throw new RheemException(e);
    }
}
Example 17
Source File: SourceReaderOptions.java From flink with Apache License 2.0
public SourceReaderOptions(Configuration config) {
    this.sourceReaderCloseTimeout = config.getLong(SOURCE_READER_CLOSE_TIMEOUT);
    this.elementQueueCapacity = config.getInteger(ELEMENT_QUEUE_CAPACITY);
}
Example 18
Source File: BlobServer.java From flink with Apache License 2.0
/**
 * Instantiates a new BLOB server and binds it to a free network port.
 *
 * @param config Configuration to be used to instantiate the BlobServer
 * @param blobStore BlobStore to store blobs persistently
 *
 * @throws IOException
 *         thrown if the BLOB server cannot bind to a free network port or if the
 *         (local or distributed) file storage cannot be created or is not usable
 */
public BlobServer(Configuration config, BlobStore blobStore) throws IOException {
    this.blobServiceConfiguration = checkNotNull(config);
    this.blobStore = checkNotNull(blobStore);
    this.readWriteLock = new ReentrantReadWriteLock();

    // configure and create the storage directory
    this.storageDir = BlobUtils.initLocalStorageDirectory(config);
    LOG.info("Created BLOB server storage directory {}", storageDir);

    // configure the maximum number of concurrent connections
    final int maxConnections = config.getInteger(BlobServerOptions.FETCH_CONCURRENT);
    if (maxConnections >= 1) {
        this.maxConnections = maxConnections;
    } else {
        LOG.warn("Invalid value for maximum connections in BLOB server: {}. Using default value of {}",
            maxConnections, BlobServerOptions.FETCH_CONCURRENT.defaultValue());
        this.maxConnections = BlobServerOptions.FETCH_CONCURRENT.defaultValue();
    }

    // configure the backlog of connections
    int backlog = config.getInteger(BlobServerOptions.FETCH_BACKLOG);
    if (backlog < 1) {
        LOG.warn("Invalid value for BLOB connection backlog: {}. Using default value of {}",
            backlog, BlobServerOptions.FETCH_BACKLOG.defaultValue());
        backlog = BlobServerOptions.FETCH_BACKLOG.defaultValue();
    }

    // Initializing the clean up task
    this.cleanupTimer = new Timer(true);

    this.cleanupInterval = config.getLong(BlobServerOptions.CLEANUP_INTERVAL) * 1000;
    this.cleanupTimer.schedule(
        new TransientBlobCleanupTask(blobExpiryTimes, readWriteLock.writeLock(), storageDir, LOG),
        cleanupInterval, cleanupInterval);

    this.shutdownHook = ShutdownHookUtil.addShutdownHook(this, getClass().getSimpleName(), LOG);

    // ----------------------- start the server -------------------

    final String serverPortRange = config.getString(BlobServerOptions.PORT);
    final Iterator<Integer> ports = NetUtils.getPortRangeFromString(serverPortRange);

    final ServerSocketFactory socketFactory;
    if (SSLUtils.isInternalSSLEnabled(config) && config.getBoolean(BlobServerOptions.SSL_ENABLED)) {
        try {
            socketFactory = SSLUtils.createSSLServerSocketFactory(config);
        } catch (Exception e) {
            throw new IOException("Failed to initialize SSL for the blob server", e);
        }
    } else {
        socketFactory = ServerSocketFactory.getDefault();
    }

    final int finalBacklog = backlog;
    final String bindHost = config.getOptional(JobManagerOptions.BIND_HOST).orElseGet(NetUtils::getWildcardIPAddress);

    this.serverSocket = NetUtils.createSocketFromPorts(
        ports,
        (port) -> socketFactory.createServerSocket(port, finalBacklog, InetAddress.getByName(bindHost)));

    if (serverSocket == null) {
        throw new IOException("Unable to open BLOB Server in specified port range: " + serverPortRange);
    }

    // start the server thread
    setName("BLOB Server listener at " + getPort());
    setDaemon(true);

    if (LOG.isInfoEnabled()) {
        LOG.info("Started BLOB server at {}:{} - max concurrent requests: {} - max backlog: {}",
            serverSocket.getInetAddress().getHostAddress(), getPort(), maxConnections, backlog);
    }
}
Example 19
Source File: StreamSource.java From flink with Apache License 2.0
public void run(final Object lockingObject,
        final StreamStatusMaintainer streamStatusMaintainer,
        final Output<StreamRecord<OUT>> collector,
        final OperatorChain<?, ?> operatorChain) throws Exception {

    final TimeCharacteristic timeCharacteristic = getOperatorConfig().getTimeCharacteristic();

    final Configuration configuration = this.getContainingTask().getEnvironment().getTaskManagerInfo().getConfiguration();
    final long latencyTrackingInterval = getExecutionConfig().isLatencyTrackingConfigured()
        ? getExecutionConfig().getLatencyTrackingInterval()
        : configuration.getLong(MetricOptions.LATENCY_INTERVAL);

    LatencyMarksEmitter<OUT> latencyEmitter = null;
    if (latencyTrackingInterval > 0) {
        latencyEmitter = new LatencyMarksEmitter<>(
            getProcessingTimeService(),
            collector,
            latencyTrackingInterval,
            this.getOperatorID(),
            getRuntimeContext().getIndexOfThisSubtask());
    }

    final long watermarkInterval = getRuntimeContext().getExecutionConfig().getAutoWatermarkInterval();

    this.ctx = StreamSourceContexts.getSourceContext(
        timeCharacteristic,
        getProcessingTimeService(),
        lockingObject,
        streamStatusMaintainer,
        collector,
        watermarkInterval,
        -1);

    try {
        userFunction.run(ctx);

        // if we get here, then the user function either exited after being done (finite source)
        // or the function was canceled or stopped. For the finite source case, we should emit
        // a final watermark that indicates that we reached the end of event-time, and end inputs
        // of the operator chain
        if (!isCanceledOrStopped()) {
            advanceToEndOfEventTime();

            synchronized (lockingObject) {
                operatorChain.endInput(1);
            }
        }
    } finally {
        // make sure that the context is closed in any case
        ctx.close();
        if (latencyEmitter != null) {
            latencyEmitter.close();
        }
    }
}
Example 20
Source File: StreamSource.java From Flink-CEPplus with Apache License 2.0
public void run(final Object lockingObject,
        final StreamStatusMaintainer streamStatusMaintainer,
        final Output<StreamRecord<OUT>> collector) throws Exception {

    final TimeCharacteristic timeCharacteristic = getOperatorConfig().getTimeCharacteristic();

    final Configuration configuration = this.getContainingTask().getEnvironment().getTaskManagerInfo().getConfiguration();
    final long latencyTrackingInterval = getExecutionConfig().isLatencyTrackingConfigured()
        ? getExecutionConfig().getLatencyTrackingInterval()
        : configuration.getLong(MetricOptions.LATENCY_INTERVAL);

    LatencyMarksEmitter<OUT> latencyEmitter = null;
    if (latencyTrackingInterval > 0) {
        latencyEmitter = new LatencyMarksEmitter<>(
            getProcessingTimeService(),
            collector,
            latencyTrackingInterval,
            this.getOperatorID(),
            getRuntimeContext().getIndexOfThisSubtask());
    }

    final long watermarkInterval = getRuntimeContext().getExecutionConfig().getAutoWatermarkInterval();

    this.ctx = StreamSourceContexts.getSourceContext(
        timeCharacteristic,
        getProcessingTimeService(),
        lockingObject,
        streamStatusMaintainer,
        collector,
        watermarkInterval,
        -1);

    try {
        userFunction.run(ctx);

        // if we get here, then the user function either exited after being done (finite source)
        // or the function was canceled or stopped. For the finite source case, we should emit
        // a final watermark that indicates that we reached the end of event-time
        if (!isCanceledOrStopped()) {
            ctx.emitWatermark(Watermark.MAX_WATERMARK);
        }
    } finally {
        // make sure that the context is closed in any case
        ctx.close();
        if (latencyEmitter != null) {
            latencyEmitter.close();
        }
    }
}