org.apache.flink.util.function.FunctionWithException Java Examples
The following examples show how to use
org.apache.flink.util.function.FunctionWithException.
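For orientation before the examples, here is a minimal sketch of the interface shape and a typical lambda assignment. The interface signature matches Flink's definition; the URL-parsing use case is illustrative and not taken from any example below.

import java.net.MalformedURLException;
import java.net.URL;
import org.apache.flink.util.function.FunctionWithException;

public class FunctionWithExceptionSketch {
    public static void main(String[] args) throws MalformedURLException {
        // Like java.util.function.Function, but the single abstract method may
        // throw the checked exception type E:
        //     R apply(T value) throws E;
        FunctionWithException<String, URL, MalformedURLException> parseUrl = URL::new;

        // The checked exception propagates to the caller, so the lambda body
        // needs no try/catch wrapper.
        URL url = parseUrl.apply("https://flink.apache.org");
        System.out.println(url.getHost());
    }
}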
Example #1
Source File: S3RecoverableFsDataOutputStream.java From flink with Apache License 2.0

/**
 * Single constructor to initialize all. Actual setup of the parts happens in the
 * factory methods.
 */
S3RecoverableFsDataOutputStream(
        RecoverableMultiPartUpload upload,
        FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
        RefCountedFSOutputStream initialTmpFile,
        long userDefinedMinPartSize,
        long bytesBeforeCurrentPart) {

    checkArgument(bytesBeforeCurrentPart >= 0L);

    this.upload = checkNotNull(upload);
    this.tmpFileProvider = checkNotNull(tempFileCreator);
    this.userDefinedMinPartSize = userDefinedMinPartSize;
    this.fileStream = initialTmpFile;
    this.bytesBeforeCurrentPart = bytesBeforeCurrentPart;
}
Example #2
Source File: ResultPartition.java From flink with Apache License 2.0

public ResultPartition(
        String owningTaskName,
        int partitionIndex,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        @Nullable BufferCompressor bufferCompressor,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {

    this.owningTaskName = checkNotNull(owningTaskName);
    Preconditions.checkArgument(0 <= partitionIndex, "The partition index must be non-negative.");
    this.partitionIndex = partitionIndex;
    this.partitionId = checkNotNull(partitionId);
    this.partitionType = checkNotNull(partitionType);
    this.subpartitions = checkNotNull(subpartitions);
    this.numTargetKeyGroups = numTargetKeyGroups;
    this.partitionManager = checkNotNull(partitionManager);
    this.bufferCompressor = bufferCompressor;
    this.bufferPoolFactory = bufferPoolFactory;
}
Example #3
Source File: OneInputStreamTaskTestHarness.java From flink with Apache License 2.0

/**
 * Creates a test harness with the specified number of input gates, the specified number
 * of channels per input gate, and the given localRecoveryConfig.
 */
public OneInputStreamTaskTestHarness(
        FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
        int numInputGates,
        int numInputChannelsPerGate,
        TypeInformation<IN> inputType,
        TypeInformation<OUT> outputType,
        LocalRecoveryConfig localRecoveryConfig) {
    super(taskFactory, outputType, localRecoveryConfig);

    this.inputType = inputType;
    inputSerializer = inputType.createSerializer(executionConfig);

    this.numInputGates = numInputGates;
    this.numInputChannelsPerGate = numInputChannelsPerGate;
}
Example #4
Source File: S3RecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0

public static S3RecoverableFsDataOutputStream recoverStream(
        final RecoverableMultiPartUpload upload,
        final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
        final long userDefinedMinPartSize,
        final long bytesBeforeCurrentPart) throws IOException {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(
            tmpFileCreator, upload.getIncompletePart());

    return new S3RecoverableFsDataOutputStream(
            upload,
            tmpFileCreator,
            fileStream,
            userDefinedMinPartSize,
            bytesBeforeCurrentPart);
}
Example #5
Source File: ResultPartitionFactory.java From flink with Apache License 2.0

/**
 * The minimum pool size should be <code>numberOfSubpartitions + 1</code> for two reasons:
 *
 * <p>1. A StreamTask can only process input if at least one buffer is available on the output
 * side, so the task could get stuck if the minimum pool size were exactly equal to the number
 * of subpartitions, because every subpartition might hold a partially filled buffer.
 *
 * <p>2. Granting one extra buffer to every output LocalBufferPool avoids a performance
 * regression when processing input depends on at least one buffer being available on the
 * output side.
 */
@VisibleForTesting
FunctionWithException<BufferPoolOwner, BufferPool, IOException> createBufferPoolFactory(
        int numberOfSubpartitions, ResultPartitionType type) {
    return bufferPoolOwner -> {
        int maxNumberOfMemorySegments = type.isBounded()
                ? numberOfSubpartitions * networkBuffersPerChannel + floatingNetworkBuffersPerGate
                : Integer.MAX_VALUE;

        // If the partition type is back pressure-free, we register with the buffer pool for
        // callbacks to release memory.
        return bufferPoolFactory.createBufferPool(
                numberOfSubpartitions + 1,
                maxNumberOfMemorySegments,
                type.hasBackPressure() ? null : bufferPoolOwner,
                numberOfSubpartitions,
                maxBuffersPerChannel);
    };
}
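To make the sizing above concrete: with numberOfSubpartitions = 4, networkBuffersPerChannel = 2, and floatingNetworkBuffersPerGate = 8, the returned factory requests a pool of at least 4 + 1 = 5 buffers and, when type.isBounded() is true, at most 4 * 2 + 8 = 16 memory segments; for unbounded types the maximum is Integer.MAX_VALUE.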
Example #6
Source File: S3RecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0

/**
 * Single constructor to initialize all. Actual setup of the parts happens in the
 * factory methods.
 */
S3RecoverableFsDataOutputStream(
        RecoverableMultiPartUpload upload,
        FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
        RefCountedFSOutputStream initialTmpFile,
        long userDefinedMinPartSize,
        long bytesBeforeCurrentPart) {

    checkArgument(bytesBeforeCurrentPart >= 0L);

    this.upload = checkNotNull(upload);
    this.tmpFileProvider = checkNotNull(tempFileCreator);
    this.userDefinedMinPartSize = userDefinedMinPartSize;
    this.fileStream = initialTmpFile;
    this.bytesBeforeCurrentPart = bytesBeforeCurrentPart;
}
Example #7
Source File: S3RecoverableWriter.java From Flink-CEPplus with Apache License 2.0

public static S3RecoverableWriter writer(
        final FileSystem fs,
        final FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
        final S3AccessHelper s3AccessHelper,
        final Executor uploadThreadPool,
        final long userDefinedMinPartSize,
        final int maxConcurrentUploadsPerStream) {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final S3RecoverableMultipartUploadFactory uploadFactory =
            new S3RecoverableMultipartUploadFactory(
                    fs,
                    s3AccessHelper,
                    maxConcurrentUploadsPerStream,
                    uploadThreadPool,
                    tempFileCreator);

    return new S3RecoverableWriter(s3AccessHelper, uploadFactory, tempFileCreator, userDefinedMinPartSize);
}
Example #8
Source File: ReleaseOnConsumptionResultPartition.java From flink with Apache License 2.0

ReleaseOnConsumptionResultPartition(
        String owningTaskName,
        int partitionIndex,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        @Nullable BufferCompressor bufferCompressor,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
    super(
            owningTaskName,
            partitionIndex,
            partitionId,
            partitionType,
            subpartitions,
            numTargetKeyGroups,
            partitionManager,
            bufferCompressor,
            bufferPoolFactory);

    this.consumedSubpartitions = new boolean[subpartitions.length];
    this.numUnconsumedSubpartitions = subpartitions.length;
}
Example #9
Source File: S3RecoverableFsDataOutputStream.java From Flink-CEPplus with Apache License 2.0

public static S3RecoverableFsDataOutputStream newStream(
        final RecoverableMultiPartUpload upload,
        final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
        final long userDefinedMinPartSize) throws IOException {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final RefCountedBufferingFileStream fileStream =
            boundedBufferingFileStream(tmpFileCreator, Optional.empty());

    return new S3RecoverableFsDataOutputStream(
            upload,
            tmpFileCreator,
            fileStream,
            userDefinedMinPartSize,
            0L);
}
Example #10
Source File: S3RecoverableWriter.java From flink with Apache License 2.0

public static S3RecoverableWriter writer(
        final FileSystem fs,
        final FunctionWithException<File, RefCountedFileWithStream, IOException> tempFileCreator,
        final S3AccessHelper s3AccessHelper,
        final Executor uploadThreadPool,
        final long userDefinedMinPartSize,
        final int maxConcurrentUploadsPerStream) {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final S3RecoverableMultipartUploadFactory uploadFactory =
            new S3RecoverableMultipartUploadFactory(
                    fs,
                    s3AccessHelper,
                    maxConcurrentUploadsPerStream,
                    uploadThreadPool,
                    tempFileCreator);

    return new S3RecoverableWriter(s3AccessHelper, uploadFactory, tempFileCreator, userDefinedMinPartSize);
}
Example #11
Source File: S3RecoverableFsDataOutputStream.java From flink with Apache License 2.0

public static S3RecoverableFsDataOutputStream recoverStream(
        final RecoverableMultiPartUpload upload,
        final FunctionWithException<File, RefCountedFileWithStream, IOException> tmpFileCreator,
        final long userDefinedMinPartSize,
        final long bytesBeforeCurrentPart) throws IOException {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(
            tmpFileCreator, upload.getIncompletePart());

    return new S3RecoverableFsDataOutputStream(
            upload,
            tmpFileCreator,
            fileStream,
            userDefinedMinPartSize,
            bytesBeforeCurrentPart);
}
Example #12
Source File: S3RecoverableFsDataOutputStream.java From flink with Apache License 2.0

/**
 * Single constructor to initialize all. Actual setup of the parts happens in the
 * factory methods.
 */
S3RecoverableFsDataOutputStream(
        RecoverableMultiPartUpload upload,
        FunctionWithException<File, RefCountedFileWithStream, IOException> tempFileCreator,
        RefCountedFSOutputStream initialTmpFile,
        long userDefinedMinPartSize,
        long bytesBeforeCurrentPart) {

    checkArgument(bytesBeforeCurrentPart >= 0L);

    this.upload = checkNotNull(upload);
    this.tmpFileProvider = checkNotNull(tempFileCreator);
    this.userDefinedMinPartSize = userDefinedMinPartSize;
    this.fileStream = initialTmpFile;
    this.bytesBeforeCurrentPart = bytesBeforeCurrentPart;
}
Example #13
Source File: S3RecoverableFsDataOutputStream.java From flink with Apache License 2.0

public static S3RecoverableFsDataOutputStream newStream(
        final RecoverableMultiPartUpload upload,
        final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
        final long userDefinedMinPartSize) throws IOException {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final RefCountedBufferingFileStream fileStream =
            boundedBufferingFileStream(tmpFileCreator, Optional.empty());

    return new S3RecoverableFsDataOutputStream(
            upload,
            tmpFileCreator,
            fileStream,
            userDefinedMinPartSize,
            0L);
}
Example #14
Source File: S3RecoverableFsDataOutputStream.java From flink with Apache License 2.0

public static S3RecoverableFsDataOutputStream recoverStream(
        final RecoverableMultiPartUpload upload,
        final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
        final long userDefinedMinPartSize,
        final long bytesBeforeCurrentPart) throws IOException {

    checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

    final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(
            tmpFileCreator, upload.getIncompletePart());

    return new S3RecoverableFsDataOutputStream(
            upload,
            tmpFileCreator,
            fileStream,
            userDefinedMinPartSize,
            bytesBeforeCurrentPart);
}
Example #15
Source File: DelimitedInputFormatTest.java From flink with Apache License 2.0

private void testDelimiterOnBufferBoundary(
        FunctionWithException<String, FileInputSplit, IOException> splitCreator) throws IOException {
    String[] records = new String[]{
            "1234567890<DEL?NO!>1234567890",
            "1234567890<DEL?NO!>1234567890",
            "<DEL?NO!>"};
    String delimiter = "<DELIM>";
    String fileContent = StringUtils.join(records, delimiter);
    final FileInputSplit split = splitCreator.apply(fileContent);
    final Configuration parameters = new Configuration();

    format.setBufferSize(12);
    format.setDelimiter(delimiter);
    format.configure(parameters);
    format.open(split);

    for (String record : records) {
        String value = format.nextRecord(null);
        assertEquals(record, value);
    }

    assertNull(format.nextRecord(null));
    assertTrue(format.reachedEnd());

    format.close();
}
Example #16
Source File: ConfigUtils.java From flink with Apache License 2.0

/**
 * Gets a {@link List} of values of type {@code IN} from a {@link ReadableConfig}
 * and transforms it to a {@link List} of type {@code OUT} based on the provided {@code mapper} function.
 *
 * @param configuration the configuration object to get the value out of
 * @param key the {@link ConfigOption option} to serve as the key for the list in the configuration
 * @param mapper the transformation function from {@code IN} to {@code OUT}
 * @return the transformed values in a list of type {@code OUT}
 */
public static <IN, OUT, E extends Throwable> List<OUT> decodeListFromConfig(
        final ReadableConfig configuration,
        final ConfigOption<List<IN>> key,
        final FunctionWithException<IN, OUT, E> mapper) throws E {

    checkNotNull(configuration);
    checkNotNull(key);
    checkNotNull(mapper);

    final List<IN> encodedString = configuration.get(key);
    if (encodedString == null || encodedString.isEmpty()) {
        return new ArrayList<>();
    }

    final List<OUT> result = new ArrayList<>(encodedString.size());
    for (IN input : encodedString) {
        result.add(mapper.apply(input));
    }
    return result;
}
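As a rough usage sketch for the method above, the mapper can be a constructor reference that throws a checked exception. This assumes a Flink version (1.10 or later) where PipelineOptions.JARS and ConfigUtils.decodeListFromConfig are both available; the jar URL value is made up for illustration.

import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import org.apache.flink.configuration.ConfigUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.PipelineOptions;

public class DecodeListSketch {
    public static void main(String[] args) throws MalformedURLException {
        Configuration configuration = new Configuration();
        configuration.set(PipelineOptions.JARS, Arrays.asList("https://example.org/a.jar"));

        // URL::new is a FunctionWithException<String, URL, MalformedURLException>,
        // so decodeListFromConfig declares and re-throws MalformedURLException.
        List<URL> urls = ConfigUtils.decodeListFromConfig(
                configuration, PipelineOptions.JARS, URL::new);
        System.out.println(urls);
    }
}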
Example #17
Source File: OneInputStreamTaskTestHarness.java From flink with Apache License 2.0

public OneInputStreamTaskTestHarness(
        FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
        int numInputGates,
        int numInputChannelsPerGate,
        TypeInformation<IN> inputType,
        TypeInformation<OUT> outputType,
        File localRootDir) {
    super(taskFactory, outputType, localRootDir);

    this.inputType = inputType;
    inputSerializer = inputType.createSerializer(executionConfig);

    this.numInputGates = numInputGates;
    this.numInputChannelsPerGate = numInputChannelsPerGate;

    streamConfig.setStateKeySerializer(inputSerializer);
}
Example #18
Source File: TestingExecutor.java From flink with Apache License 2.0

TestingExecutor(
        List<SupplierWithException<TypedResult<List<Tuple2<Boolean, Row>>>, SqlExecutionException>> resultChanges,
        List<SupplierWithException<TypedResult<Integer>, SqlExecutionException>> snapshotResults,
        List<SupplierWithException<List<Row>, SqlExecutionException>> resultPages,
        BiConsumerWithException<String, String, SqlExecutionException> useCatalogConsumer,
        BiConsumerWithException<String, String, SqlExecutionException> useDatabaseConsumer,
        BiFunctionWithException<String, String, TableResult, SqlExecutionException> executeSqlConsumer,
        TriFunctionWithException<String, String, String, Void, SqlExecutionException> setSessionPropertyFunction,
        FunctionWithException<String, Void, SqlExecutionException> resetSessionPropertiesFunction) {
    this.resultChanges = resultChanges;
    this.snapshotResults = snapshotResults;
    this.resultPages = resultPages;
    this.useCatalogConsumer = useCatalogConsumer;
    this.useDatabaseConsumer = useDatabaseConsumer;
    this.executeSqlConsumer = executeSqlConsumer;
    this.setSessionPropertyFunction = setSessionPropertyFunction;
    this.resetSessionPropertiesFunction = resetSessionPropertiesFunction;

    helper = new SqlParserHelper();
    helper.registerTables();
}
Example #19
Source File: StreamTaskTestHarness.java From flink with Apache License 2.0

public StreamTaskTestHarness(
        FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
        TypeInformation<OUT> outputType,
        LocalRecoveryConfig localRecoveryConfig) {
    this.taskFactory = checkNotNull(taskFactory);
    this.memorySize = DEFAULT_MEMORY_MANAGER_SIZE;
    this.bufferSize = DEFAULT_NETWORK_BUFFER_SIZE;

    this.jobConfig = new Configuration();
    this.taskConfig = new Configuration();
    this.executionConfig = new ExecutionConfig();

    streamConfig = new StreamConfig(taskConfig);
    streamConfig.setBufferTimeout(0);

    outputSerializer = outputType.createSerializer(executionConfig);
    outputStreamRecordSerializer = new StreamElementSerializer<>(outputSerializer);

    this.taskStateManager = new TestTaskStateManager(localRecoveryConfig);
}
Example #20
Source File: ResultPartition.java From flink with Apache License 2.0

public ResultPartition(
        String owningTaskName,
        ResultPartitionID partitionId,
        ResultPartitionType partitionType,
        ResultSubpartition[] subpartitions,
        int numTargetKeyGroups,
        ResultPartitionManager partitionManager,
        FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {

    this.owningTaskName = checkNotNull(owningTaskName);
    this.partitionId = checkNotNull(partitionId);
    this.partitionType = checkNotNull(partitionType);
    this.subpartitions = checkNotNull(subpartitions);
    this.numTargetKeyGroups = numTargetKeyGroups;
    this.partitionManager = checkNotNull(partitionManager);
    this.bufferPoolFactory = bufferPoolFactory;
}
Example #21
Source File: TwoInputStreamTaskTestHarness.java From flink with Apache License 2.0

/**
 * Creates a test harness with the specified number of input gates and the specified number
 * of channels per input gate. The inputGateAssignment parameter specifies for each gate
 * whether it should be assigned to the first (1) or second (2) input of the task.
 */
public TwoInputStreamTaskTestHarness(
        FunctionWithException<Environment, ? extends AbstractTwoInputStreamTask<IN1, IN2, OUT>, Exception> taskFactory,
        int numInputGates,
        int numInputChannelsPerGate,
        int[] inputGateAssignment,
        TypeInformation<IN1> inputType1,
        TypeInformation<IN2> inputType2,
        TypeInformation<OUT> outputType) {
    super(taskFactory, outputType);

    inputSerializer1 = inputType1.createSerializer(executionConfig);
    inputSerializer2 = inputType2.createSerializer(executionConfig);

    this.numInputGates = numInputGates;
    this.numInputChannelsPerGate = numInputChannelsPerGate;
    this.inputGateAssignment = inputGateAssignment;
}
Example #22
Source File: ResultPartitionBuilder.java From flink with Apache License 2.0

public ResultPartition build() {
    ResultPartitionFactory resultPartitionFactory = new ResultPartitionFactory(
            partitionManager,
            channelManager,
            networkBufferPool,
            blockingSubpartitionType,
            networkBuffersPerChannel,
            floatingNetworkBuffersPerGate,
            networkBufferSize,
            releasedOnConsumption,
            blockingShuffleCompressionEnabled,
            compressionCodec,
            maxBuffersPerChannel);

    FunctionWithException<BufferPoolOwner, BufferPool, IOException> factory = bufferPoolFactory.orElseGet(() ->
            resultPartitionFactory.createBufferPoolFactory(numberOfSubpartitions, partitionType));

    return resultPartitionFactory.create(
            "Result Partition task",
            partitionIndex,
            partitionId,
            partitionType,
            numberOfSubpartitions,
            numTargetKeyGroups,
            factory);
}
Example #23
Source File: S3RecoverableFsDataOutputStream.java From flink with Apache License 2.0

private static RefCountedBufferingFileStream boundedBufferingFileStream(
        final FunctionWithException<File, RefCountedFileWithStream, IOException> tmpFileCreator,
        final Optional<File> incompletePart) throws IOException {

    if (!incompletePart.isPresent()) {
        return RefCountedBufferingFileStream.openNew(tmpFileCreator);
    }

    final File file = incompletePart.get();
    return RefCountedBufferingFileStream.restore(tmpFileCreator, file);
}
Example #24
Source File: RestClusterClientSavepointTriggerTest.java From flink with Apache License 2.0

private static RestServerEndpoint createRestServerEndpoint(
        final FunctionWithException<SavepointTriggerRequestBody, TriggerId, RestHandlerException> triggerHandlerLogic,
        final FunctionWithException<TriggerId, SavepointInfo, RestHandlerException> savepointHandlerLogic) throws Exception {
    return TestRestServerEndpoint.createAndStartRestServerEndpoint(
            restServerEndpointConfiguration,
            new TestSavepointTriggerHandler(triggerHandlerLogic),
            new TestSavepointHandler(savepointHandlerLogic));
}
Example #25
Source File: AkkaRpcActorOversizedResponseMessageTest.java From flink with Apache License 2.0

private <T> T runRemoteMessageResponseTest(
        String payload,
        FunctionWithException<MessageRpcGateway, T, Exception> rpcCall) throws Exception {
    final MessageRpcEndpoint rpcEndpoint = new MessageRpcEndpoint(rpcService1, payload);

    try {
        rpcEndpoint.start();

        MessageRpcGateway rpcGateway = rpcService2.connect(
                rpcEndpoint.getAddress(), MessageRpcGateway.class).get();

        return rpcCall.apply(rpcGateway);
    } finally {
        RpcUtils.terminateRpcEndpoint(rpcEndpoint, TIMEOUT);
    }
}
Example #26
Source File: OneInputStreamTaskTestHarness.java From flink with Apache License 2.0

/**
 * Creates a test harness with the specified number of input gates and the specified number
 * of channels per input gate, with local recovery disabled.
 */
public OneInputStreamTaskTestHarness(
        FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
        int numInputGates,
        int numInputChannelsPerGate,
        TypeInformation<IN> inputType,
        TypeInformation<OUT> outputType) {
    this(taskFactory, numInputGates, numInputChannelsPerGate, inputType, outputType,
            TestLocalRecoveryConfig.disabled());
}
Example #27
Source File: OneInputStreamTaskTestHarness.java From flink with Apache License 2.0

/**
 * Creates a test harness with one input gate that has one input channel.
 */
public OneInputStreamTaskTestHarness(
        FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
        TypeInformation<IN> inputType,
        TypeInformation<OUT> outputType) {
    this(taskFactory, 1, 1, inputType, outputType, TestLocalRecoveryConfig.disabled());
}
Example #28
Source File: S3RecoverableMultipartUploadFactory.java From flink with Apache License 2.0

S3RecoverableMultipartUploadFactory(
        final FileSystem fs,
        final S3AccessHelper s3AccessHelper,
        final int maxConcurrentUploadsPerStream,
        final Executor executor,
        final FunctionWithException<File, RefCountedFileWithStream, IOException> tmpFileSupplier) {
    this.fs = Preconditions.checkNotNull(fs);
    this.maxConcurrentUploadsPerStream = maxConcurrentUploadsPerStream;
    this.executor = executor;
    this.s3AccessHelper = s3AccessHelper;
    this.tmpFileSupplier = tmpFileSupplier;
}
Example #29
Source File: TestingResourceActions.java From flink with Apache License 2.0

public TestingResourceActions(
        @Nonnull BiConsumer<InstanceID, Exception> releaseResourceConsumer,
        @Nonnull FunctionWithException<ResourceProfile, Collection<ResourceProfile>, ResourceManagerException> allocateResourceFunction,
        @Nonnull Consumer<Tuple3<JobID, AllocationID, Exception>> notifyAllocationFailureConsumer) {
    this.releaseResourceConsumer = releaseResourceConsumer;
    this.allocateResourceFunction = allocateResourceFunction;
    this.notifyAllocationFailureConsumer = notifyAllocationFailureConsumer;
}