org.apache.flink.api.java.tuple.Tuple6 Java Examples
The following examples show how to use org.apache.flink.api.java.tuple.Tuple6. The source file and project are listed above each example.
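Tuple6 is Flink's fixed-arity tuple with six typed fields, exposed both as the public members f0 through f5 and positionally via getField(int). As a quick orientation before the project examples, here is a minimal standalone sketch (the class name and field values are illustrative, not taken from any project below):

import org.apache.flink.api.java.tuple.Tuple6;

public class Tuple6Sketch {
    public static void main(String[] args) {
        // Construct via the static factory (a plain constructor also exists).
        Tuple6<Integer, Long, String, Double, Boolean, String> t =
                Tuple6.of(1, 2L, "three", 4.0, true, "six");

        // Typed access through the public fields f0..f5.
        String third = t.f2;

        // Positional access through getField(int); the caller supplies the target type.
        Double fourth = t.getField(3);

        System.out.println(third + " / " + fourth + " / arity " + t.getArity());
    }
}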
Example #1
Source File: VectorToColumnsTest.java From Alink with Apache License 2.0 | 6 votes |
@Test
public void pipelineBatchTest() throws Exception {
    BatchOperator res = new VectorToColumns()
        .setSelectedCol("c0")
        .setOutputCols(new String[] {"f0", "f1", "f2", "f3", "f4", "f5"})
        .transform((BatchOperator) getData(true));
    List rows = res.getDataSet().collect();
    HashMap<String, Tuple6<Double, Double, Double, Double, Double, Double>> map =
        new HashMap<String, Tuple6<Double, Double, Double, Double, Double, Double>>();
    map.put((String) ((Row) rows.get(0)).getField(0), Tuple6.of(
        (Double) ((Row) rows.get(0)).getField(4),
        (Double) ((Row) rows.get(0)).getField(5),
        (Double) ((Row) rows.get(0)).getField(6),
        (Double) ((Row) rows.get(0)).getField(7),
        (Double) ((Row) rows.get(0)).getField(8),
        (Double) ((Row) rows.get(0)).getField(9)));
    map.put((String) ((Row) rows.get(1)).getField(0), Tuple6.of(
        (Double) ((Row) rows.get(1)).getField(4),
        (Double) ((Row) rows.get(1)).getField(5),
        (Double) ((Row) rows.get(1)).getField(6),
        (Double) ((Row) rows.get(1)).getField(7),
        (Double) ((Row) rows.get(1)).getField(8),
        (Double) ((Row) rows.get(1)).getField(9)));
    assertEquals(map.get("0"), new Tuple6<>(0.0, 2.0, 3.0, 0.0, 0.0, 4.3));
    assertEquals(map.get("1"), new Tuple6<>(0.0, 2.0, 3.0, 0.0, 0.0, 4.3));
}
Example #2
Source File: JoinITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectOnATuple1Input() throws Exception {
    /*
     * Project join on a tuple input 1
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.get5TupleDataSet(env);
    DataSet<Tuple6<String, Long, String, Integer, Long, Long>> joinDs =
            ds1.join(ds2)
                    .where(1)
                    .equalTo(1)
                    .projectFirst(2, 1)
                    .projectSecond(3)
                    .projectFirst(0)
                    .projectSecond(4, 1);

    List<Tuple6<String, Long, String, Integer, Long, Long>> result = joinDs.collect();

    String expected = "Hi,1,Hallo,1,1,1\n" +
            "Hello,2,Hallo Welt,2,2,2\n" +
            "Hello world,2,Hallo Welt,3,2,2\n";

    compareResultAsTuples(result, expected);
}
Example #3
Source File: JoinITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectOnATuple1Input() throws Exception {
    /*
     * Project join on a tuple input 1
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.get5TupleDataSet(env);
    DataSet<Tuple6<String, Long, String, Integer, Long, Long>> joinDs =
            ds1.join(ds2)
                    .where(1)
                    .equalTo(1)
                    .projectFirst(2, 1)
                    .projectSecond(3)
                    .projectFirst(0)
                    .projectSecond(4, 1);

    List<Tuple6<String, Long, String, Integer, Long, Long>> result = joinDs.collect();

    String expected = "Hi,1,Hallo,1,1,1\n" +
            "Hello,2,Hallo Welt,2,2,2\n" +
            "Hello world,2,Hallo Welt,3,2,2\n";

    compareResultAsTuples(result, expected);
}
Example #4
Source File: JoinITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectJoinOnATuple2Input() throws Exception {
    /*
     * Project join on a tuple input 2
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.get5TupleDataSet(env);
    DataSet<Tuple6<String, String, Long, Long, Long, Integer>> joinDs =
            ds1.join(ds2)
                    .where(1)
                    .equalTo(1)
                    .projectSecond(3)
                    .projectFirst(2, 1)
                    .projectSecond(4, 1)
                    .projectFirst(0);

    List<Tuple6<String, String, Long, Long, Long, Integer>> result = joinDs.collect();

    String expected = "Hallo,Hi,1,1,1,1\n" +
            "Hallo Welt,Hello,2,2,2,2\n" +
            "Hallo Welt,Hello world,2,2,2,3\n";

    compareResultAsTuples(result, expected);
}
Example #5
Source File: CrossITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectCrossOnATupleInput1() throws Exception {
    /*
     * project cross on a tuple input 1
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple6<String, Long, String, Integer, Long, Long>> crossDs =
            ds.cross(ds2)
                    .projectFirst(2, 1)
                    .projectSecond(3)
                    .projectFirst(0)
                    .projectSecond(4, 1);

    List<Tuple6<String, Long, String, Integer, Long, Long>> result = crossDs.collect();

    String expected = "Hi,1,Hallo,1,1,1\n" +
            "Hi,1,Hallo Welt,1,2,2\n" +
            "Hi,1,Hallo Welt wie,1,1,3\n" +
            "Hello,2,Hallo,2,1,1\n" +
            "Hello,2,Hallo Welt,2,2,2\n" +
            "Hello,2,Hallo Welt wie,2,1,3\n" +
            "Hello world,2,Hallo,3,1,1\n" +
            "Hello world,2,Hallo Welt,3,2,2\n" +
            "Hello world,2,Hallo Welt wie,3,1,3\n";

    compareResultAsTuples(result, expected);
}
Example #6
Source File: CrossITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectCrossOnATupleInput2() throws Exception {
    /*
     * project cross on a tuple input 2
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple6<String, String, Long, Long, Long, Integer>> crossDs =
            ds.cross(ds2)
                    .projectSecond(3)
                    .projectFirst(2, 1)
                    .projectSecond(4, 1)
                    .projectFirst(0);

    List<Tuple6<String, String, Long, Long, Long, Integer>> result = crossDs.collect();

    String expected = "Hallo,Hi,1,1,1,1\n" +
            "Hallo Welt,Hi,1,2,2,1\n" +
            "Hallo Welt wie,Hi,1,1,3,1\n" +
            "Hallo,Hello,2,1,1,2\n" +
            "Hallo Welt,Hello,2,2,2,2\n" +
            "Hallo Welt wie,Hello,2,1,3,2\n" +
            "Hallo,Hello world,2,1,1,3\n" +
            "Hallo Welt,Hello world,2,2,2,3\n" +
            "Hallo Welt wie,Hello world,2,1,3,3\n";

    compareResultAsTuples(result, expected);
}
Example #7
Source File: CrossITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectCrossOnATupleInput2() throws Exception {
    /*
     * project cross on a tuple input 2
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple6<String, String, Long, Long, Long, Integer>> crossDs =
            ds.cross(ds2)
                    .projectSecond(3)
                    .projectFirst(2, 1)
                    .projectSecond(4, 1)
                    .projectFirst(0);

    List<Tuple6<String, String, Long, Long, Long, Integer>> result = crossDs.collect();

    String expected = "Hallo,Hi,1,1,1,1\n" +
            "Hallo Welt,Hi,1,2,2,1\n" +
            "Hallo Welt wie,Hi,1,1,3,1\n" +
            "Hallo,Hello,2,1,1,2\n" +
            "Hallo Welt,Hello,2,2,2,2\n" +
            "Hallo Welt wie,Hello,2,1,3,2\n" +
            "Hallo,Hello world,2,1,1,3\n" +
            "Hallo Welt,Hello world,2,2,2,3\n" +
            "Hallo Welt wie,Hello world,2,1,3,3\n";

    compareResultAsTuples(result, expected);
}
Example #8
Source File: CrossITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectCrossOnATupleInput1() throws Exception {
    /*
     * project cross on a tuple input 1
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple6<String, Long, String, Integer, Long, Long>> crossDs =
            ds.cross(ds2)
                    .projectFirst(2, 1)
                    .projectSecond(3)
                    .projectFirst(0)
                    .projectSecond(4, 1);

    List<Tuple6<String, Long, String, Integer, Long, Long>> result = crossDs.collect();

    String expected = "Hi,1,Hallo,1,1,1\n" +
            "Hi,1,Hallo Welt,1,2,2\n" +
            "Hi,1,Hallo Welt wie,1,1,3\n" +
            "Hello,2,Hallo,2,1,1\n" +
            "Hello,2,Hallo Welt,2,2,2\n" +
            "Hello,2,Hallo Welt wie,2,1,3\n" +
            "Hello world,2,Hallo,3,1,1\n" +
            "Hello world,2,Hallo Welt,3,2,2\n" +
            "Hello world,2,Hallo Welt wie,3,1,3\n";

    compareResultAsTuples(result, expected);
}
Example #9
Source File: JoinITCase.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testProjectJoinOnATuple2Input() throws Exception {
    /*
     * Project join on a tuple input 2
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.get5TupleDataSet(env);
    DataSet<Tuple6<String, String, Long, Long, Long, Integer>> joinDs =
            ds1.join(ds2)
                    .where(1)
                    .equalTo(1)
                    .projectSecond(3)
                    .projectFirst(2, 1)
                    .projectSecond(4, 1)
                    .projectFirst(0);

    List<Tuple6<String, String, Long, Long, Long, Integer>> result = joinDs.collect();

    String expected = "Hallo,Hi,1,1,1,1\n" +
            "Hallo Welt,Hello,2,2,2,2\n" +
            "Hallo Welt,Hello world,2,2,2,3\n";

    compareResultAsTuples(result, expected);
}
Example #10
Source File: TestingTaskExecutorGateway.java From flink with Apache License 2.0 | 5 votes |
TestingTaskExecutorGateway(
        String address,
        String hostname,
        BiConsumer<ResourceID, AllocatedSlotReport> heartbeatJobManagerConsumer,
        BiConsumer<JobID, Throwable> disconnectJobManagerConsumer,
        BiFunction<TaskDeploymentDescriptor, JobMasterId, CompletableFuture<Acknowledge>> submitTaskConsumer,
        Function<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>, CompletableFuture<Acknowledge>> requestSlotFunction,
        BiFunction<AllocationID, Throwable, CompletableFuture<Acknowledge>> freeSlotFunction,
        Consumer<ResourceID> heartbeatResourceManagerConsumer,
        Consumer<Exception> disconnectResourceManagerConsumer,
        Function<ExecutionAttemptID, CompletableFuture<Acknowledge>> cancelTaskFunction,
        Supplier<CompletableFuture<Boolean>> canBeReleasedSupplier,
        TriConsumer<JobID, Set<ResultPartitionID>, Set<ResultPartitionID>> releaseOrPromotePartitionsConsumer,
        Consumer<Collection<IntermediateDataSetID>> releaseClusterPartitionsConsumer,
        TriFunction<ExecutionAttemptID, OperatorID, SerializedValue<OperatorEvent>, CompletableFuture<Acknowledge>> operatorEventHandler,
        Supplier<CompletableFuture<ThreadDumpInfo>> requestThreadDumpSupplier) {
    this.address = Preconditions.checkNotNull(address);
    this.hostname = Preconditions.checkNotNull(hostname);
    this.heartbeatJobManagerConsumer = Preconditions.checkNotNull(heartbeatJobManagerConsumer);
    this.disconnectJobManagerConsumer = Preconditions.checkNotNull(disconnectJobManagerConsumer);
    this.submitTaskConsumer = Preconditions.checkNotNull(submitTaskConsumer);
    this.requestSlotFunction = Preconditions.checkNotNull(requestSlotFunction);
    this.freeSlotFunction = Preconditions.checkNotNull(freeSlotFunction);
    this.heartbeatResourceManagerConsumer = heartbeatResourceManagerConsumer;
    this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
    this.cancelTaskFunction = cancelTaskFunction;
    this.canBeReleasedSupplier = canBeReleasedSupplier;
    this.releaseOrPromotePartitionsConsumer = releaseOrPromotePartitionsConsumer;
    this.releaseClusterPartitionsConsumer = releaseClusterPartitionsConsumer;
    this.operatorEventHandler = operatorEventHandler;
    this.requestThreadDumpSupplier = requestThreadDumpSupplier;
}
Example #11
Source File: JoinITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testProjectJoinOnATuple2Input() throws Exception {
    /*
     * Project join on a tuple input 2
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.get5TupleDataSet(env);
    DataSet<Tuple6<String, String, Long, Long, Long, Integer>> joinDs =
            ds1.join(ds2)
                    .where(1)
                    .equalTo(1)
                    .projectSecond(3)
                    .projectFirst(2, 1)
                    .projectSecond(4, 1)
                    .projectFirst(0);

    List<Tuple6<String, String, Long, Long, Long, Integer>> result = joinDs.collect();

    String expected = "Hallo,Hi,1,1,1,1\n" +
            "Hallo Welt,Hello,2,2,2,2\n" +
            "Hallo Welt,Hello world,2,2,2,3\n";

    compareResultAsTuples(result, expected);
}
Example #12
Source File: CrossITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testProjectCrossOnATupleInput1() throws Exception {
    /*
     * project cross on a tuple input 1
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple6<String, Long, String, Integer, Long, Long>> crossDs =
            ds.cross(ds2)
                    .projectFirst(2, 1)
                    .projectSecond(3)
                    .projectFirst(0)
                    .projectSecond(4, 1);

    List<Tuple6<String, Long, String, Integer, Long, Long>> result = crossDs.collect();

    String expected = "Hi,1,Hallo,1,1,1\n" +
            "Hi,1,Hallo Welt,1,2,2\n" +
            "Hi,1,Hallo Welt wie,1,1,3\n" +
            "Hello,2,Hallo,2,1,1\n" +
            "Hello,2,Hallo Welt,2,2,2\n" +
            "Hello,2,Hallo Welt wie,2,1,3\n" +
            "Hello world,2,Hallo,3,1,1\n" +
            "Hello world,2,Hallo Welt,3,2,2\n" +
            "Hello world,2,Hallo Welt wie,3,1,3\n";

    compareResultAsTuples(result, expected);
}
Example #13
Source File: CrossITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testProjectCrossOnATupleInput2() throws Exception {
    /*
     * project cross on a tuple input 2
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.getSmall5TupleDataSet(env);
    DataSet<Tuple6<String, String, Long, Long, Long, Integer>> crossDs =
            ds.cross(ds2)
                    .projectSecond(3)
                    .projectFirst(2, 1)
                    .projectSecond(4, 1)
                    .projectFirst(0);

    List<Tuple6<String, String, Long, Long, Long, Integer>> result = crossDs.collect();

    String expected = "Hallo,Hi,1,1,1,1\n" +
            "Hallo Welt,Hi,1,2,2,1\n" +
            "Hallo Welt wie,Hi,1,1,3,1\n" +
            "Hallo,Hello,2,1,1,2\n" +
            "Hallo Welt,Hello,2,2,2,2\n" +
            "Hallo Welt wie,Hello,2,1,3,2\n" +
            "Hallo,Hello world,2,1,1,3\n" +
            "Hallo Welt,Hello world,2,2,2,3\n" +
            "Hallo Welt wie,Hello world,2,1,3,3\n";

    compareResultAsTuples(result, expected);
}
Example #14
Source File: JoinITCase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void testProjectOnATuple1Input() throws Exception {
    /*
     * Project join on a tuple input 1
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Long, String>> ds1 = CollectionDataSets.getSmall3TupleDataSet(env);
    DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 = CollectionDataSets.get5TupleDataSet(env);
    DataSet<Tuple6<String, Long, String, Integer, Long, Long>> joinDs =
            ds1.join(ds2)
                    .where(1)
                    .equalTo(1)
                    .projectFirst(2, 1)
                    .projectSecond(3)
                    .projectFirst(0)
                    .projectSecond(4, 1);

    List<Tuple6<String, Long, String, Integer, Long, Long>> result = joinDs.collect();

    String expected = "Hi,1,Hallo,1,1,1\n" +
            "Hello,2,Hallo Welt,2,2,2\n" +
            "Hello world,2,Hallo Welt,3,2,2\n";

    compareResultAsTuples(result, expected);
}
Example #15
Source File: Tuple6Builder.java From flink with Apache License 2.0 | 4 votes |
@SuppressWarnings("unchecked") public Tuple6<T0, T1, T2, T3, T4, T5>[] build(){ return tuples.toArray(new Tuple6[tuples.size()]); }
Example #16
Source File: SiddhiTupleFactory.java From flink-siddhi with Apache License 2.0 | 4 votes |
/**
 * Convert object array to type of Tuple{N} where N is between 0 to 25.
 *
 * @throws IllegalArgumentException if row's length > 25
 */
public static <T extends Tuple> T newTuple(Object[] row) {
    Preconditions.checkNotNull(row, "Tuple row is null");
    switch (row.length) {
        case 0: return setTupleValue(new Tuple0(), row);
        case 1: return setTupleValue(new Tuple1(), row);
        case 2: return setTupleValue(new Tuple2(), row);
        case 3: return setTupleValue(new Tuple3(), row);
        case 4: return setTupleValue(new Tuple4(), row);
        case 5: return setTupleValue(new Tuple5(), row);
        case 6: return setTupleValue(new Tuple6(), row);
        case 7: return setTupleValue(new Tuple7(), row);
        case 8: return setTupleValue(new Tuple8(), row);
        case 9: return setTupleValue(new Tuple9(), row);
        case 10: return setTupleValue(new Tuple10(), row);
        case 11: return setTupleValue(new Tuple11(), row);
        case 12: return setTupleValue(new Tuple12(), row);
        case 13: return setTupleValue(new Tuple13(), row);
        case 14: return setTupleValue(new Tuple14(), row);
        case 15: return setTupleValue(new Tuple15(), row);
        case 16: return setTupleValue(new Tuple16(), row);
        case 17: return setTupleValue(new Tuple17(), row);
        case 18: return setTupleValue(new Tuple18(), row);
        case 19: return setTupleValue(new Tuple19(), row);
        case 20: return setTupleValue(new Tuple20(), row);
        case 21: return setTupleValue(new Tuple21(), row);
        case 22: return setTupleValue(new Tuple22(), row);
        case 23: return setTupleValue(new Tuple23(), row);
        case 24: return setTupleValue(new Tuple24(), row);
        case 25: return setTupleValue(new Tuple25(), row);
        default: throw new IllegalArgumentException("Too long row: " + row.length + ", unable to convert to Tuple");
    }
}
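For orientation, a possible call site for the factory above could look like the sketch below; the input values and the target tuple type are illustrative assumptions, not code from the project:

// Hypothetical input row with six attributes; newTuple picks Tuple6 based on row.length.
Object[] row = new Object[] {"sensor-1", 42L, 0.5, 0.7, true, "OK"};
Tuple6<String, Long, Double, Double, Boolean, String> tuple = SiddhiTupleFactory.newTuple(row);
System.out.println(tuple.f0 + " -> " + tuple.f5);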
Example #17
Source File: SiddhiTupleFactory.java From bahir-flink with Apache License 2.0 | 4 votes |
/**
 * Convert object array to type of Tuple{N} where N is between 0 to 25.
 *
 * @throws IllegalArgumentException if row's length > 25
 */
public static <T extends Tuple> T newTuple(Object[] row) {
    Preconditions.checkNotNull(row, "Tuple row is null");
    switch (row.length) {
        case 0: return setTupleValue(new Tuple0(), row);
        case 1: return setTupleValue(new Tuple1<>(), row);
        case 2: return setTupleValue(new Tuple2<>(), row);
        case 3: return setTupleValue(new Tuple3<>(), row);
        case 4: return setTupleValue(new Tuple4<>(), row);
        case 5: return setTupleValue(new Tuple5<>(), row);
        case 6: return setTupleValue(new Tuple6<>(), row);
        case 7: return setTupleValue(new Tuple7<>(), row);
        case 8: return setTupleValue(new Tuple8<>(), row);
        case 9: return setTupleValue(new Tuple9<>(), row);
        case 10: return setTupleValue(new Tuple10<>(), row);
        case 11: return setTupleValue(new Tuple11<>(), row);
        case 12: return setTupleValue(new Tuple12<>(), row);
        case 13: return setTupleValue(new Tuple13<>(), row);
        case 14: return setTupleValue(new Tuple14<>(), row);
        case 15: return setTupleValue(new Tuple15<>(), row);
        case 16: return setTupleValue(new Tuple16<>(), row);
        case 17: return setTupleValue(new Tuple17<>(), row);
        case 18: return setTupleValue(new Tuple18<>(), row);
        case 19: return setTupleValue(new Tuple19<>(), row);
        case 20: return setTupleValue(new Tuple20<>(), row);
        case 21: return setTupleValue(new Tuple21<>(), row);
        case 22: return setTupleValue(new Tuple22<>(), row);
        case 23: return setTupleValue(new Tuple23<>(), row);
        case 24: return setTupleValue(new Tuple24<>(), row);
        case 25: return setTupleValue(new Tuple25<>(), row);
        default: throw new IllegalArgumentException("Too long row: " + row.length + ", unable to convert to Tuple");
    }
}
Example #18
Source File: SlotManagerImplTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Tests that pending request is removed if task executor reports a slot with its allocation id.
 */
@Test
public void testSlotRequestRemovedIfTMReportAllocation() throws Exception {
    try (final SlotManagerImpl slotManager = createSlotManager(
            ResourceManagerId.generate(), new TestingResourceActionsBuilder().build())) {

        final JobID jobID = new JobID();
        final SlotRequest slotRequest1 = new SlotRequest(jobID, new AllocationID(), ResourceProfile.UNKNOWN, "foobar");
        slotManager.registerSlotRequest(slotRequest1);

        final BlockingQueue<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>> requestSlotQueue = new ArrayBlockingQueue<>(1);
        final BlockingQueue<CompletableFuture<Acknowledge>> responseQueue = new ArrayBlockingQueue<>(1);

        final TestingTaskExecutorGateway testingTaskExecutorGateway = new TestingTaskExecutorGatewayBuilder()
                .setRequestSlotFunction(slotIDJobIDAllocationIDStringResourceManagerIdTuple6 -> {
                    requestSlotQueue.offer(slotIDJobIDAllocationIDStringResourceManagerIdTuple6);

                    try {
                        return responseQueue.take();
                    } catch (InterruptedException ignored) {
                        return FutureUtils.completedExceptionally(new FlinkException("Response queue was interrupted."));
                    }
                })
                .createTestingTaskExecutorGateway();

        final ResourceID taskExecutorResourceId = ResourceID.generate();
        final TaskExecutorConnection taskExecutionConnection = new TaskExecutorConnection(taskExecutorResourceId, testingTaskExecutorGateway);
        final SlotReport slotReport = new SlotReport(createEmptySlotStatus(new SlotID(taskExecutorResourceId, 0), ResourceProfile.ANY));

        final CompletableFuture<Acknowledge> firstManualSlotRequestResponse = new CompletableFuture<>();
        responseQueue.offer(firstManualSlotRequestResponse);

        slotManager.registerTaskManager(taskExecutionConnection, slotReport);

        final Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId> firstRequest = requestSlotQueue.take();

        final CompletableFuture<Acknowledge> secondManualSlotRequestResponse = new CompletableFuture<>();
        responseQueue.offer(secondManualSlotRequestResponse);

        final SlotRequest slotRequest2 = new SlotRequest(jobID, new AllocationID(), ResourceProfile.UNKNOWN, "foobar");
        slotManager.registerSlotRequest(slotRequest2);

        // fail first request
        firstManualSlotRequestResponse.completeExceptionally(new TimeoutException("Test exception to fail first allocation"));

        final Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId> secondRequest = requestSlotQueue.take();

        // fail second request
        secondManualSlotRequestResponse.completeExceptionally(new SlotOccupiedException("Test exception", slotRequest1.getAllocationId(), jobID));

        assertThat(firstRequest.f2, equalTo(slotRequest1.getAllocationId()));
        assertThat(secondRequest.f2, equalTo(slotRequest2.getAllocationId()));
        assertThat(secondRequest.f0, equalTo(firstRequest.f0));

        secondManualSlotRequestResponse.complete(Acknowledge.get());

        final TaskManagerSlot slot = slotManager.getSlot(secondRequest.f0);
        assertThat(slot.getState(), equalTo(TaskManagerSlot.State.ALLOCATED));
        assertThat(slot.getAllocationId(), equalTo(firstRequest.f2));

        assertThat(slotManager.getNumberRegisteredSlots(), is(1));
    }
}
Example #19
Source File: Tuple6Builder.java From flink with Apache License 2.0 | 4 votes |
public Tuple6Builder<T0, T1, T2, T3, T4, T5> add(T0 value0, T1 value1, T2 value2, T3 value3, T4 value4, T5 value5) {
    tuples.add(new Tuple6<>(value0, value1, value2, value3, value4, value5));
    return this;
}
Example #20
Source File: TestingTaskExecutorGateway.java From flink with Apache License 2.0 | 4 votes |
@Override
public CompletableFuture<Acknowledge> requestSlot(
        SlotID slotId,
        JobID jobId,
        AllocationID allocationId,
        ResourceProfile resourceProfile,
        String targetAddress,
        ResourceManagerId resourceManagerId,
        Time timeout) {
    return requestSlotFunction.apply(Tuple6.of(slotId, jobId, allocationId, resourceProfile, targetAddress, resourceManagerId));
}
Example #21
Source File: TestingJobMasterGatewayBuilder.java From flink with Apache License 2.0 | 4 votes |
public TestingJobMasterGatewayBuilder setNotifyKvStateRegisteredFunction(
        Function<Tuple6<JobID, JobVertexID, KeyGroupRange, String, KvStateID, InetSocketAddress>, CompletableFuture<Acknowledge>> notifyKvStateRegisteredFunction) {
    this.notifyKvStateRegisteredFunction = notifyKvStateRegisteredFunction;
    return this;
}
Example #22
Source File: SlotManagerImplTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Tests that the SlotManager retries allocating a slot if the TaskExecutor#requestSlot call
 * fails.
 */
@Test
public void testSlotRequestFailure() throws Exception {
    try (final SlotManagerImpl slotManager = createSlotManager(
            ResourceManagerId.generate(), new TestingResourceActionsBuilder().build())) {

        final SlotRequest slotRequest = new SlotRequest(new JobID(), new AllocationID(), ResourceProfile.UNKNOWN, "foobar");
        slotManager.registerSlotRequest(slotRequest);

        final BlockingQueue<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>> requestSlotQueue = new ArrayBlockingQueue<>(1);
        final BlockingQueue<CompletableFuture<Acknowledge>> responseQueue = new ArrayBlockingQueue<>(1);

        final TestingTaskExecutorGateway testingTaskExecutorGateway = new TestingTaskExecutorGatewayBuilder()
                .setRequestSlotFunction(slotIDJobIDAllocationIDStringResourceManagerIdTuple6 -> {
                    requestSlotQueue.offer(slotIDJobIDAllocationIDStringResourceManagerIdTuple6);

                    try {
                        return responseQueue.take();
                    } catch (InterruptedException ignored) {
                        return FutureUtils.completedExceptionally(new FlinkException("Response queue was interrupted."));
                    }
                })
                .createTestingTaskExecutorGateway();

        final ResourceID taskExecutorResourceId = ResourceID.generate();
        final TaskExecutorConnection taskExecutionConnection = new TaskExecutorConnection(taskExecutorResourceId, testingTaskExecutorGateway);
        final SlotReport slotReport = new SlotReport(createEmptySlotStatus(new SlotID(taskExecutorResourceId, 0), ResourceProfile.ANY));

        final CompletableFuture<Acknowledge> firstManualSlotRequestResponse = new CompletableFuture<>();
        responseQueue.offer(firstManualSlotRequestResponse);

        slotManager.registerTaskManager(taskExecutionConnection, slotReport);

        final Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId> firstRequest = requestSlotQueue.take();

        final CompletableFuture<Acknowledge> secondManualSlotRequestResponse = new CompletableFuture<>();
        responseQueue.offer(secondManualSlotRequestResponse);

        // fail first request
        firstManualSlotRequestResponse.completeExceptionally(new SlotAllocationException("Test exception"));

        final Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId> secondRequest = requestSlotQueue.take();

        assertThat(secondRequest.f2, equalTo(firstRequest.f2));
        assertThat(secondRequest.f0, equalTo(firstRequest.f0));

        secondManualSlotRequestResponse.complete(Acknowledge.get());

        final TaskManagerSlot slot = slotManager.getSlot(secondRequest.f0);
        assertThat(slot.getState(), equalTo(TaskManagerSlot.State.ALLOCATED));
        assertThat(slot.getAllocationId(), equalTo(secondRequest.f2));
    }
}
Example #23
Source File: SlotManagerImplTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Tests that pending slot requests are tried to be fulfilled upon new slot registrations.
 */
@Test
public void testFulfillingPendingSlotRequest() throws Exception {
    final ResourceManagerId resourceManagerId = ResourceManagerId.generate();
    final ResourceID resourceID = ResourceID.generate();
    final JobID jobId = new JobID();
    final SlotID slotId = new SlotID(resourceID, 0);
    final String targetAddress = "localhost";
    final AllocationID allocationId = new AllocationID();
    final ResourceProfile resourceProfile = ResourceProfile.fromResources(42.0, 1337);
    final SlotRequest slotRequest = new SlotRequest(
        jobId,
        allocationId,
        resourceProfile,
        targetAddress);

    final AtomicInteger numberAllocateResourceCalls = new AtomicInteger(0);
    ResourceActions resourceManagerActions = new TestingResourceActionsBuilder()
        .setAllocateResourceConsumer(ignored -> numberAllocateResourceCalls.incrementAndGet())
        .build();

    final CompletableFuture<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>> requestFuture = new CompletableFuture<>();
    // accept an incoming slot request
    final TaskExecutorGateway taskExecutorGateway = new TestingTaskExecutorGatewayBuilder()
        .setRequestSlotFunction(tuple6 -> {
            requestFuture.complete(Tuple6.of(tuple6.f0, tuple6.f1, tuple6.f2, tuple6.f3, tuple6.f4, tuple6.f5));
            return CompletableFuture.completedFuture(Acknowledge.get());
        })
        .createTestingTaskExecutorGateway();

    final TaskExecutorConnection taskExecutorConnection = new TaskExecutorConnection(resourceID, taskExecutorGateway);

    final SlotStatus slotStatus = new SlotStatus(slotId, resourceProfile);
    final SlotReport slotReport = new SlotReport(slotStatus);

    try (SlotManagerImpl slotManager = createSlotManager(resourceManagerId, resourceManagerActions)) {

        assertTrue("The slot request should be accepted", slotManager.registerSlotRequest(slotRequest));

        assertThat(numberAllocateResourceCalls.get(), is(1));

        slotManager.registerTaskManager(
            taskExecutorConnection,
            slotReport);

        assertThat(requestFuture.get(), is(equalTo(Tuple6.of(slotId, jobId, allocationId, resourceProfile, targetAddress, resourceManagerId))));

        TaskManagerSlot slot = slotManager.getSlot(slotId);

        assertEquals("The slot has not been allocated to the expected allocation id.", allocationId, slot.getAllocationId());
    }
}
Example #24
Source File: SlotManagerImplTest.java From flink with Apache License 2.0 | 4 votes |
/**
 * Tests that a slot request which can be fulfilled will trigger a slot allocation.
 */
@Test
public void testSlotRequestWithFreeSlot() throws Exception {
    final ResourceManagerId resourceManagerId = ResourceManagerId.generate();
    final ResourceID resourceID = ResourceID.generate();
    final JobID jobId = new JobID();
    final SlotID slotId = new SlotID(resourceID, 0);
    final String targetAddress = "localhost";
    final AllocationID allocationId = new AllocationID();
    final ResourceProfile resourceProfile = ResourceProfile.fromResources(42.0, 1337);
    final SlotRequest slotRequest = new SlotRequest(
        jobId,
        allocationId,
        resourceProfile,
        targetAddress);

    ResourceActions resourceManagerActions = new TestingResourceActionsBuilder().build();

    try (SlotManagerImpl slotManager = createSlotManager(resourceManagerId, resourceManagerActions)) {

        final CompletableFuture<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>> requestFuture = new CompletableFuture<>();
        // accept an incoming slot request
        final TaskExecutorGateway taskExecutorGateway = new TestingTaskExecutorGatewayBuilder()
            .setRequestSlotFunction(tuple6 -> {
                requestFuture.complete(Tuple6.of(tuple6.f0, tuple6.f1, tuple6.f2, tuple6.f3, tuple6.f4, tuple6.f5));
                return CompletableFuture.completedFuture(Acknowledge.get());
            })
            .createTestingTaskExecutorGateway();

        final TaskExecutorConnection taskExecutorConnection = new TaskExecutorConnection(resourceID, taskExecutorGateway);

        final SlotStatus slotStatus = new SlotStatus(slotId, resourceProfile);
        final SlotReport slotReport = new SlotReport(slotStatus);

        slotManager.registerTaskManager(
            taskExecutorConnection,
            slotReport);

        assertTrue("The slot request should be accepted", slotManager.registerSlotRequest(slotRequest));

        assertThat(requestFuture.get(), is(equalTo(Tuple6.of(slotId, jobId, allocationId, resourceProfile, targetAddress, resourceManagerId))));

        TaskManagerSlot slot = slotManager.getSlot(slotId);

        assertEquals("The slot has not been allocated to the expected allocation id.", allocationId, slot.getAllocationId());
    }
}
Example #25
Source File: TPCHQuery10.java From flink with Apache License 2.0 | 4 votes |
public static void main(String[] args) throws Exception {

    final ParameterTool params = ParameterTool.fromArgs(args);

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    if (!params.has("customer") && !params.has("orders") && !params.has("lineitem") && !params.has("nation")) {
        System.err.println(" This program expects data from the TPC-H benchmark as input data.");
        System.err.println(" Due to legal restrictions, we can not ship generated data.");
        System.err.println(" You can find the TPC-H data generator at http://www.tpc.org/tpch/.");
        System.err.println(" Usage: TPCHQuery10 --customer <path> --orders <path> --lineitem <path> --nation <path> [--output <path>]");
        return;
    }

    // get customer data set: (custkey, name, address, nationkey, acctbal)
    DataSet<Tuple5<Integer, String, String, Integer, Double>> customers = getCustomerDataSet(env, params.get("customer"));

    // get orders data set: (orderkey, custkey, orderdate)
    DataSet<Tuple3<Integer, Integer, String>> orders = getOrdersDataSet(env, params.get("orders"));

    // get lineitem data set: (orderkey, extendedprice, discount, returnflag)
    DataSet<Tuple4<Integer, Double, Double, String>> lineitems = getLineitemDataSet(env, params.get("lineitem"));

    // get nation data set: (nationkey, name)
    DataSet<Tuple2<Integer, String>> nations = getNationsDataSet(env, params.get("nation"));

    // orders filtered by year: (orderkey, custkey)
    DataSet<Tuple2<Integer, Integer>> ordersFilteredByYear =
            // filter by year
            orders.filter(order -> Integer.parseInt(order.f2.substring(0, 4)) > 1990)
            // project fields out that are no longer required
            .project(0, 1);

    // lineitems filtered by flag: (orderkey, revenue)
    DataSet<Tuple2<Integer, Double>> lineitemsFilteredByFlag =
            // filter by flag
            lineitems.filter(lineitem -> lineitem.f3.equals("R"))
            // compute revenue and project out return flag
            // revenue per item = l_extendedprice * (1 - l_discount)
            .map(lineitem -> new Tuple2<>(lineitem.f0, lineitem.f1 * (1 - lineitem.f2)))
            .returns(Types.TUPLE(Types.INT, Types.DOUBLE)); // for lambda with generics

    // join orders with lineitems: (custkey, revenue)
    DataSet<Tuple2<Integer, Double>> revenueByCustomer =
            ordersFilteredByYear.joinWithHuge(lineitemsFilteredByFlag)
                    .where(0).equalTo(0)
                    .projectFirst(1).projectSecond(1);

    revenueByCustomer = revenueByCustomer.groupBy(0).aggregate(Aggregations.SUM, 1);

    // join customer with nation (custkey, name, address, nationname, acctbal)
    DataSet<Tuple5<Integer, String, String, String, Double>> customerWithNation = customers
            .joinWithTiny(nations)
            .where(3).equalTo(0)
            .projectFirst(0, 1, 2).projectSecond(1).projectFirst(4);

    // join customer (with nation) with revenue (custkey, name, address, nationname, acctbal, revenue)
    DataSet<Tuple6<Integer, String, String, String, Double, Double>> result = customerWithNation.join(revenueByCustomer)
            .where(0).equalTo(0)
            .projectFirst(0, 1, 2, 3, 4).projectSecond(1);

    // emit result
    if (params.has("output")) {
        result.writeAsCsv(params.get("output"), "\n", "|");
        // execute program
        env.execute("TPCH Query 10 Example");
    } else {
        System.out.println("Printing result to stdout. Use --output to specify output path.");
        result.print();
    }
}
Example #26
Source File: TestingJobMasterGateway.java From flink with Apache License 2.0 | 4 votes |
public TestingJobMasterGateway(
        @Nonnull String address,
        @Nonnull String hostname,
        @Nonnull Supplier<CompletableFuture<Acknowledge>> cancelFunction,
        @Nonnull Function<TaskExecutionState, CompletableFuture<Acknowledge>> updateTaskExecutionStateFunction,
        @Nonnull BiFunction<JobVertexID, ExecutionAttemptID, CompletableFuture<SerializedInputSplit>> requestNextInputSplitFunction,
        @Nonnull BiFunction<IntermediateDataSetID, ResultPartitionID, CompletableFuture<ExecutionState>> requestPartitionStateFunction,
        @Nonnull Function<ResultPartitionID, CompletableFuture<Acknowledge>> scheduleOrUpdateConsumersFunction,
        @Nonnull Function<ResourceID, CompletableFuture<Acknowledge>> disconnectTaskManagerFunction,
        @Nonnull Consumer<ResourceManagerId> disconnectResourceManagerConsumer,
        @Nonnull BiFunction<ResourceID, Collection<SlotOffer>, CompletableFuture<Collection<SlotOffer>>> offerSlotsFunction,
        @Nonnull TriConsumer<ResourceID, AllocationID, Throwable> failSlotConsumer,
        @Nonnull BiFunction<String, UnresolvedTaskManagerLocation, CompletableFuture<RegistrationResponse>> registerTaskManagerFunction,
        @Nonnull BiConsumer<ResourceID, AccumulatorReport> taskManagerHeartbeatConsumer,
        @Nonnull Consumer<ResourceID> resourceManagerHeartbeatConsumer,
        @Nonnull Supplier<CompletableFuture<JobDetails>> requestJobDetailsSupplier,
        @Nonnull Supplier<CompletableFuture<ArchivedExecutionGraph>> requestJobSupplier,
        @Nonnull BiFunction<String, Boolean, CompletableFuture<String>> triggerSavepointFunction,
        @Nonnull BiFunction<String, Boolean, CompletableFuture<String>> stopWithSavepointFunction,
        @Nonnull Function<JobVertexID, CompletableFuture<OperatorBackPressureStatsResponse>> requestOperatorBackPressureStatsFunction,
        @Nonnull BiConsumer<AllocationID, Throwable> notifyAllocationFailureConsumer,
        @Nonnull Consumer<Tuple5<JobID, ExecutionAttemptID, Long, CheckpointMetrics, TaskStateSnapshot>> acknowledgeCheckpointConsumer,
        @Nonnull Consumer<DeclineCheckpoint> declineCheckpointConsumer,
        @Nonnull Supplier<JobMasterId> fencingTokenSupplier,
        @Nonnull BiFunction<JobID, String, CompletableFuture<KvStateLocation>> requestKvStateLocationFunction,
        @Nonnull Function<Tuple6<JobID, JobVertexID, KeyGroupRange, String, KvStateID, InetSocketAddress>, CompletableFuture<Acknowledge>> notifyKvStateRegisteredFunction,
        @Nonnull Function<Tuple4<JobID, JobVertexID, KeyGroupRange, String>, CompletableFuture<Acknowledge>> notifyKvStateUnregisteredFunction,
        @Nonnull TriFunction<String, Object, byte[], CompletableFuture<Object>> updateAggregateFunction,
        @Nonnull TriFunction<ExecutionAttemptID, OperatorID, SerializedValue<OperatorEvent>, CompletableFuture<Acknowledge>> operatorEventSender,
        @Nonnull BiFunction<OperatorID, SerializedValue<CoordinationRequest>, CompletableFuture<CoordinationResponse>> deliverCoordinationRequestFunction) {
    this.address = address;
    this.hostname = hostname;
    this.cancelFunction = cancelFunction;
    this.updateTaskExecutionStateFunction = updateTaskExecutionStateFunction;
    this.requestNextInputSplitFunction = requestNextInputSplitFunction;
    this.requestPartitionStateFunction = requestPartitionStateFunction;
    this.scheduleOrUpdateConsumersFunction = scheduleOrUpdateConsumersFunction;
    this.disconnectTaskManagerFunction = disconnectTaskManagerFunction;
    this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
    this.offerSlotsFunction = offerSlotsFunction;
    this.failSlotConsumer = failSlotConsumer;
    this.registerTaskManagerFunction = registerTaskManagerFunction;
    this.taskManagerHeartbeatConsumer = taskManagerHeartbeatConsumer;
    this.resourceManagerHeartbeatConsumer = resourceManagerHeartbeatConsumer;
    this.requestJobDetailsSupplier = requestJobDetailsSupplier;
    this.requestJobSupplier = requestJobSupplier;
    this.triggerSavepointFunction = triggerSavepointFunction;
    this.stopWithSavepointFunction = stopWithSavepointFunction;
    this.requestOperatorBackPressureStatsFunction = requestOperatorBackPressureStatsFunction;
    this.notifyAllocationFailureConsumer = notifyAllocationFailureConsumer;
    this.acknowledgeCheckpointConsumer = acknowledgeCheckpointConsumer;
    this.declineCheckpointConsumer = declineCheckpointConsumer;
    this.fencingTokenSupplier = fencingTokenSupplier;
    this.requestKvStateLocationFunction = requestKvStateLocationFunction;
    this.notifyKvStateRegisteredFunction = notifyKvStateRegisteredFunction;
    this.notifyKvStateUnregisteredFunction = notifyKvStateUnregisteredFunction;
    this.updateAggregateFunction = updateAggregateFunction;
    this.operatorEventSender = operatorEventSender;
    this.deliverCoordinationRequestFunction = deliverCoordinationRequestFunction;
}
Example #27
Source File: TestingJobMasterGateway.java From flink with Apache License 2.0 | 4 votes |
@Override
public CompletableFuture<Acknowledge> notifyKvStateRegistered(
        JobID jobId,
        JobVertexID jobVertexId,
        KeyGroupRange keyGroupRange,
        String registrationName,
        KvStateID kvStateId,
        InetSocketAddress kvStateServerAddress) {
    return notifyKvStateRegisteredFunction.apply(Tuple6.of(jobId, jobVertexId, keyGroupRange, registrationName, kvStateId, kvStateServerAddress));
}
Example #28
Source File: TestingTaskExecutorGatewayBuilder.java From flink with Apache License 2.0 | 4 votes |
public TestingTaskExecutorGatewayBuilder setRequestSlotFunction(
        Function<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>, CompletableFuture<Acknowledge>> requestSlotFunction) {
    this.requestSlotFunction = requestSlotFunction;
    return this;
}
Example #29
Source File: TestingJobMasterGateway.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
public TestingJobMasterGateway(
        @Nonnull String address,
        @Nonnull String hostname,
        @Nonnull Supplier<CompletableFuture<Acknowledge>> cancelFunction,
        @Nonnull Supplier<CompletableFuture<Acknowledge>> stopFunction,
        @Nonnull BiFunction<Integer, RescalingBehaviour, CompletableFuture<Acknowledge>> rescalingJobFunction,
        @Nonnull TriFunction<Collection<JobVertexID>, Integer, RescalingBehaviour, CompletableFuture<Acknowledge>> rescalingOperatorsFunction,
        @Nonnull Function<TaskExecutionState, CompletableFuture<Acknowledge>> updateTaskExecutionStateFunction,
        @Nonnull BiFunction<JobVertexID, ExecutionAttemptID, CompletableFuture<SerializedInputSplit>> requestNextInputSplitFunction,
        @Nonnull BiFunction<IntermediateDataSetID, ResultPartitionID, CompletableFuture<ExecutionState>> requestPartitionStateFunction,
        @Nonnull Function<ResultPartitionID, CompletableFuture<Acknowledge>> scheduleOrUpdateConsumersFunction,
        @Nonnull Function<ResourceID, CompletableFuture<Acknowledge>> disconnectTaskManagerFunction,
        @Nonnull Consumer<ResourceManagerId> disconnectResourceManagerConsumer,
        @Nonnull Supplier<CompletableFuture<ClassloadingProps>> classloadingPropsSupplier,
        @Nonnull BiFunction<ResourceID, Collection<SlotOffer>, CompletableFuture<Collection<SlotOffer>>> offerSlotsFunction,
        @Nonnull TriConsumer<ResourceID, AllocationID, Throwable> failSlotConsumer,
        @Nonnull BiFunction<String, TaskManagerLocation, CompletableFuture<RegistrationResponse>> registerTaskManagerFunction,
        @Nonnull BiConsumer<ResourceID, AccumulatorReport> taskManagerHeartbeatConsumer,
        @Nonnull Consumer<ResourceID> resourceManagerHeartbeatConsumer,
        @Nonnull Supplier<CompletableFuture<JobDetails>> requestJobDetailsSupplier,
        @Nonnull Supplier<CompletableFuture<ArchivedExecutionGraph>> requestJobSupplier,
        @Nonnull BiFunction<String, Boolean, CompletableFuture<String>> triggerSavepointFunction,
        @Nonnull Function<JobVertexID, CompletableFuture<OperatorBackPressureStatsResponse>> requestOperatorBackPressureStatsFunction,
        @Nonnull BiConsumer<AllocationID, Throwable> notifyAllocationFailureConsumer,
        @Nonnull Consumer<Tuple5<JobID, ExecutionAttemptID, Long, CheckpointMetrics, TaskStateSnapshot>> acknowledgeCheckpointConsumer,
        @Nonnull Consumer<DeclineCheckpoint> declineCheckpointConsumer,
        @Nonnull Supplier<JobMasterId> fencingTokenSupplier,
        @Nonnull BiFunction<JobID, String, CompletableFuture<KvStateLocation>> requestKvStateLocationFunction,
        @Nonnull Function<Tuple6<JobID, JobVertexID, KeyGroupRange, String, KvStateID, InetSocketAddress>, CompletableFuture<Acknowledge>> notifyKvStateRegisteredFunction,
        @Nonnull Function<Tuple4<JobID, JobVertexID, KeyGroupRange, String>, CompletableFuture<Acknowledge>> notifyKvStateUnregisteredFunction,
        @Nonnull TriFunction<String, Object, byte[], CompletableFuture<Object>> updateAggregateFunction) {
    this.address = address;
    this.hostname = hostname;
    this.cancelFunction = cancelFunction;
    this.stopFunction = stopFunction;
    this.rescalingJobFunction = rescalingJobFunction;
    this.rescalingOperatorsFunction = rescalingOperatorsFunction;
    this.updateTaskExecutionStateFunction = updateTaskExecutionStateFunction;
    this.requestNextInputSplitFunction = requestNextInputSplitFunction;
    this.requestPartitionStateFunction = requestPartitionStateFunction;
    this.scheduleOrUpdateConsumersFunction = scheduleOrUpdateConsumersFunction;
    this.disconnectTaskManagerFunction = disconnectTaskManagerFunction;
    this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
    this.classloadingPropsSupplier = classloadingPropsSupplier;
    this.offerSlotsFunction = offerSlotsFunction;
    this.failSlotConsumer = failSlotConsumer;
    this.registerTaskManagerFunction = registerTaskManagerFunction;
    this.taskManagerHeartbeatConsumer = taskManagerHeartbeatConsumer;
    this.resourceManagerHeartbeatConsumer = resourceManagerHeartbeatConsumer;
    this.requestJobDetailsSupplier = requestJobDetailsSupplier;
    this.requestJobSupplier = requestJobSupplier;
    this.triggerSavepointFunction = triggerSavepointFunction;
    this.requestOperatorBackPressureStatsFunction = requestOperatorBackPressureStatsFunction;
    this.notifyAllocationFailureConsumer = notifyAllocationFailureConsumer;
    this.acknowledgeCheckpointConsumer = acknowledgeCheckpointConsumer;
    this.declineCheckpointConsumer = declineCheckpointConsumer;
    this.fencingTokenSupplier = fencingTokenSupplier;
    this.requestKvStateLocationFunction = requestKvStateLocationFunction;
    this.notifyKvStateRegisteredFunction = notifyKvStateRegisteredFunction;
    this.notifyKvStateUnregisteredFunction = notifyKvStateUnregisteredFunction;
    this.updateAggregateFunction = updateAggregateFunction;
}
Example #30
Source File: TestingJobMasterGateway.java From flink with Apache License 2.0 | 4 votes |
@Override
public CompletableFuture<Acknowledge> notifyKvStateRegistered(
        JobID jobId,
        JobVertexID jobVertexId,
        KeyGroupRange keyGroupRange,
        String registrationName,
        KvStateID kvStateId,
        InetSocketAddress kvStateServerAddress) {
    return notifyKvStateRegisteredFunction.apply(Tuple6.of(jobId, jobVertexId, keyGroupRange, registrationName, kvStateId, kvStateServerAddress));
}