Java Code Examples for org.apache.flink.api.common.operators.util.FieldList#size()
The following examples show how to use
org.apache.flink.api.common.operators.util.FieldList#size() .
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: FieldListTest.java From flink with Apache License 2.0 | 5 votes |
private static void check(FieldList set, int... elements) { if (elements == null) { assertEquals(0, set.size()); return; } assertEquals(elements.length, set.size()); // test contains for (int i : elements) { set.contains(i); } // test to array { int[] arr = set.toArray(); assertTrue(Arrays.equals(arr, elements)); } { int[] fromIter = new int[set.size()]; Iterator<Integer> iter = set.iterator(); for (int i = 0; i < fromIter.length; i++) { fromIter[i] = iter.next(); } assertFalse(iter.hasNext()); assertTrue(Arrays.equals(fromIter, elements)); } }
Example 2
Source File: Utils.java From flink with Apache License 2.0 | 5 votes |
private static boolean[] getSortOrders(FieldList keys, boolean[] orders) { if (orders == null) { orders = new boolean[keys.size()]; Arrays.fill(orders, true); } return orders; }
Example 3
Source File: Utils.java From flink with Apache License 2.0 | 5 votes |
private static boolean[] getSortOrders(FieldList keys, boolean[] orders) { if (orders == null) { orders = new boolean[keys.size()]; Arrays.fill(orders, true); } return orders; }
Example 4
Source File: Utils.java From flink with Apache License 2.0 | 5 votes |
public static Ordering createOrdering(FieldList fields, boolean[] directions) { final Ordering o = new Ordering(); for (int i = 0; i < fields.size(); i++) { o.appendOrdering(fields.get(i), null, directions == null || directions[i] ? Order.ASCENDING : Order.DESCENDING); } return o; }
Example 5
Source File: OperatorDescriptorDual.java From flink with Apache License 2.0 | 5 votes |
protected boolean checkEquivalentFieldPositionsInKeyFields(FieldList fields1, FieldList fields2, int numRelevantFields) { // check number of produced partitioning fields if(fields1.size() < numRelevantFields || fields2.size() < numRelevantFields) { return false; } else { for(int i=0; i<numRelevantFields; i++) { int pField1 = fields1.get(i); int pField2 = fields2.get(i); // check if position of both produced fields is the same in both requested fields int j; for(j=0; j<this.keys1.size(); j++) { if(this.keys1.get(j) == pField1 && this.keys2.get(j) == pField2) { break; } else if(this.keys1.get(j) != pField1 && this.keys2.get(j) != pField2) { // do nothing } else { return false; } } if(j == this.keys1.size()) { throw new CompilerException("Fields were not found in key fields."); } } } return true; }
Example 6
Source File: OperatorDescriptorDual.java From flink with Apache License 2.0 | 5 votes |
protected boolean checkEquivalentFieldPositionsInKeyFields(FieldList fields1, FieldList fields2) { // check number of produced partitioning fields if(fields1.size() != fields2.size()) { return false; } else { return checkEquivalentFieldPositionsInKeyFields(fields1, fields2, fields1.size()); } }
Example 7
Source File: OperatorDescriptorDual.java From flink with Apache License 2.0 | 5 votes |
protected boolean checkEquivalentFieldPositionsInKeyFields(FieldList fields1, FieldList fields2, int numRelevantFields) { // check number of produced partitioning fields if(fields1.size() < numRelevantFields || fields2.size() < numRelevantFields) { return false; } else { for(int i=0; i<numRelevantFields; i++) { int pField1 = fields1.get(i); int pField2 = fields2.get(i); // check if position of both produced fields is the same in both requested fields int j; for(j=0; j<this.keys1.size(); j++) { if(this.keys1.get(j) == pField1 && this.keys2.get(j) == pField2) { break; } else if(this.keys1.get(j) != pField1 && this.keys2.get(j) != pField2) { // do nothing } else { return false; } } if(j == this.keys1.size()) { throw new CompilerException("Fields were not found in key fields."); } } } return true; }
Example 8
Source File: OperatorDescriptorDual.java From flink with Apache License 2.0 | 5 votes |
protected boolean checkEquivalentFieldPositionsInKeyFields(FieldList fields1, FieldList fields2) { // check number of produced partitioning fields if(fields1.size() != fields2.size()) { return false; } else { return checkEquivalentFieldPositionsInKeyFields(fields1, fields2, fields1.size()); } }
Example 9
Source File: PartitioningReusageTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private int getPosInFieldList(int field, FieldList list) { int pos; for(pos=0; pos<list.size(); pos++) { if(field == list.get(pos)) { break; } } if(pos == list.size()) { return -1; } else { return pos; } }
Example 10
Source File: Utils.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private static boolean[] getSortOrders(FieldList keys, boolean[] orders) { if (orders == null) { orders = new boolean[keys.size()]; Arrays.fill(orders, true); } return orders; }
Example 11
Source File: Utils.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public static Ordering createOrdering(FieldList fields) { final Ordering o = new Ordering(); for (int i = 0; i < fields.size(); i++) { o.appendOrdering(fields.get(i), null, Order.ANY); } return o; }
Example 12
Source File: PartitioningReusageTest.java From flink with Apache License 2.0 | 5 votes |
private int getPosInFieldList(int field, FieldList list) { int pos; for(pos=0; pos<list.size(); pos++) { if(field == list.get(pos)) { break; } } if(pos == list.size()) { return -1; } else { return pos; } }
Example 13
Source File: OperatorDescriptorDual.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
protected boolean checkEquivalentFieldPositionsInKeyFields(FieldList fields1, FieldList fields2, int numRelevantFields) { // check number of produced partitioning fields if(fields1.size() < numRelevantFields || fields2.size() < numRelevantFields) { return false; } else { for(int i=0; i<numRelevantFields; i++) { int pField1 = fields1.get(i); int pField2 = fields2.get(i); // check if position of both produced fields is the same in both requested fields int j; for(j=0; j<this.keys1.size(); j++) { if(this.keys1.get(j) == pField1 && this.keys2.get(j) == pField2) { break; } else if(this.keys1.get(j) != pField1 && this.keys2.get(j) != pField2) { // do nothing } else { return false; } } if(j == this.keys1.size()) { throw new CompilerException("Fields were not found in key fields."); } } } return true; }
Example 14
Source File: OperatorDescriptorDual.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
protected boolean checkEquivalentFieldPositionsInKeyFields(FieldList fields1, FieldList fields2) { // check number of produced partitioning fields if(fields1.size() != fields2.size()) { return false; } else { return checkEquivalentFieldPositionsInKeyFields(fields1, fields2, fields1.size()); } }
Example 15
Source File: JavaApiPostPass.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
private static final boolean[] getSortOrders(FieldList keys, boolean[] orders) { if (orders == null) { orders = new boolean[keys.size()]; Arrays.fill(orders, true); } return orders; }
Example 16
Source File: PartitioningReusageTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) { GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties(); GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties(); if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) { // check that both inputs are hash partitioned on the same fields FieldList pFields1 = inProps1.getPartitioningFields(); FieldList pFields2 = inProps2.getPartitioningFields(); assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2, pFields1.size() == pFields2.size()); FieldList reqPFields1 = coGroup.getKeysForInput1(); FieldList reqPFields2 = coGroup.getKeysForInput2(); for(int i=0; i<pFields1.size(); i++) { // get fields int f1 = pFields1.get(i); int f2 = pFields2.get(i); // check that field positions in original key field list are identical int pos1 = getPosInFieldList(f1, reqPFields1); int pos2 = getPosInFieldList(f2, reqPFields2); if(pos1 < 0) { fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1); } if(pos2 < 0) { fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2); } if(pos1 != pos2) { fail("Inputs are not partitioned on the same key fields"); } } } else { throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroup inputs"); } }
Example 17
Source File: PartitioningReusageTest.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
private void checkValidJoinInputProperties(DualInputPlanNode join) { GlobalProperties inProps1 = join.getInput1().getGlobalProperties(); GlobalProperties inProps2 = join.getInput2().getGlobalProperties(); if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) { // check that both inputs are hash partitioned on the same fields FieldList pFields1 = inProps1.getPartitioningFields(); FieldList pFields2 = inProps2.getPartitioningFields(); assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2, pFields1.size() == pFields2.size()); FieldList reqPFields1 = join.getKeysForInput1(); FieldList reqPFields2 = join.getKeysForInput2(); for(int i=0; i<pFields1.size(); i++) { // get fields int f1 = pFields1.get(i); int f2 = pFields2.get(i); // check that field positions in original key field list are identical int pos1 = getPosInFieldList(f1, reqPFields1); int pos2 = getPosInFieldList(f2, reqPFields2); if(pos1 < 0) { fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1); } if(pos2 < 0) { fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2); } if(pos1 != pos2) { fail("Inputs are not partitioned on the same key fields"); } } } else if(inProps1.getPartitioning() == PartitioningProperty.FULL_REPLICATION && inProps2.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) { // we are good. No need to check for fields } else if(inProps1.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) { // we are good. No need to check for fields } else { throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroupinputs"); } }
Example 18
Source File: PartitioningReusageTest.java From flink with Apache License 2.0 | 4 votes |
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) { GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties(); GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties(); if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) { // check that both inputs are hash partitioned on the same fields FieldList pFields1 = inProps1.getPartitioningFields(); FieldList pFields2 = inProps2.getPartitioningFields(); assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2, pFields1.size() == pFields2.size()); FieldList reqPFields1 = coGroup.getKeysForInput1(); FieldList reqPFields2 = coGroup.getKeysForInput2(); for(int i=0; i<pFields1.size(); i++) { // get fields int f1 = pFields1.get(i); int f2 = pFields2.get(i); // check that field positions in original key field list are identical int pos1 = getPosInFieldList(f1, reqPFields1); int pos2 = getPosInFieldList(f2, reqPFields2); if(pos1 < 0) { fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1); } if(pos2 < 0) { fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2); } if(pos1 != pos2) { fail("Inputs are not partitioned on the same key fields"); } } } else { throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroup inputs"); } }
Example 19
Source File: PartitioningReusageTest.java From flink with Apache License 2.0 | 4 votes |
private void checkValidJoinInputProperties(DualInputPlanNode join) { GlobalProperties inProps1 = join.getInput1().getGlobalProperties(); GlobalProperties inProps2 = join.getInput2().getGlobalProperties(); if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) { // check that both inputs are hash partitioned on the same fields FieldList pFields1 = inProps1.getPartitioningFields(); FieldList pFields2 = inProps2.getPartitioningFields(); assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2, pFields1.size() == pFields2.size()); FieldList reqPFields1 = join.getKeysForInput1(); FieldList reqPFields2 = join.getKeysForInput2(); for(int i=0; i<pFields1.size(); i++) { // get fields int f1 = pFields1.get(i); int f2 = pFields2.get(i); // check that field positions in original key field list are identical int pos1 = getPosInFieldList(f1, reqPFields1); int pos2 = getPosInFieldList(f2, reqPFields2); if(pos1 < 0) { fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1); } if(pos2 < 0) { fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2); } if(pos1 != pos2) { fail("Inputs are not partitioned on the same key fields"); } } } else if(inProps1.getPartitioning() == PartitioningProperty.FULL_REPLICATION && inProps2.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) { // we are good. No need to check for fields } else if(inProps1.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.FULL_REPLICATION) { // we are good. No need to check for fields } else { throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroupinputs"); } }
Example 20
Source File: PartitioningReusageTest.java From flink with Apache License 2.0 | 4 votes |
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) { GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties(); GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties(); if(inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED && inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) { // check that both inputs are hash partitioned on the same fields FieldList pFields1 = inProps1.getPartitioningFields(); FieldList pFields2 = inProps2.getPartitioningFields(); assertTrue("Inputs are not the same number of fields. Input 1: "+pFields1+", Input 2: "+pFields2, pFields1.size() == pFields2.size()); FieldList reqPFields1 = coGroup.getKeysForInput1(); FieldList reqPFields2 = coGroup.getKeysForInput2(); for(int i=0; i<pFields1.size(); i++) { // get fields int f1 = pFields1.get(i); int f2 = pFields2.get(i); // check that field positions in original key field list are identical int pos1 = getPosInFieldList(f1, reqPFields1); int pos2 = getPosInFieldList(f2, reqPFields2); if(pos1 < 0) { fail("Input 1 is partitioned on field "+f1+" which is not contained in the key set "+reqPFields1); } if(pos2 < 0) { fail("Input 2 is partitioned on field "+f2+" which is not contained in the key set "+reqPFields2); } if(pos1 != pos2) { fail("Inputs are not partitioned on the same key fields"); } } } else { throw new UnsupportedOperationException("This method has only been implemented to check for hash partitioned coGroup inputs"); } }