Java Code Examples for org.apache.flink.table.api.DataTypes#STRING
The following examples show how to use org.apache.flink.table.api.DataTypes#STRING.
They are taken from open-source projects; the source file, project, and license are noted above each example.
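For orientation before the examples: DataTypes.STRING() returns a DataType describing an unbounded variable-length character string; it is a shortcut for VARCHAR(2147483647). The minimal sketch below shows how it is typically combined with other DataTypes factory methods to declare a table schema. The class and field names are illustrative and do not come from the projects listed below.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class StringTypeSketch {
    public static void main(String[] args) {
        // DataTypes.STRING() is equivalent to DataTypes.VARCHAR(Integer.MAX_VALUE).
        TableSchema schema = TableSchema.builder()
                .field("word", DataTypes.STRING())   // variable-length string column
                .field("_count", DataTypes.BIGINT()) // 64-bit integer column
                .build();
        System.out.println(schema);
    }
}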
Example 1
Source File: KafkaSourceMain.java From flink-learning with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment blinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    blinkStreamEnv.setParallelism(1);
    EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
            .useBlinkPlanner()
            .inStreamingMode()
            .build();
    StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(blinkStreamEnv, blinkStreamSettings);

    // Read raw strings from Kafka and expose them as a table with a single "word" column.
    ParameterTool parameterTool = ExecutionEnvUtil.PARAMETER_TOOL;
    Properties properties = KafkaConfigUtil.buildKafkaProps(parameterTool);
    DataStream<String> dataStream = blinkStreamEnv.addSource(new FlinkKafkaConsumer011<>(
            parameterTool.get("kafka.topic"), new SimpleStringSchema(), properties));
    Table table = blinkStreamTableEnv.fromDataStream(dataStream, "word");
    blinkStreamTableEnv.registerTable("kafkaDataStream", table);

    // The sink schema pairs a BIGINT count with a STRING word.
    RetractStreamTableSink<Row> retractStreamTableSink = new MyRetractStreamTableSink(
            new String[]{"_count", "word"},
            new DataType[]{DataTypes.BIGINT(), DataTypes.STRING()});
    blinkStreamTableEnv.registerTableSink("sinkTable", retractStreamTableSink);

    Table wordCount = blinkStreamTableEnv.sqlQuery(
            "SELECT count(word) AS _count,word FROM kafkaDataStream GROUP BY word");
    wordCount.insertInto("sinkTable");

    blinkStreamTableEnv.execute("Blink Kafka Table Source");
}
Example 2
Source File: CustomKafkaSourceMain.java From flink-learning with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment blinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    blinkStreamEnv.setParallelism(1);
    EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
            .useBlinkPlanner()
            .inStreamingMode()
            .build();
    StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(blinkStreamEnv, blinkStreamSettings);

    blinkStreamTableEnv.registerTableSource("kafkaDataStream", new MyKafkaTableSource(ExecutionEnvUtil.PARAMETER_TOOL));

    RetractStreamTableSink<Row> retractStreamTableSink = new MyRetractStreamTableSink(
            new String[]{"_count", "word"},
            new DataType[]{DataTypes.BIGINT(), DataTypes.STRING()});
    blinkStreamTableEnv.registerTableSink("sinkTable", retractStreamTableSink);

    Table wordCount = blinkStreamTableEnv.sqlQuery(
            "SELECT count(word) AS _count,word FROM kafkaDataStream GROUP BY word");
    wordCount.insertInto("sinkTable");

    blinkStreamTableEnv.execute("Blink Custom Kafka Table Source");
}
Example 3
Source File: HiveCatalogDataTypeTest.java From flink with Apache License 2.0

@Test
public void testDataTypes() throws Exception {
    DataType[] types = new DataType[] {
            DataTypes.TINYINT(),
            DataTypes.SMALLINT(),
            DataTypes.INT(),
            DataTypes.BIGINT(),
            DataTypes.FLOAT(),
            DataTypes.DOUBLE(),
            DataTypes.BOOLEAN(),
            DataTypes.STRING(),
            DataTypes.BYTES(),
            DataTypes.DATE(),
            DataTypes.TIMESTAMP(),
            DataTypes.CHAR(HiveChar.MAX_CHAR_LENGTH),
            DataTypes.VARCHAR(HiveVarchar.MAX_VARCHAR_LENGTH),
            DataTypes.DECIMAL(5, 3)
    };

    verifyDataTypes(types);
}
Example 4
Source File: HiveGenericUDTFTest.java From flink with Apache License 2.0

@Test
public void testSplit() throws Exception {
    Object[] constantArgs = new Object[] { null };
    DataType[] dataTypes = new DataType[] { DataTypes.STRING() };

    HiveGenericUDTF udf = init(TestSplitUDTF.class, constantArgs, dataTypes);
    udf.eval("1,2,3,5");

    assertEquals(
            Arrays.asList(Row.of("1"), Row.of("2"), Row.of("3"), Row.of("5")),
            collector.result);
}
Example 5
Source File: HiveModuleTest.java From flink with Apache License 2.0

@Test
public void testHiveBuiltInFunction() {
    FunctionDefinition fd = new HiveModule().getFunctionDefinition("reverse").get();
    ScalarFunction func = ((ScalarFunctionDefinition) fd).getScalarFunction();
    HiveSimpleUDF udf = (HiveSimpleUDF) func;

    DataType[] inputType = new DataType[] { DataTypes.STRING() };
    udf.setArgumentTypesAndConstants(new Object[0], inputType);
    udf.getHiveResultType(new Object[0], inputType);
    udf.open(null);

    assertEquals("cba", udf.eval("abc"));
}
Example 6
Source File: HiveCatalogDataTypeTest.java From flink with Apache License 2.0

@Test
public void testDataTypes() throws Exception {
    DataType[] types = new DataType[] {
            DataTypes.TINYINT(),
            DataTypes.SMALLINT(),
            DataTypes.INT(),
            DataTypes.BIGINT(),
            DataTypes.FLOAT(),
            DataTypes.DOUBLE(),
            DataTypes.BOOLEAN(),
            DataTypes.STRING(),
            DataTypes.BYTES(),
            DataTypes.DATE(),
            DataTypes.TIMESTAMP(9),
            DataTypes.CHAR(HiveChar.MAX_CHAR_LENGTH),
            DataTypes.VARCHAR(HiveVarchar.MAX_VARCHAR_LENGTH),
            DataTypes.DECIMAL(5, 3)
    };

    verifyDataTypes(types);
}
Example 7
Source File: SchemaUtils.java From pulsar-flink with Apache License 2.0

public static DataType si2SqlType(SchemaInfo si) throws IncompatibleSchemaException {
    switch (si.getType()) {
        case NONE:
        case BYTES:
            return DataTypes.BYTES();
        case BOOLEAN:
            return DataTypes.BOOLEAN();
        case DATE:
            return DataTypes.DATE();
        case STRING:
            return DataTypes.STRING();
        case TIMESTAMP:
            return DataTypes.TIMESTAMP(3).bridgedTo(java.sql.Timestamp.class);
        case INT8:
            return DataTypes.TINYINT();
        case DOUBLE:
            return DataTypes.DOUBLE();
        case FLOAT:
            return DataTypes.FLOAT();
        case INT32:
            return DataTypes.INT();
        case INT64:
            return DataTypes.BIGINT();
        case INT16:
            return DataTypes.SMALLINT();
        case AVRO:
        case JSON:
            Schema avroSchema = new Schema.Parser().parse(
                    new String(si.getSchema(), StandardCharsets.UTF_8));
            return avro2SqlType(avroSchema, Collections.emptySet());
        default:
            throw new UnsupportedOperationException(
                    String.format("We do not support %s currently.", si.getType()));
    }
}
Example 8
Source File: KuduTypeUtils.java From bahir-flink with Apache License 2.0

public static DataType toFlinkType(Type type, ColumnTypeAttributes typeAttributes) {
    switch (type) {
        case STRING:
            return DataTypes.STRING();
        case FLOAT:
            return DataTypes.FLOAT();
        case INT8:
            return DataTypes.TINYINT();
        case INT16:
            return DataTypes.SMALLINT();
        case INT32:
            return DataTypes.INT();
        case INT64:
            return DataTypes.BIGINT();
        case DOUBLE:
            return DataTypes.DOUBLE();
        case DECIMAL:
            return DataTypes.DECIMAL(typeAttributes.getPrecision(), typeAttributes.getScale());
        case BOOL:
            return DataTypes.BOOLEAN();
        case BINARY:
            return DataTypes.BYTES();
        case UNIXTIME_MICROS:
            return new AtomicDataType(new TimestampType(3), Timestamp.class);
        default:
            throw new IllegalArgumentException("Illegal var type: " + type);
    }
}
Example 9
Source File: HiveTypeUtil.java From flink with Apache License 2.0

private static DataType toFlinkPrimitiveType(PrimitiveTypeInfo hiveType) {
    checkNotNull(hiveType, "hiveType cannot be null");

    switch (hiveType.getPrimitiveCategory()) {
        case CHAR:
            return DataTypes.CHAR(((CharTypeInfo) hiveType).getLength());
        case VARCHAR:
            return DataTypes.VARCHAR(((VarcharTypeInfo) hiveType).getLength());
        case STRING:
            return DataTypes.STRING();
        case BOOLEAN:
            return DataTypes.BOOLEAN();
        case BYTE:
            return DataTypes.TINYINT();
        case SHORT:
            return DataTypes.SMALLINT();
        case INT:
            return DataTypes.INT();
        case LONG:
            return DataTypes.BIGINT();
        case FLOAT:
            return DataTypes.FLOAT();
        case DOUBLE:
            return DataTypes.DOUBLE();
        case DATE:
            return DataTypes.DATE();
        case TIMESTAMP:
            return DataTypes.TIMESTAMP();
        case BINARY:
            return DataTypes.BYTES();
        case DECIMAL:
            DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) hiveType;
            return DataTypes.DECIMAL(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
        default:
            throw new UnsupportedOperationException(
                    String.format("Flink doesn't support Hive primitive type %s yet", hiveType));
    }
}
Example 10
Source File: HiveGenericUDTFTest.java From flink with Apache License 2.0

@Test
public void testStack() throws Exception {
    Object[] constantArgs = new Object[] { 2, null, null, null, null };
    DataType[] dataTypes = new DataType[] {
            DataTypes.INT(), DataTypes.STRING(), DataTypes.STRING(), DataTypes.STRING(), DataTypes.STRING()
    };

    HiveGenericUDTF udf = init(GenericUDTFStack.class, constantArgs, dataTypes);
    udf.eval(2, "a", "b", "c", "d");

    assertEquals(Arrays.asList(Row.of("a", "b"), Row.of("c", "d")), collector.result);
}
Example 11
Source File: HiveTypeUtil.java From flink with Apache License 2.0

private static DataType toFlinkPrimitiveType(PrimitiveTypeInfo hiveType) {
    checkNotNull(hiveType, "hiveType cannot be null");

    switch (hiveType.getPrimitiveCategory()) {
        case CHAR:
            return DataTypes.CHAR(((CharTypeInfo) hiveType).getLength());
        case VARCHAR:
            return DataTypes.VARCHAR(((VarcharTypeInfo) hiveType).getLength());
        case STRING:
            return DataTypes.STRING();
        case BOOLEAN:
            return DataTypes.BOOLEAN();
        case BYTE:
            return DataTypes.TINYINT();
        case SHORT:
            return DataTypes.SMALLINT();
        case INT:
            return DataTypes.INT();
        case LONG:
            return DataTypes.BIGINT();
        case FLOAT:
            return DataTypes.FLOAT();
        case DOUBLE:
            return DataTypes.DOUBLE();
        case DATE:
            return DataTypes.DATE();
        case TIMESTAMP:
            return DataTypes.TIMESTAMP(9);
        case BINARY:
            return DataTypes.BYTES();
        case DECIMAL:
            DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) hiveType;
            return DataTypes.DECIMAL(decimalTypeInfo.getPrecision(), decimalTypeInfo.getScale());
        default:
            throw new UnsupportedOperationException(
                    String.format("Flink doesn't support Hive primitive type %s yet", hiveType));
    }
}
Example 12
Source File: PostgresCatalog.java From flink with Apache License 2.0

/**
 * Converts Postgres type to Flink {@link DataType}.
 *
 * @see org.postgresql.jdbc.TypeInfoCache
 */
private DataType fromJDBCType(ResultSetMetaData metadata, int colIndex) throws SQLException {
    String pgType = metadata.getColumnTypeName(colIndex);
    int precision = metadata.getPrecision(colIndex);
    int scale = metadata.getScale(colIndex);

    switch (pgType) {
        case PG_BOOLEAN:
            return DataTypes.BOOLEAN();
        case PG_BOOLEAN_ARRAY:
            return DataTypes.ARRAY(DataTypes.BOOLEAN());
        case PG_BYTEA:
            return DataTypes.BYTES();
        case PG_BYTEA_ARRAY:
            return DataTypes.ARRAY(DataTypes.BYTES());
        case PG_SMALLINT:
            return DataTypes.SMALLINT();
        case PG_SMALLINT_ARRAY:
            return DataTypes.ARRAY(DataTypes.SMALLINT());
        case PG_INTEGER:
        case PG_SERIAL:
            return DataTypes.INT();
        case PG_INTEGER_ARRAY:
            return DataTypes.ARRAY(DataTypes.INT());
        case PG_BIGINT:
        case PG_BIGSERIAL:
            return DataTypes.BIGINT();
        case PG_BIGINT_ARRAY:
            return DataTypes.ARRAY(DataTypes.BIGINT());
        case PG_REAL:
            return DataTypes.FLOAT();
        case PG_REAL_ARRAY:
            return DataTypes.ARRAY(DataTypes.FLOAT());
        case PG_DOUBLE_PRECISION:
            return DataTypes.DOUBLE();
        case PG_DOUBLE_PRECISION_ARRAY:
            return DataTypes.ARRAY(DataTypes.DOUBLE());
        case PG_NUMERIC:
            // see SPARK-26538: handle numeric without explicit precision and scale.
            if (precision > 0) {
                return DataTypes.DECIMAL(precision, metadata.getScale(colIndex));
            }
            return DataTypes.DECIMAL(DecimalType.MAX_PRECISION, 18);
        case PG_NUMERIC_ARRAY:
            // see SPARK-26538: handle numeric without explicit precision and scale.
            if (precision > 0) {
                return DataTypes.ARRAY(DataTypes.DECIMAL(precision, metadata.getScale(colIndex)));
            }
            return DataTypes.ARRAY(DataTypes.DECIMAL(DecimalType.MAX_PRECISION, 18));
        case PG_CHAR:
        case PG_CHARACTER:
            return DataTypes.CHAR(precision);
        case PG_CHAR_ARRAY:
        case PG_CHARACTER_ARRAY:
            return DataTypes.ARRAY(DataTypes.CHAR(precision));
        case PG_CHARACTER_VARYING:
            return DataTypes.VARCHAR(precision);
        case PG_CHARACTER_VARYING_ARRAY:
            return DataTypes.ARRAY(DataTypes.VARCHAR(precision));
        case PG_TEXT:
            return DataTypes.STRING();
        case PG_TEXT_ARRAY:
            return DataTypes.ARRAY(DataTypes.STRING());
        case PG_TIMESTAMP:
            return DataTypes.TIMESTAMP(scale);
        case PG_TIMESTAMP_ARRAY:
            return DataTypes.ARRAY(DataTypes.TIMESTAMP(scale));
        case PG_TIMESTAMPTZ:
            return DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(scale);
        case PG_TIMESTAMPTZ_ARRAY:
            return DataTypes.ARRAY(DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(scale));
        case PG_TIME:
            return DataTypes.TIME(scale);
        case PG_TIME_ARRAY:
            return DataTypes.ARRAY(DataTypes.TIME(scale));
        case PG_DATE:
            return DataTypes.DATE();
        case PG_DATE_ARRAY:
            return DataTypes.ARRAY(DataTypes.DATE());
        default:
            throw new UnsupportedOperationException(
                    String.format("Doesn't support Postgres type '%s' yet", pgType));
    }
}
Example 13
Source File: SingleValueAggFunction.java From flink with Apache License 2.0

@Override
public DataType getResultType() {
    return DataTypes.STRING();
}
Example 14
Source File: LeadLagAggFunction.java From flink with Apache License 2.0

@Override
public DataType getResultType() {
    return DataTypes.STRING();
}
Example 15
Source File: MinAggFunction.java From flink with Apache License 2.0

@Override
public DataType getResultType() {
    return DataTypes.STRING();
}
Example 16
Source File: MyKafkaTableSource.java From flink-learning with Apache License 2.0

@Override
public DataType getProducedDataType() {
    // Must not be omitted, otherwise an error is thrown.
    return DataTypes.STRING();
}
Example 17
Source File: MaxAggFunction.java From flink with Apache License 2.0

@Override
public DataType getResultType() {
    return DataTypes.STRING();
}