org.apache.flink.api.java.hadoop.mapreduce.utils.HadoopUtils Java Examples
The following examples show how to use
org.apache.flink.api.java.hadoop.mapreduce.utils.HadoopUtils.
You can go to the original project or source file by following the links above each example.
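All of the constructors shown below call HadoopUtils.mergeHadoopConf to merge the Hadoop configuration that Flink discovers in its environment (for example via the classpath or the Flink configuration) into the Configuration they were given. As a quick orientation, here is a minimal, self-contained sketch of that call in isolation; it is not taken from any of the projects below, and the class name, input-path key, and value are purely illustrative assumptions.

import org.apache.flink.api.java.hadoop.mapreduce.utils.HadoopUtils;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class MergeHadoopConfSketch {

    public static void main(String[] args) throws Exception {
        // Create a Hadoop Job and work with its Configuration, just as the
        // HadoopInputFormatBase / HadoopOutputFormatBase constructors below do.
        Job job = Job.getInstance();
        Configuration configuration = job.getConfiguration();

        // Illustrative, job-specific setting supplied by the caller (hypothetical path).
        configuration.set("mapreduce.input.fileinputformat.inputdir", "hdfs:///tmp/input");

        // Merge the Hadoop configuration that Flink finds in its environment into this
        // job's Configuration before handing it to a Hadoop input or output format.
        HadoopUtils.mergeHadoopConf(configuration);
    }
}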
Example #1
Source File: HiveTableOutputFormat.java From flink with Apache License 2.0
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table,
        HiveTablePartition hiveTablePartition, Properties tableProperties, boolean overwrite) {
    super(jobConf.getCredentials());

    Preconditions.checkNotNull(table, "table cannot be null");
    Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
    Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");

    HadoopUtils.mergeHadoopConf(jobConf);
    this.jobConf = jobConf;
    this.tablePath = tablePath;
    this.partitionColumns = table.getPartitionKeys();
    TableSchema tableSchema = table.getSchema();
    this.fieldNames = tableSchema.getFieldNames();
    this.fieldTypes = tableSchema.getFieldDataTypes();
    this.hiveTablePartition = hiveTablePartition;
    this.tableProperties = tableProperties;
    this.overwrite = overwrite;
    isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
    isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
    hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
            "Hive version is not defined");
}
Example #2
Source File: HadoopInputFormatBase.java From Flink-CEPplus with Apache License 2.0
public HadoopInputFormatBase(org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat,
        Class<K> key, Class<V> value, Job job) {
    super(Preconditions.checkNotNull(job, "Job can not be null").getCredentials());
    this.mapreduceInputFormat = Preconditions.checkNotNull(mapreduceInputFormat);
    this.keyClass = Preconditions.checkNotNull(key);
    this.valueClass = Preconditions.checkNotNull(value);
    this.configuration = job.getConfiguration();
    HadoopUtils.mergeHadoopConf(configuration);
}
Example #3
Source File: HCatInputFormatBase.java From Flink-CEPplus with Apache License 2.0
/**
 * Creates a HCatInputFormat for the given database, table, and
 * {@link org.apache.hadoop.conf.Configuration}.
 * By default, the InputFormat returns {@link org.apache.hive.hcatalog.data.HCatRecord}.
 * The return type of the InputFormat can be changed to Flink-native tuples by calling
 * {@link HCatInputFormatBase#asFlinkTuples()}.
 *
 * @param database The name of the database to read from.
 * @param table The name of the table to read.
 * @param config The Configuration for the InputFormat.
 * @throws java.io.IOException
 */
public HCatInputFormatBase(String database, String table, Configuration config) throws IOException {
    super();
    this.configuration = config;
    HadoopUtils.mergeHadoopConf(this.configuration);

    this.hCatInputFormat = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput(this.configuration, database, table);
    this.outputSchema = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.getTableSchema(this.configuration);

    // configure output schema of HCatFormat
    configuration.set("mapreduce.lib.hcat.output.schema", HCatUtil.serialize(outputSchema));
    // set type information
    this.resultType = new WritableTypeInfo(DefaultHCatRecord.class);
}
Example #4
Source File: HadoopInputFormatBase.java From flink with Apache License 2.0
public HadoopInputFormatBase(org.apache.hadoop.mapreduce.InputFormat<K, V> mapreduceInputFormat,
        Class<K> key, Class<V> value, Job job) {
    super(Preconditions.checkNotNull(job, "Job can not be null").getCredentials());
    this.mapreduceInputFormat = Preconditions.checkNotNull(mapreduceInputFormat);
    this.keyClass = Preconditions.checkNotNull(key);
    this.valueClass = Preconditions.checkNotNull(value);
    this.configuration = job.getConfiguration();
    HadoopUtils.mergeHadoopConf(configuration);
}
Example #5
Source File: HCatInputFormatBase.java From flink with Apache License 2.0
/**
 * Creates a HCatInputFormat for the given database, table, and
 * {@link org.apache.hadoop.conf.Configuration}.
 * By default, the InputFormat returns {@link org.apache.hive.hcatalog.data.HCatRecord}.
 * The return type of the InputFormat can be changed to Flink-native tuples by calling
 * {@link HCatInputFormatBase#asFlinkTuples()}.
 *
 * @param database The name of the database to read from.
 * @param table The name of the table to read.
 * @param config The Configuration for the InputFormat.
 * @throws java.io.IOException
 */
public HCatInputFormatBase(String database, String table, Configuration config) throws IOException {
    super();
    this.configuration = config;
    HadoopUtils.mergeHadoopConf(this.configuration);

    this.hCatInputFormat = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.setInput(this.configuration, database, table);
    this.outputSchema = org.apache.hive.hcatalog.mapreduce.HCatInputFormat.getTableSchema(this.configuration);

    // configure output schema of HCatFormat
    configuration.set("mapreduce.lib.hcat.output.schema", HCatUtil.serialize(outputSchema));
    // set type information
    this.resultType = new WritableTypeInfo(DefaultHCatRecord.class);
}
Example #6
Source File: HadoopOutputFormatBase.java From Flink-CEPplus with Apache License 2.0
public HadoopOutputFormatBase(org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
    super(job.getCredentials());
    this.mapreduceOutputFormat = mapreduceOutputFormat;
    this.configuration = job.getConfiguration();
    HadoopUtils.mergeHadoopConf(configuration);
}
Example #7
Source File: HadoopOutputFormatBase.java From flink with Apache License 2.0
public HadoopOutputFormatBase(org.apache.hadoop.mapreduce.OutputFormat<K, V> mapreduceOutputFormat, Job job) {
    super(job.getCredentials());
    this.mapreduceOutputFormat = mapreduceOutputFormat;
    this.configuration = job.getConfiguration();
    HadoopUtils.mergeHadoopConf(configuration);
}