Java Code Examples for org.apache.hadoop.mapred.JobConf#readFields()
The following examples show how to use org.apache.hadoop.mapred.JobConf#readFields(). Each example is taken from an open-source project; the source file, project, and license are noted above the snippet.
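JobConf extends org.apache.hadoop.conf.Configuration, which implements Hadoop's Writable interface, so readFields(DataInput) restores a configuration that was previously serialized with write(DataOutput). The sketch below is not taken from any of the projects above (the class name and property name are illustrative); it only shows the write/readFields round trip that the examples rely on:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.mapred.JobConf;

public class JobConfRoundTrip {

    public static void main(String[] args) throws IOException {
        // Populate a JobConf with an arbitrary property.
        JobConf original = new JobConf();
        original.set("example.property", "example-value");

        // write() serializes the configuration to any DataOutput.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // readFields() restores it from the matching DataInput,
        // which is what the examples below do during deserialization.
        JobConf restored = new JobConf();
        restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(restored.get("example.property")); // prints "example-value"
    }
}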
Example 1
Source File: HadoopInputSplit.java From Flink-CEPplus with Apache License 2.0
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    // the job conf knows how to deserialize itself
    jobConf = new JobConf();
    jobConf.readFields(in);

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (hadoopInputSplit instanceof Configurable) {
        ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
        ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
    hadoopInputSplit.readFields(in);
}
Example 2
Source File: HadoopInputSplit.java From flink with Apache License 2.0
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    // the job conf knows how to deserialize itself
    jobConf = new JobConf();
    jobConf.readFields(in);

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (hadoopInputSplit instanceof Configurable) {
        ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
        ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
    hadoopInputSplit.readFields(in);
}
Example 3
Source File: HiveWarehouseDataReaderFactory.java From spark-llap with Apache License 2.0
@Override
public DataReader<ColumnarBatch> createDataReader() {
    LlapInputSplit llapInputSplit = new LlapInputSplit();
    ByteArrayInputStream splitByteArrayStream = new ByteArrayInputStream(splitBytes);
    ByteArrayInputStream confByteArrayStream = new ByteArrayInputStream(confBytes);
    JobConf conf = new JobConf();

    try (DataInputStream splitByteData = new DataInputStream(splitByteArrayStream);
         DataInputStream confByteData = new DataInputStream(confByteArrayStream)) {
        llapInputSplit.readFields(splitByteData);
        conf.readFields(confByteData);
        return getDataReader(llapInputSplit, conf, arrowAllocatorMax);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example 4
Source File: HadoopInputSplit.java From flink with Apache License 2.0
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (needsJobConf(hadoopInputSplit)) {
        // the job conf knows how to deserialize itself
        jobConf = new JobConf();
        jobConf.readFields(in);

        if (hadoopInputSplit instanceof Configurable) {
            ((Configurable) hadoopInputSplit).setConf(this.jobConf);
        } else if (hadoopInputSplit instanceof JobConfigurable) {
            ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
        }
    }
    hadoopInputSplit.readFields(in);
}
Example 5
Source File: HadoopReduceCombineFunction.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked") private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException { Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass = (Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject(); reducer = InstantiationUtil.instantiate(reducerClass); Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>> combinerClass = (Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>>) in.readObject(); combiner = InstantiationUtil.instantiate(combinerClass); jobConf = new JobConf(); jobConf.readFields(in); }
Example 6
Source File: HadoopReduceFunction.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    reducer = InstantiationUtil.instantiate(reducerClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 7
Source File: HadoopMapFunction.java From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> mapperClass =
            (Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    mapper = InstantiationUtil.instantiate(mapperClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 8
Source File: HadoopReduceCombineFunction.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    reducer = InstantiationUtil.instantiate(reducerClass);

    Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>> combinerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>>) in.readObject();
    combiner = InstantiationUtil.instantiate(combinerClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 9
Source File: HadoopReduceFunction.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    reducer = InstantiationUtil.instantiate(reducerClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 10
Source File: HadoopMapFunction.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> mapperClass =
            (Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    mapper = InstantiationUtil.instantiate(mapperClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 11
Source File: HadoopV2TaskContext.java From ignite with Apache License 2.0
/**
 * @param taskInfo Task info.
 * @param job Job.
 * @param jobId Job ID.
 * @param locNodeId Local node ID.
 * @param jobConfDataInput DataInput for read JobConf.
 */
public HadoopV2TaskContext(HadoopTaskInfo taskInfo, HadoopJobEx job, HadoopJobId jobId,
    @Nullable UUID locNodeId, DataInput jobConfDataInput) throws IgniteCheckedException {
    super(taskInfo, job);

    this.locNodeId = locNodeId;

    // Before create JobConf instance we should set new context class loader.
    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(getClass().getClassLoader());

    try {
        JobConf jobConf = new JobConf();

        try {
            jobConf.readFields(jobConfDataInput);
        } catch (IOException e) {
            throw new IgniteCheckedException(e);
        }

        // For map-reduce jobs prefer local writes.
        jobConf.setBooleanIfUnset(PARAM_IGFS_PREFER_LOCAL_WRITES, true);

        initializePartiallyRawComparator(jobConf);

        jobCtx = new JobContextImpl(jobConf, new JobID(jobId.globalId().toString(), jobId.localId()));

        useNewMapper = jobConf.getUseNewMapper();
        useNewReducer = jobConf.getUseNewReducer();
        useNewCombiner = jobConf.getCombinerClass() == null;
    } finally {
        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}
Example 12
Source File: HadoopReduceCombineFunction.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    reducer = InstantiationUtil.instantiate(reducerClass);

    Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>> combinerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYIN, VALUEIN>>) in.readObject();
    combiner = InstantiationUtil.instantiate(combinerClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 13
Source File: HadoopReduceFunction.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> reducerClass =
            (Class<Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    reducer = InstantiationUtil.instantiate(reducerClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 14
Source File: HadoopMapFunction.java From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>> mapperClass =
            (Class<Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>>) in.readObject();
    mapper = InstantiationUtil.instantiate(mapperClass);

    jobConf = new JobConf();
    jobConf.readFields(in);
}
Example 15
Source File: KryoSerializer.java From spork with Apache License 2.0
public static JobConf deserializeJobConf(byte[] buffer) {
    JobConf conf = new JobConf();
    try {
        conf.readFields(new DataInputStream(new ByteArrayInputStream(buffer)));
    } catch (IOException e) {
        LOG.error("Error de-serializing job configuration");
        return null;
    }
    return conf;
}