Java Code Examples for org.apache.hadoop.io.NullWritable#get()
The following examples show how to use org.apache.hadoop.io.NullWritable#get().
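NullWritable is the zero-length Writable used whenever a MapReduce key or value carries no information, and NullWritable.get() returns its shared singleton instance. As orientation before the project examples, here is a minimal, self-contained sketch (the class name is ours) showing the two properties the examples below rely on: the instance is a singleton, and it serializes to zero bytes.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;

    import org.apache.hadoop.io.NullWritable;

    public class NullWritableDemo {
      public static void main(String[] args) throws Exception {
        // get() always hands back the same shared instance.
        NullWritable a = NullWritable.get();
        NullWritable b = NullWritable.get();
        System.out.println(a == b); // true

        // Serializing it writes nothing, so it is a free placeholder
        // for the unused half of a key-value pair.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        a.write(new DataOutputStream(bytes));
        System.out.println(bytes.size()); // 0
      }
    }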
Example 1
Source File: TestFileOutputCommitter.java From hadoop with Apache License 2.0
    private void writeOutput(RecordWriter theRecordWriter,
        TaskAttemptContext context) throws IOException, InterruptedException {
      NullWritable nullWritable = NullWritable.get();

      try {
        theRecordWriter.write(key1, val1);
        theRecordWriter.write(null, nullWritable);
        theRecordWriter.write(null, val1);
        theRecordWriter.write(nullWritable, val2);
        theRecordWriter.write(key2, nullWritable);
        theRecordWriter.write(key1, null);
        theRecordWriter.write(null, null);
        theRecordWriter.write(key2, val2);
      } finally {
        theRecordWriter.close(null);
      }
    }
Example 2
Source File: TestFileOutputCommitter.java From hadoop with Apache License 2.0
    private void writeOutput(RecordWriter theRecordWriter,
        TaskAttemptContext context) throws IOException, InterruptedException {
      NullWritable nullWritable = NullWritable.get();

      try {
        theRecordWriter.write(key1, val1);
        theRecordWriter.write(null, nullWritable);
        theRecordWriter.write(null, val1);
        theRecordWriter.write(nullWritable, val2);
        theRecordWriter.write(key2, nullWritable);
        theRecordWriter.write(key1, null);
        theRecordWriter.write(null, null);
        theRecordWriter.write(key2, val2);
      } finally {
        theRecordWriter.close(context);
      }
    }
Example 3
Source File: WritableSerializer.java From flink with Apache License 2.0
    @SuppressWarnings("unchecked")
    @Override
    public T createInstance() {
      if (typeClass == NullWritable.class) {
        return (T) NullWritable.get();
      }
      return InstantiationUtil.instantiate(typeClass);
    }
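The special case in createInstance exists because NullWritable declares a private constructor, so reflective instantiation (which InstantiationUtil.instantiate relies on) fails for it; the singleton from get() is the only way to obtain an instance. A small, self-contained sketch of that behavior (the class name is ours):

    import org.apache.hadoop.io.NullWritable;

    public class NullWritableInstantiation {
      public static void main(String[] args) {
        // The only supported way to obtain an instance is the singleton:
        NullWritable nw = NullWritable.get();
        System.out.println(nw);

        // Reflective construction fails because the constructor is private,
        // which is presumably why the serializer above special-cases it.
        try {
          NullWritable.class.getDeclaredConstructor().newInstance();
        } catch (ReflectiveOperationException e) {
          System.out.println("Cannot instantiate reflectively: " + e);
        }
      }
    }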
Example 4
Source File: InputSampler.java From hadoop with Apache License 2.0
    /**
     * Write a partition file for the given job, using the Sampler provided.
     * Queries the sampler for a sample keyset, sorts by the output key
     * comparator, selects the keys for each rank, and writes to the destination
     * returned from {@link TotalOrderPartitioner#getPartitionFile}.
     */
    @SuppressWarnings("unchecked") // getInputFormat, getOutputKeyComparator
    public static <K,V> void writePartitionFile(Job job, Sampler<K,V> sampler)
        throws IOException, ClassNotFoundException, InterruptedException {
      Configuration conf = job.getConfiguration();
      final InputFormat inf =
          ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
      int numPartitions = job.getNumReduceTasks();
      K[] samples = (K[])sampler.getSample(inf, job);
      LOG.info("Using " + samples.length + " samples");
      RawComparator<K> comparator =
          (RawComparator<K>) job.getSortComparator();
      Arrays.sort(samples, comparator);
      Path dst = new Path(TotalOrderPartitioner.getPartitionFile(conf));
      FileSystem fs = dst.getFileSystem(conf);
      if (fs.exists(dst)) {
        fs.delete(dst, false);
      }
      SequenceFile.Writer writer = SequenceFile.createWriter(fs,
          conf, dst, job.getMapOutputKeyClass(), NullWritable.class);
      NullWritable nullValue = NullWritable.get();
      float stepSize = samples.length / (float) numPartitions;
      int last = -1;
      for(int i = 1; i < numPartitions; ++i) {
        int k = Math.round(stepSize * i);
        while (last >= k && comparator.compare(samples[last], samples[k]) == 0) {
          ++k;
        }
        writer.append(samples[k], nullValue);
        last = k;
      }
      writer.close();
    }
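For context, writePartitionFile is normally invoked while configuring a total-order sort job, before the job is submitted. The sketch below shows plausible wiring; the input format, paths, reducer count, and sampler parameters are all illustrative choices, not part of the example above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
    import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
    import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

    public class TotalOrderSetup {
      public static void main(String[] args) throws Exception {
        // Hypothetical job setup; args[0] points at key-value text input.
        Job job = Job.getInstance(new Configuration(), "total-order-sort");
        job.setInputFormatClass(KeyValueTextInputFormat.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        job.setMapOutputKeyClass(Text.class);
        job.setNumReduceTasks(4);

        // Tell TotalOrderPartitioner where the partition file will live;
        // writePartitionFile (shown above) writes the sampled split points
        // there, with NullWritable.get() as the value for every key.
        TotalOrderPartitioner.setPartitionFile(job.getConfiguration(),
            new Path("/tmp/_partitions")); // arbitrary example location
        job.setPartitionerClass(TotalOrderPartitioner.class);

        // Sample ~10% of records, capped at 10000 samples from 10 splits.
        InputSampler.writePartitionFile(job,
            new InputSampler.RandomSampler<Text, Text>(0.1, 10000, 10));
      }
    }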
Example 5
Source File: CompositeRecordReader.java From hadoop with Apache License 2.0
    /**
     * Create a new key common to all child RRs.
     * @throws ClassCastException if key classes differ.
     */
    @SuppressWarnings("unchecked")
    protected K createKey() {
      if (keyclass == null || keyclass.equals(NullWritable.class)) {
        return (K) NullWritable.get();
      }
      return (K) ReflectionUtils.newInstance(keyclass, getConf());
    }
Example 6
Source File: TestGridMixClasses.java From hadoop with Apache License 2.0
    @Override
    public DataInputBuffer getValue() throws IOException {
      ByteArrayOutputStream dt = new ByteArrayOutputStream();
      NullWritable key = NullWritable.get();
      key.write(new DataOutputStream(dt));
      DataInputBuffer result = new DataInputBuffer();
      byte[] b = dt.toByteArray();
      result.reset(b, 0, b.length);
      return result;
    }
Example 7
Source File: WrappedRecordReader.java From hadoop with Apache License 2.0
    /**
     * Request new key from proxied RR.
     */
    @SuppressWarnings("unchecked")
    public K createKey() {
      if (keyclass != null) {
        return (K) ReflectionUtils.newInstance(keyclass, conf);
      }
      return (K) NullWritable.get();
    }
Example 8
Source File: TestRecovery.java From hadoop with Apache License 2.0
    private void writeBadOutput(TaskAttempt attempt, Configuration conf)
        throws Exception {
      TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
          TypeConverter.fromYarn(attempt.getID()));

      TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
      RecordWriter theRecordWriter = theOutputFormat
          .getRecordWriter(tContext);

      NullWritable nullWritable = NullWritable.get();
      try {
        theRecordWriter.write(key2, val2);
        theRecordWriter.write(null, nullWritable);
        theRecordWriter.write(null, val2);
        theRecordWriter.write(nullWritable, val1);
        theRecordWriter.write(key1, nullWritable);
        theRecordWriter.write(key2, null);
        theRecordWriter.write(null, null);
        theRecordWriter.write(key1, val1);
      } finally {
        theRecordWriter.close(tContext);
      }

      OutputFormat outputFormat = ReflectionUtils.newInstance(
          tContext.getOutputFormatClass(), conf);
      OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
      committer.commitTask(tContext);
    }
Example 9
Source File: TestTotalOrderPartitioner.java From hadoop with Apache License 2.0
    public void testTotalOrderCustomComparator() throws Exception {
      TotalOrderPartitioner<Text,NullWritable> partitioner =
          new TotalOrderPartitioner<Text,NullWritable>();
      Configuration conf = new Configuration();
      Text[] revSplitStrings = Arrays.copyOf(splitStrings, splitStrings.length);
      Arrays.sort(revSplitStrings, new ReverseStringComparator());
      Path p = TestTotalOrderPartitioner.<Text>writePartitionFile(
          "totalordercustomcomparator", conf, revSplitStrings);
      conf.setBoolean(TotalOrderPartitioner.NATURAL_ORDER, false);
      conf.setClass(MRJobConfig.MAP_OUTPUT_KEY_CLASS, Text.class, Object.class);
      conf.setClass(MRJobConfig.KEY_COMPARATOR,
          ReverseStringComparator.class, RawComparator.class);
      ArrayList<Check<Text>> revCheck = new ArrayList<Check<Text>>();
      revCheck.add(new Check<Text>(new Text("aaaaa"), 9));
      revCheck.add(new Check<Text>(new Text("aaabb"), 9));
      revCheck.add(new Check<Text>(new Text("aabbb"), 9));
      revCheck.add(new Check<Text>(new Text("aaaaa"), 9));
      revCheck.add(new Check<Text>(new Text("babbb"), 8));
      revCheck.add(new Check<Text>(new Text("baabb"), 8));
      revCheck.add(new Check<Text>(new Text("yai"), 1));
      revCheck.add(new Check<Text>(new Text("yak"), 1));
      revCheck.add(new Check<Text>(new Text("z"), 0));
      revCheck.add(new Check<Text>(new Text("ddngo"), 4));
      revCheck.add(new Check<Text>(new Text("hi"), 3));
      try {
        partitioner.setConf(conf);
        NullWritable nw = NullWritable.get();
        for (Check<Text> chk : revCheck) {
          assertEquals(chk.data.toString(), chk.part,
              partitioner.getPartition(chk.data, nw, splitStrings.length + 1));
        }
      } finally {
        p.getFileSystem(conf).delete(p, true);
      }
    }
Example 10
Source File: WrappedRecordReader.java From hadoop with Apache License 2.0
    @SuppressWarnings("unchecked")
    public U createValue() {
      if (valueclass != null) {
        return (U) ReflectionUtils.newInstance(valueclass, conf);
      }
      return (U) NullWritable.get();
    }
Example 11
Source File: SparkUHCDictionary.java From kylin-on-parquet-v2 with Apache License 2.0
    @Override
    public Tuple2<String, Tuple3<Writable, Writable, String>> call(
        Tuple2<Integer, List<String>> columnValues) throws Exception {
      if (initialized == false) {
        synchronized (SparkFactDistinct.class) {
          if (initialized == false) {
            init();
          }
        }
      }

      try (KylinConfig.SetAndUnsetThreadLocalConfig autoUnset =
               KylinConfig.setAndUnsetThreadLocalConfig(config);
           ByteArrayOutputStream baos = new ByteArrayOutputStream();
           DataOutputStream outputStream = new DataOutputStream(baos)) {
        TblColRef col = uhcColumns.get(columnValues._1);
        logger.info("Processing column " + col.getName());
        if (cube.getDescriptor().getShardByColumns().contains(col)) {
          //for ShardByColumns
          builder = DictionaryGenerator.newDictionaryBuilder(col.getType());
          builder.init(null, 0, null);
        } else {
          //for GlobalDictionaryColumns
          DictionaryInfo dictionaryInfo =
              new DictionaryInfo(col.getColumnDesc(), col.getDatatype());
          String builderClass = cubeDesc.getDictionaryBuilderClass(col);
          builder = (IDictionaryBuilder) ClassUtil.newInstance(builderClass);
          builder.init(dictionaryInfo, 0, hdfsDir);
        }
        Iterator<String> values = columnValues._2.iterator();
        while (values.hasNext()) {
          builder.addValue(values.next());
        }
        Dictionary<String> dict = builder.build();
        String dictFileName =
            col.getIdentity() + "/" + col.getName() + DICT_FILE_POSTFIX;
        logger.info("Dictionary file name is " + dictFileName);

        outputStream.writeUTF(dict.getClass().getName());
        dict.write(outputStream);

        Tuple3 tuple3 = new Tuple3(NullWritable.get(),
            new ArrayPrimitiveWritable(baos.toByteArray()), dictFileName);
        return new Tuple2<>(BatchConstants.CFG_OUTPUT_DICT, tuple3);
      }
    }
Example 12
Source File: WritableSerializer.java From Flink-CEPplus with Apache License 2.0
    @SuppressWarnings("unchecked")
    @Override
    public T createInstance() {
      if (typeClass == NullWritable.class) {
        return (T) NullWritable.get();
      }
      return InstantiationUtil.instantiate(typeClass);
    }
Example 13
Source File: MDSHiveDirectVectorizedReader.java From multiple-dimension-spread with Apache License 2.0
    @Override
    public NullWritable createKey() {
      return NullWritable.get();
    }
Example 14
Source File: MySQLDumpInputFormat.java From aliyun-maxcompute-data-collectors with Apache License 2.0
    @Override
    public NullWritable getCurrentValue() {
      return NullWritable.get();
    }
Example 15
Source File: MDSHiveLineReader.java From multiple-dimension-spread with Apache License 2.0
    @Override
    public NullWritable createKey() {
      return NullWritable.get();
    }
Example 16
Source File: MyRecordReader.java From MapReduce-Demo with MIT License
    @Override
    public NullWritable getCurrentKey() throws IOException, InterruptedException {
      // Return the current key. Since we are merging whole files, the file
      // contents are assigned to the value, so the key can simply be empty.
      return NullWritable.get();
    }
Example 17
Source File: KeyOnlyTextOutputReader.java From hadoop with Apache License 2.0
    @Override
    public NullWritable getCurrentValue() throws IOException {
      return NullWritable.get();
    }
Example 18
Source File: TestTextOutputFormat.java From hadoop with Apache License 2.0
    @Test
    public void testFormat() throws Exception {
      JobConf job = new JobConf();
      job.set(JobContext.TASK_ATTEMPT_ID, attempt);
      FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
      FileOutputFormat.setWorkOutputPath(job, workDir);
      FileSystem fs = workDir.getFileSystem(job);
      if (!fs.mkdirs(workDir)) {
        fail("Failed to create output directory");
      }
      String file = "test_format.txt";

      // A reporter that does nothing
      Reporter reporter = Reporter.NULL;

      TextOutputFormat<Object,Object> theOutputFormat =
          new TextOutputFormat<Object,Object>();
      RecordWriter<Object,Object> theRecordWriter =
          theOutputFormat.getRecordWriter(localFs, job, file, reporter);

      Text key1 = new Text("key1");
      Text key2 = new Text("key2");
      Text val1 = new Text("val1");
      Text val2 = new Text("val2");
      NullWritable nullWritable = NullWritable.get();

      try {
        theRecordWriter.write(key1, val1);
        theRecordWriter.write(null, nullWritable);
        theRecordWriter.write(null, val1);
        theRecordWriter.write(nullWritable, val2);
        theRecordWriter.write(key2, nullWritable);
        theRecordWriter.write(key1, null);
        theRecordWriter.write(null, null);
        theRecordWriter.write(key2, val2);
      } finally {
        theRecordWriter.close(reporter);
      }
      File expectedFile = new File(new Path(workDir, file).toString());
      StringBuffer expectedOutput = new StringBuffer();
      expectedOutput.append(key1).append('\t').append(val1).append("\n");
      expectedOutput.append(val1).append("\n");
      expectedOutput.append(val2).append("\n");
      expectedOutput.append(key2).append("\n");
      expectedOutput.append(key1).append("\n");
      expectedOutput.append(key2).append('\t').append(val2).append("\n");
      String output = UtilsForTests.slurp(expectedFile);
      assertEquals(expectedOutput.toString(), output);
    }
Example 19
Source File: TestPipeApplication.java From hadoop with Apache License 2.0
    @Override
    public NullWritable createValue() {
      return NullWritable.get();
    }
Example 20
Source File: PipeApplicationRunnableStub.java From hadoop with Apache License 2.0
    public void binaryProtocolStub() {
      try {
        initSoket();
        System.out.println("start OK");

        // RUN_MAP.code
        // should be 3
        int answer = WritableUtils.readVInt(dataInput);
        System.out.println("RunMap:" + answer);
        TestPipeApplication.FakeSplit split = new TestPipeApplication.FakeSplit();
        readObject(split, dataInput);

        WritableUtils.readVInt(dataInput);
        WritableUtils.readVInt(dataInput);

        // end runMap
        // get InputTypes
        WritableUtils.readVInt(dataInput);
        String inText = Text.readString(dataInput);
        System.out.println("Key class:" + inText);
        inText = Text.readString(dataInput);
        System.out.println("Value class:" + inText);

        @SuppressWarnings("unused")
        int inCode = 0;

        // read all data from sender and write to output
        while ((inCode = WritableUtils.readVInt(dataInput)) == 4) {
          FloatWritable key = new FloatWritable();
          NullWritable value = NullWritable.get();
          readObject(key, dataInput);
          System.out.println("value:" + key.get());
          readObject(value, dataInput);
        }

        WritableUtils.writeVInt(dataOut, 54);

        dataOut.flush();
        dataOut.close();

      } catch (Exception x) {
        x.printStackTrace();
      } finally {
        closeSoket();
      }
    }
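Across all of these examples the underlying idiom is the same: NullWritable.get() supplies a costless placeholder for the half of a key-value pair that carries no data. As a closing illustration, here is a self-contained sketch (the path and key strings are arbitrary choices of ours) that writes and reads a keys-only SequenceFile:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class KeysOnlySequenceFile {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/keys-only.seq"); // arbitrary example path

        // Write: NullWritable.get() serializes to zero bytes, so the file
        // effectively stores keys only.
        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(path),
            SequenceFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(NullWritable.class))) {
          writer.append(new Text("alpha"), NullWritable.get());
          writer.append(new Text("beta"), NullWritable.get());
        }

        // Read the keys back; the value slot is the same shared singleton.
        try (SequenceFile.Reader reader =
            new SequenceFile.Reader(conf, SequenceFile.Reader.file(path))) {
          Text key = new Text();
          while (reader.next(key, NullWritable.get())) {
            System.out.println(key);
          }
        }
      }
    }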