org.apache.hadoop.conf.Configurable Java Examples
The following examples show how to use org.apache.hadoop.conf.Configurable.
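The Configurable interface is Hadoop's hook for injecting a Configuration into objects that are created reflectively: an implementation stores the Configuration passed to setConf(Configuration) and returns it from getConf(), and factory code such as ReflectionUtils.newInstance() calls setConf() right after instantiation. The minimal sketch below shows both sides of that contract; MyPlugin is a hypothetical class name used only for illustration, not part of the Hadoop API.

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

// Hypothetical Configurable implementation, for illustration only.
public class MyPlugin implements Configurable {
  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf; // keep the injected configuration for later use
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  public static void main(String[] args) {
    // The injection pattern used throughout the examples below:
    // instantiate, then pass the configuration only if the object supports it.
    Object instance = new MyPlugin();
    Configuration conf = new Configuration(false); // false: skip loading default XML resources
    if (instance instanceof Configurable) {
      ((Configurable) instance).setConf(conf);
    }
    System.out.println(((Configurable) instance).getConf() != null); // prints: true
  }
}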
Example #1
Source File: TestGenericWritable.java From hadoop-gpu with Apache License 2.0
public void testBarWritable() throws Exception {
  System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
  FooGenericWritable generic = new FooGenericWritable();
  generic.setConf(conf);
  Bar bar = new Bar();
  bar.setConf(conf);
  generic.set(bar);

  // test writing generic writable
  FooGenericWritable after = (FooGenericWritable) TestWritable.testWritable(generic, conf);

  // test configuration
  System.out.println("Testing if Configuration is passed to wrapped classes");
  assertTrue(after.get() instanceof Configurable);
  assertNotNull(((Configurable) after.get()).getConf());
}
Example #2
Source File: HadoopInputSplit.java From flink with Apache License 2.0
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  // read the parent fields and the final fields
  in.defaultReadObject();

  // the job conf knows how to deserialize itself
  jobConf = new JobConf();
  jobConf.readFields(in);

  try {
    hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
  } catch (Exception e) {
    throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
  }

  if (hadoopInputSplit instanceof Configurable) {
    ((Configurable) hadoopInputSplit).setConf(this.jobConf);
  } else if (hadoopInputSplit instanceof JobConfigurable) {
    ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
  }
  hadoopInputSplit.readFields(in);
}
Example #3
Source File: HadoopInputSplit.java From Flink-CEPplus with Apache License 2.0
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  // read the parent fields and the final fields
  in.defaultReadObject();

  // the job conf knows how to deserialize itself
  jobConf = new JobConf();
  jobConf.readFields(in);

  try {
    hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
  } catch (Exception e) {
    throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
  }

  if (hadoopInputSplit instanceof Configurable) {
    ((Configurable) hadoopInputSplit).setConf(this.jobConf);
  } else if (hadoopInputSplit instanceof JobConfigurable) {
    ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
  }
  hadoopInputSplit.readFields(in);
}
Example #4
Source File: HadoopCodecFactory.java From presto with Apache License 2.0
private CompressionCodec createCompressionCodec(String codecName) {
  try {
    Class<? extends CompressionCodec> codecClass =
        classLoader.loadClass(codecName).asSubclass(CompressionCodec.class);
    Constructor<? extends CompressionCodec> constructor = codecClass.getDeclaredConstructor();
    constructor.setAccessible(true);
    CompressionCodec codec = constructor.newInstance();
    if (codec instanceof Configurable) {
      // Hadoop is crazy... you have to give codecs an empty configuration or they throw NPEs,
      // but you need to make sure the configuration doesn't "load" defaults or it spends
      // forever loading XML with no useful information
      ((Configurable) codec).setConf(new Configuration(false));
    }
    return codec;
  } catch (ReflectiveOperationException e) {
    throw new IllegalArgumentException("Unknown codec: " + codecName, e);
  }
}
Example #5
Source File: IngestJob.java From datawave with Apache License 2.0
protected void startDaemonProcesses(Configuration configuration) {
  String daemonClassNames = configuration.get(DAEMON_PROCESSES_PROPERTY);
  if (daemonClassNames == null) {
    return;
  }
  for (String className : StringUtils.split(daemonClassNames, ',')) {
    try {
      @SuppressWarnings("unchecked")
      Class<? extends Runnable> daemonClass =
          (Class<? extends Runnable>) Class.forName(className.trim());
      Runnable daemon = daemonClass.newInstance();
      if (daemon instanceof Configurable) {
        Configurable configurable = (Configurable) daemon;
        configurable.setConf(configuration);
      }
      Thread daemonThread = new Thread(daemon);
      daemonThread.setDaemon(true);
      daemonThread.start();
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
      throw new IllegalArgumentException(e);
    }
  }
}
Example #6
Source File: TestGenericWritable.java From hadoop with Apache License 2.0
public void testBarWritable() throws Exception {
  System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
  FooGenericWritable generic = new FooGenericWritable();
  generic.setConf(conf);
  Bar bar = new Bar();
  bar.setConf(conf);
  generic.set(bar);

  // test writing generic writable
  FooGenericWritable after = (FooGenericWritable) TestWritable.testWritable(generic, conf);

  // test configuration
  System.out.println("Testing if Configuration is passed to wrapped classes");
  assertTrue(after.get() instanceof Configurable);
  assertNotNull(((Configurable) after.get()).getConf());
}
Example #7
Source File: WALFile.java From streamx with Apache License 2.0
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val : The 'value' to be read.
 */
public synchronized void getCurrentValue(Writable val) throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }

  // Position stream to 'current' value
  seekToCurrentValue();

  val.readFields(valIn);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
        + " bytes, should read " + (valBuffer.getLength() - keyLength));
  }
}
Example #8
Source File: WALFile.java From streamx with Apache License 2.0
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val : The 'value' to be read.
 */
public synchronized WALEntry getCurrentValue(WALEntry val) throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }

  // Position stream to 'current' value
  seekToCurrentValue();

  val = deserializeValue(val);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
        + " bytes, should read " + (valBuffer.getLength() - keyLength));
  }
  return val;
}
Example #9
Source File: TestGenericWritable.java From big-c with Apache License 2.0
public void testBarWritable() throws Exception {
  System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
  FooGenericWritable generic = new FooGenericWritable();
  generic.setConf(conf);
  Bar bar = new Bar();
  bar.setConf(conf);
  generic.set(bar);

  // test writing generic writable
  FooGenericWritable after = (FooGenericWritable) TestWritable.testWritable(generic, conf);

  // test configuration
  System.out.println("Testing if Configuration is passed to wrapped classes");
  assertTrue(after.get() instanceof Configurable);
  assertNotNull(((Configurable) after.get()).getConf());
}
Example #10
Source File: CombinedFileRecordReader.java From Cubert with Apache License 2.0
/**
 * Create a new record reader from the original InputFormat and initialize it.
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void createNewRecordReader() throws IOException, InterruptedException {
  FileSplit split = new FileSplit(combineFileSplit.getPath(currentFileIndex),
                                  combineFileSplit.getOffset(currentFileIndex),
                                  combineFileSplit.getLength(currentFileIndex),
                                  null);

  if (split instanceof Configurable) {
    ((Configurable) split).setConf(context.getConfiguration());
  }

  current = inputFormat.createRecordReader(split, context);
  current.initialize(split, context);
}
Example #11
Source File: HadoopInputSplit.java From flink with Apache License 2.0
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  // read the parent fields and the final fields
  in.defaultReadObject();

  try {
    hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
  } catch (Exception e) {
    throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
  }

  if (needsJobConf(hadoopInputSplit)) {
    // the job conf knows how to deserialize itself
    jobConf = new JobConf();
    jobConf.readFields(in);

    if (hadoopInputSplit instanceof Configurable) {
      ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
      ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
  }

  hadoopInputSplit.readFields(in);
}
Example #12
Source File: CellBlockBuilder.java From hbase with Apache License 2.0
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec,
    CompressionCodec compressor) throws IOException {
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) {
        ((Configurable) compressor).setConf(this.conf);
      }
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
    }
    encoder.flush();
  } catch (BufferOverflowException | IndexOutOfBoundsException e) {
    throw new DoNotRetryIOException(e);
  } finally {
    os.close();
    if (poolCompressor != null) {
      CodecPool.returnCompressor(poolCompressor);
    }
  }
}
Example #13
Source File: CellBlockBuilder.java From hbase with Apache License 2.0
private ByteBuffer decompress(CompressionCodec compressor, InputStream cellBlockStream,
    int osInitialSize) throws IOException {
  // GZIPCodec fails w/ NPE if no configuration.
  if (compressor instanceof Configurable) {
    ((Configurable) compressor).setConf(this.conf);
  }
  Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
  CompressionInputStream cis = compressor.createInputStream(cellBlockStream, poolDecompressor);
  ByteBufferOutputStream bbos;
  try {
    // TODO: This is ugly. The buffer will be resized on us if we guess wrong.
    // TODO: Reuse buffers.
    bbos = new ByteBufferOutputStream(osInitialSize);
    IOUtils.copy(cis, bbos);
    bbos.close();
    return bbos.getByteBuffer();
  } finally {
    CodecPool.returnDecompressor(poolDecompressor);
  }
}
Example #14
Source File: ParquetInputFormat.java From parquet-mr with Apache License 2.0
private static UnboundRecordFilter getUnboundRecordFilterInstance(Configuration configuration) {
  Class<?> clazz = ConfigurationUtil.getClassFromConfig(configuration,
      UNBOUND_RECORD_FILTER, UnboundRecordFilter.class);
  if (clazz == null) {
    return null;
  }
  try {
    UnboundRecordFilter unboundRecordFilter = (UnboundRecordFilter) clazz.newInstance();
    if (unboundRecordFilter instanceof Configurable) {
      ((Configurable) unboundRecordFilter).setConf(configuration);
    }
    return unboundRecordFilter;
  } catch (InstantiationException | IllegalAccessException e) {
    throw new BadConfigurationException("could not instantiate unbound record filter class", e);
  }
}
Example #15
Source File: TestGenericWritable.java From RDFS with Apache License 2.0
public void testBarWritable() throws Exception {
  System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
  FooGenericWritable generic = new FooGenericWritable();
  generic.setConf(conf);
  Bar bar = new Bar();
  bar.setConf(conf);
  generic.set(bar);

  // test writing generic writable
  FooGenericWritable after = (FooGenericWritable) TestWritable.testWritable(generic, conf);

  // test configuration
  System.out.println("Testing if Configuration is passed to wrapped classes");
  assertTrue(after.get() instanceof Configurable);
  assertNotNull(((Configurable) after.get()).getConf());
}
Example #16
Source File: IFile.java From tez with Apache License 2.0
private static InputStream getDecompressedInputStreamWithBufferSize(CompressionCodec codec,
    IFileInputStream checksumIn, Decompressor decompressor, int compressedLength)
    throws IOException {
  String bufferSizeProp = TezRuntimeUtils.getBufferSizeProperty(codec);

  if (bufferSizeProp != null) {
    Configurable configurableCodec = (Configurable) codec;
    Configuration conf = configurableCodec.getConf();
    int bufSize = Math.min(compressedLength, DEFAULT_BUFFER_SIZE);
    LOG.trace("buffer size was set according to min(compressedLength, {}): {}={}",
        DEFAULT_BUFFER_SIZE, bufferSizeProp, bufSize);
    conf.setInt(bufferSizeProp, bufSize);
  }

  return codec.createInputStream(checksumIn, decompressor);
}
Example #17
Source File: TestIFile.java From tez with Apache License 2.0
@Test
public void testInMemoryBufferSize() throws IOException {
  // for smaller amount of data, codec buffer should be sized according to compressed data length
  List<KVPair> data = KVDataGen.generateTestData(false, rnd.nextInt(100));
  Writer writer = writeTestFile(false, false, data, codec);
  readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);

  Configurable configurableCodec = (Configurable) codec;
  Assert.assertEquals(writer.getCompressedLength(),
      configurableCodec.getConf().getInt(TezRuntimeUtils.getBufferSizeProperty(codec), 0));

  // buffer size cannot grow infinitely with compressed data size
  data = KVDataGen.generateTestDataOfKeySize(false, 20000, rnd.nextInt(100));
  writer = writeTestFile(false, false, data, codec);
  readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);
  Assert.assertEquals(128 * 1024,
      configurableCodec.getConf().getInt(TezRuntimeUtils.getBufferSizeProperty(codec), 0));
}
Example #18
Source File: Compression.java From hbase with Apache License 2.0
public InputStream createDecompressionStream(InputStream downStream,
    Decompressor decompressor, int downStreamBufferSize) throws IOException {
  CompressionCodec codec = getCodec(conf);
  // Set the internal buffer size to read from down stream.
  if (downStreamBufferSize > 0) {
    ((Configurable) codec).getConf().setInt("io.file.buffer.size", downStreamBufferSize);
  }
  CompressionInputStream cis = codec.createInputStream(downStream, decompressor);
  BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
  return bis2;
}
Example #19
Source File: HadoopOutputFormatBase.java From Flink-CEPplus with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configure() calls
  synchronized (CONFIGURE_MUTEX) {
    // configure MR OutputFormat if necessary
    if (this.mapredOutputFormat instanceof Configurable) {
      ((Configurable) this.mapredOutputFormat).setConf(this.jobConf);
    } else if (this.mapredOutputFormat instanceof JobConfigurable) {
      ((JobConfigurable) this.mapredOutputFormat).configure(this.jobConf);
    }
  }
}
Example #20
Source File: HadoopInputFormatBase.java From Flink-CEPplus with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configuration() calls
  synchronized (CONFIGURE_MUTEX) {
    // configure MR InputFormat if necessary
    if (this.mapredInputFormat instanceof Configurable) {
      ((Configurable) this.mapredInputFormat).setConf(this.jobConf);
    } else if (this.mapredInputFormat instanceof JobConfigurable) {
      ((JobConfigurable) this.mapredInputFormat).configure(this.jobConf);
    }
  }
}
Example #21
Source File: HadoopInputFormatBase.java From Flink-CEPplus with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configuration() calls
  synchronized (CONFIGURE_MUTEX) {
    if (mapreduceInputFormat instanceof Configurable) {
      ((Configurable) mapreduceInputFormat).setConf(configuration);
    }
  }
}
Example #22
Source File: HadoopOutputFormatBase.java From flink with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configure() calls
  synchronized (CONFIGURE_MUTEX) {
    if (this.mapreduceOutputFormat instanceof Configurable) {
      ((Configurable) this.mapreduceOutputFormat).setConf(this.configuration);
    }
  }
}
Example #23
Source File: HadoopInputFormatBase.java From flink with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configuration() calls
  synchronized (CONFIGURE_MUTEX) {
    if (mapreduceInputFormat instanceof Configurable) {
      ((Configurable) mapreduceInputFormat).setConf(configuration);
    }
  }
}
Example #24
Source File: HadoopOutputFormatBase.java From flink with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configure() calls
  synchronized (CONFIGURE_MUTEX) {
    // configure MR OutputFormat if necessary
    if (this.mapredOutputFormat instanceof Configurable) {
      ((Configurable) this.mapredOutputFormat).setConf(this.jobConf);
    } else if (this.mapredOutputFormat instanceof JobConfigurable) {
      ((JobConfigurable) this.mapredOutputFormat).configure(this.jobConf);
    }
  }
}
Example #25
Source File: HadoopOutputFormatBase.java From Flink-CEPplus with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configure() calls
  synchronized (CONFIGURE_MUTEX) {
    if (this.mapreduceOutputFormat instanceof Configurable) {
      ((Configurable) this.mapreduceOutputFormat).setConf(this.configuration);
    }
  }
}
Example #26
Source File: HadoopInputFormatBase.java From flink with Apache License 2.0
@Override
public void open(HadoopInputSplit split) throws IOException {
  // enforce sequential open() calls
  synchronized (OPEN_MUTEX) {
    this.recordReader = this.mapredInputFormat.getRecordReader(
        split.getHadoopInputSplit(), jobConf, new HadoopDummyReporter());

    if (this.recordReader instanceof Configurable) {
      ((Configurable) this.recordReader).setConf(jobConf);
    }
    key = this.recordReader.createKey();
    value = this.recordReader.createValue();
    this.fetched = false;
  }
}
Example #27
Source File: HadoopInputFormatBase.java From flink with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configuration() calls
  synchronized (CONFIGURE_MUTEX) {
    // configure MR InputFormat if necessary
    if (this.mapredInputFormat instanceof Configurable) {
      ((Configurable) this.mapredInputFormat).setConf(this.jobConf);
    } else if (this.mapredInputFormat instanceof JobConfigurable) {
      ((JobConfigurable) this.mapredInputFormat).configure(this.jobConf);
    }
  }
}
Example #28
Source File: MultiMapperSplit.java From Cubert with Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
  multiMapperIndex = in.readInt();

  // patch the conf to this multiMapperIndex
  ConfigurationDiff confDiff = new ConfigurationDiff(conf);
  confDiff.applyDiff(multiMapperIndex);

  boolean isFileSplit = in.readBoolean();
  if (isFileSplit) {
    Path file = new Path(Text.readString(in));
    long start = in.readLong();
    long length = in.readLong();
    actualSplit = new FileSplit(file, start, length, null);
  } else {
    String actualSplitClass = Text.readString(in);
    try {
      actualSplit = ClassCache.forName(actualSplitClass)
                              .asSubclass(InputSplit.class)
                              .newInstance();
      if (actualSplit instanceof Configurable) {
        ((Configurable) actualSplit).setConf(conf);
      }
      ((Writable) actualSplit).readFields(in);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
}
Example #29
Source File: HadoopInputFormatBase.java From flink with Apache License 2.0
@Override
public void configure(Configuration parameters) {
  // enforce sequential configuration() calls
  synchronized (CONFIGURE_MUTEX) {
    // configure MR InputFormat if necessary
    if (this.mapredInputFormat instanceof Configurable) {
      ((Configurable) this.mapredInputFormat).setConf(this.jobConf);
    } else if (this.mapredInputFormat instanceof JobConfigurable) {
      ((JobConfigurable) this.mapredInputFormat).configure(this.jobConf);
    }
  }
}
Example #30
Source File: FilterExpression.java From examples with Apache License 2.0
/** {@inheritDoc} */
@Override
public void setConf(Configuration conf) {
  if (expression instanceof Configurable) {
    ((Configurable) expression).setConf(conf);
  }
}