org.apache.hadoop.io.MapWritable Java Examples
The following examples show how to use
org.apache.hadoop.io.MapWritable.
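Before looking at the project examples, the short sketch below (written for this page, not taken from any of the projects listed) shows the core MapWritable operations the examples rely on: putting and getting Writable keys and values, and round-tripping the map through Hadoop's write()/readFields() serialization using DataOutputBuffer and DataInputBuffer.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class MapWritableBasics {
    public static void main(String[] args) throws IOException {
        // A MapWritable is a Map<Writable, Writable> that is itself Writable,
        // so it can be used directly as a MapReduce key or value type.
        MapWritable map = new MapWritable();
        map.put(new Text("count"), new IntWritable(42));
        map.put(new Text("label"), new Text("example"));

        // Values come back as Writable; callers cast to the concrete type.
        IntWritable count = (IntWritable) map.get(new Text("count"));
        System.out.println("count = " + count.get());

        // Round-trip through Hadoop serialization: write(), then readFields().
        DataOutputBuffer out = new DataOutputBuffer();
        map.write(out);

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        MapWritable copy = new MapWritable();
        copy.readFields(in);

        for (Map.Entry<Writable, Writable> e : copy.entrySet()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}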
Example #1
Source File: NewInstallUserReducer.java From BigDataPlatform with GNU General Public License v3.0
@Override
protected void reduce(StatsUserDimension key, Iterable<TimeOutputValue> values, Context context)
        throws IOException, InterruptedException {
    this.unique.clear();

    // Count the number of distinct uuids
    for (TimeOutputValue value : values) {
        this.unique.add(value.getId());
    }

    MapWritable map = new MapWritable();
    map.put(new IntWritable(-1), new IntWritable(this.unique.size()));

    // Set the KPI name
    String kpiName = key.getStatsCommon().getKpi().getKpiName();
    if (KpiType.NEW_INSTALL_USER.name.equals(kpiName)) {
        // New-install users for the stats_user table
        outputValue.setKpi(KpiType.NEW_INSTALL_USER);
    } else if (KpiType.BROWSER_NEW_INSTALL_USER.name.equals(kpiName)) {
        // New-install users for stats_device_browser
        outputValue.setKpi(KpiType.BROWSER_NEW_INSTALL_USER);
    }
    outputValue.setValue(map);
    context.write(key, outputValue);
}
Example #2
Source File: CassandraHiveRecordReader.java From Hive-Cassandra with Apache License 2.0
private void populateMap(SortedMap<ByteBuffer, IColumn> cvalue, MapWritable value) {
    for (Map.Entry<ByteBuffer, IColumn> e : cvalue.entrySet()) {
        ByteBuffer k = e.getKey();
        IColumn v = e.getValue();

        if (!v.isLive()) {
            continue;
        }

        BytesWritable newKey = convertByteBuffer(k);
        BytesWritable newValue = convertByteBuffer(v.value());
        value.put(newKey, newValue);
    }
}
Example #3
Source File: NewInstallUserReducer.java From BigDataArchitect with Apache License 2.0
@Override
protected void reduce(StatsUserDimension key, Iterable<TimeOutputValue> values, Context context)
        throws IOException, InterruptedException {
    this.unique.clear();

    // Count the number of distinct uuids
    for (TimeOutputValue value : values) {
        this.unique.add(value.getId()); // uid: the user ID
    }

    MapWritable map = new MapWritable(); // analogous to a Java HashMap
    map.put(new IntWritable(-1), new IntWritable(this.unique.size()));
    outputValue.setValue(map);

    // Set the KPI name
    String kpiName = key.getStatsCommon().getKpi().getKpiName();
    if (KpiType.NEW_INSTALL_USER.name.equals(kpiName)) {
        // New-install users for the stats_user table
        outputValue.setKpi(KpiType.NEW_INSTALL_USER);
    } else if (KpiType.BROWSER_NEW_INSTALL_USER.name.equals(kpiName)) {
        // New-install users for the stats_device_browser table
        outputValue.setKpi(KpiType.BROWSER_NEW_INSTALL_USER);
    }
    context.write(key, outputValue);
}
Example #4
Source File: FilterData.java From incubator-retired-pirk with Apache License 2.0
@Override
public Boolean call(MapWritable dataElement) throws Exception {
    accum.incNumRecordsReceived(1);

    // Perform the filter
    boolean passFilter = ((DataFilter) filter).filterDataElement(dataElement, dSchema);

    if (passFilter) {
        accum.incNumRecordsAfterFilter(1);
    } else {
        // false, then we filter out the record
        accum.incNumRecordsFiltered(1);
    }

    return passFilter;
}
Example #5
Source File: QueryUtils.java From incubator-retired-pirk with Apache License 2.0
/**
 * Pulls the correct selector from the MapWritable data element given the queryType
 * <p>
 * Pulls first element of array if element is an array type
 */
public static String getSelectorByQueryType(MapWritable dataMap, QuerySchema qSchema, DataSchema dSchema) {
    String selector;

    String fieldName = qSchema.getSelectorName();
    if (dSchema.isArrayElement(fieldName)) {
        if (dataMap.get(dSchema.getTextName(fieldName)) instanceof WritableArrayWritable) {
            String[] selectorArray = ((WritableArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
            selector = selectorArray[0];
        } else {
            String[] elementArray = ((ArrayWritable) dataMap.get(dSchema.getTextName(fieldName))).toStrings();
            selector = elementArray[0];
        }
    } else {
        selector = dataMap.get(dSchema.getTextName(fieldName)).toString();
    }

    return selector;
}
Example #6
Source File: SerializationEventConverterTest.java From elasticsearch-hadoop with Apache License 2.0
@Test
public void generateEventWritable() throws Exception {
    MapWritable document = new MapWritable();
    document.put(new Text("field"), new Text("value"));

    SerializationEventConverter eventConverter = new SerializationEventConverter();

    SerializationFailure iaeFailure =
            new SerializationFailure(new IllegalArgumentException("garbage"), document, new ArrayList<String>());

    String rawEvent = eventConverter.getRawEvent(iaeFailure);
    assertThat(rawEvent, Matchers.startsWith("org.apache.hadoop.io.MapWritable@"));

    String timestamp = eventConverter.getTimestamp(iaeFailure);
    assertTrue(StringUtils.hasText(timestamp));
    assertTrue(DateUtils.parseDate(timestamp).getTime().getTime() > 1L);

    String exceptionType = eventConverter.renderExceptionType(iaeFailure);
    assertEquals("illegal_argument_exception", exceptionType);

    String exceptionMessage = eventConverter.renderExceptionMessage(iaeFailure);
    assertEquals("garbage", exceptionMessage);

    String eventMessage = eventConverter.renderEventMessage(iaeFailure);
    assertEquals("Could not construct bulk entry from record", eventMessage);
}
Example #7
Source File: HourlyActiveUserCollector.java From BigDataArchitect with Apache License 2.0
@Override
public void collect(Configuration conf, BaseDimension key, BaseStatsValueWritable value,
        PreparedStatement pstmt, IDimensionConverter converter) throws SQLException, IOException {
    StatsUserDimension statsUser = (StatsUserDimension) key;
    MapWritableValue mapWritableValue = (MapWritableValue) value;
    MapWritable map = mapWritableValue.getValue();

    // hourly_active_user
    int i = 0;
    pstmt.setInt(++i, converter.getDimensionIdByValue(statsUser.getStatsCommon().getPlatform()));
    pstmt.setInt(++i, converter.getDimensionIdByValue(statsUser.getStatsCommon().getDate()));
    pstmt.setInt(++i, converter.getDimensionIdByValue(statsUser.getStatsCommon().getKpi())); // based on the kpi

    // Set the value for each hour of the day
    for (i++; i < 28; i++) {
        int v = ((IntWritable) map.get(new IntWritable(i - 4))).get();
        pstmt.setInt(i, v);
        pstmt.setInt(i + 25, v);
    }
    pstmt.setString(i, conf.get(GlobalConstants.RUNNING_DATE_PARAMES));
    pstmt.addBatch();
}
Example #8
Source File: HiveSerializationEventConverterTest.java From elasticsearch-hadoop with Apache License 2.0
@Test
public void generateEventHiveRecordLimited() throws Exception {
    Map<Writable, Writable> map = new MapWritable();
    map.put(new Text("one"), new IntWritable(1));
    map.put(new Text("two"), new IntWritable(2));
    map.put(new Text("three"), new IntWritable(3));

    HiveType tuple = new HiveType(map, TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(
            TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.intTypeInfo)));

    SerializationEventConverter eventConverter = new SerializationEventConverter();

    SerializationFailure iaeFailure =
            new SerializationFailure(new IllegalArgumentException("garbage"), tuple, new ArrayList<String>());

    String rawEvent = eventConverter.getRawEvent(iaeFailure);
    assertThat(rawEvent, startsWith("HiveType{object=org.apache.hadoop.io.MapWritable@"));

    String timestamp = eventConverter.getTimestamp(iaeFailure);
    assertTrue(StringUtils.hasText(timestamp));
    assertTrue(DateUtils.parseDate(timestamp).getTime().getTime() > 1L);

    String exceptionType = eventConverter.renderExceptionType(iaeFailure);
    assertEquals("illegal_argument_exception", exceptionType);

    String exceptionMessage = eventConverter.renderExceptionMessage(iaeFailure);
    assertEquals("garbage", exceptionMessage);

    String eventMessage = eventConverter.renderEventMessage(iaeFailure);
    assertEquals("Could not construct bulk entry from record", eventMessage);
}
Example #9
Source File: AbstractMROldApiSearchTest.java From elasticsearch-hadoop with Apache License 2.0
private JobConf createJobConf() throws IOException {
    JobConf conf = HdpBootstrap.hadoopConfig();

    conf.setInputFormat(EsInputFormat.class);
    conf.setOutputFormat(PrintStreamOutputFormat.class);
    conf.setOutputKeyClass(Text.class);

    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class);
    conf.setOutputValueClass(mapType);

    HadoopCfgUtils.setGenericOptions(conf);
    conf.set(ConfigurationOptions.ES_QUERY, query);
    conf.setNumReduceTasks(0);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(readMetadata));
    conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, String.valueOf(readAsJson));

    new QueryTestParams(tempFolder).provisionQueries(conf);
    FileInputFormat.setInputPaths(conf, new Path(MRSuite.testData.sampleArtistsDatUri()));

    HdpBootstrap.addProperties(conf, TestSettings.TESTING_PROPS, false);
    return conf;
}
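For orientation, here is a minimal sketch of an old-API mapper that could consume the (Text, MapWritable) pairs EsInputFormat reads from Elasticsearch with this kind of configuration. It is an illustrative assumption, not part of the test above: the test itself does not register a custom mapper, and when es.output.json is enabled the values are typically delivered as JSON Text rather than MapWritable. The class name is hypothetical.

import java.io.IOException;

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical pass-through mapper matching the (Text, MapWritable) records
// produced by EsInputFormat under the old mapred API.
public class EsDocumentMapper extends MapReduceBase
        implements Mapper<Text, MapWritable, Text, MapWritable> {

    @Override
    public void map(Text docId, MapWritable doc,
            OutputCollector<Text, MapWritable> output, Reporter reporter) throws IOException {
        // Each hit arrives as a MapWritable of field name -> field value; pass it through unchanged.
        output.collect(docId, doc);
    }
}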
Example #10
Source File: HourlyActiveUserCollector.java From BigDataPlatform with GNU General Public License v3.0
@Override
public void collect(Configuration conf, BaseDimension key, BaseStatsValueWritable value,
        PreparedStatement pstmt, IDimensionConverter converter) throws SQLException, IOException {
    StatsUserDimension statsUser = (StatsUserDimension) key;
    MapWritableValue mapWritableValue = (MapWritableValue) value;
    MapWritable map = mapWritableValue.getValue();

    // hourly_active_user
    int i = 0;
    pstmt.setInt(++i, converter.getDimensionIdByValue(statsUser.getStatsCommon().getPlatform()));
    pstmt.setInt(++i, converter.getDimensionIdByValue(statsUser.getStatsCommon().getDate()));
    pstmt.setInt(++i, converter.getDimensionIdByValue(statsUser.getStatsCommon().getKpi())); // based on the kpi

    // Set the value for each hour of the day
    for (i++; i < 28; i++) {
        int v = ((IntWritable) map.get(new IntWritable(i - 4))).get();
        pstmt.setInt(i, v);
        pstmt.setInt(i + 25, v);
    }
    pstmt.setString(i, conf.get(GlobalConstants.RUNNING_DATE_PARAMS));
    pstmt.addBatch();
}
Example #11
Source File: AbstractExtraMRTests.java From elasticsearch-hadoop with Apache License 2.0
private JobConf createReadJobConf() throws IOException {
    JobConf conf = HdpBootstrap.hadoopConfig();

    conf.setInputFormat(EsInputFormat.class);
    conf.setOutputFormat(PrintStreamOutputFormat.class);
    conf.setOutputKeyClass(Text.class);

    boolean type = random.nextBoolean();
    Class<?> mapType = (type ? MapWritable.class : LinkedMapWritable.class); // note: computed but not used below
    conf.setOutputValueClass(MapWritable.class);

    HadoopCfgUtils.setGenericOptions(conf);
    conf.setNumReduceTasks(0);

    conf.set(ConfigurationOptions.ES_READ_METADATA, String.valueOf(random.nextBoolean()));
    conf.set(ConfigurationOptions.ES_READ_METADATA_VERSION, String.valueOf(true));
    conf.set(ConfigurationOptions.ES_OUTPUT_JSON, "true");

    FileInputFormat.setInputPaths(conf, new Path(MRSuite.testData.gibberishDat(conf)));
    return conf;
}
Example #12
Source File: TransactionalRegionServer.java From hbase-secondary-index with GNU General Public License v3.0
/**
 * {@inheritDoc}
 */
@Override
protected void handleReportForDutyResponse(final MapWritable c) throws IOException {
    super.handleReportForDutyResponse(c);
    initializeTHLog();

    String n = Thread.currentThread().getName();
    UncaughtExceptionHandler handler = new UncaughtExceptionHandler() {
        public void uncaughtException(final Thread t, final Throwable e) {
            abort("Set stop flag in " + t.getName(), e);
            LOG.fatal("Set stop flag in " + t.getName(), e);
        }
    };
    setDaemonThreadRunning(this.cleanOldTransactionsThread, n + ".oldTransactionCleaner", handler);
    setDaemonThreadRunning(this.transactionLeases, "Transactional leases");
}
Example #13
Source File: TSMRWriteExample.java From incubator-iotdb with Apache License 2.0
@Override
protected void reduce(Text key, Iterable<MapWritable> values,
        Reducer<Text, MapWritable, NullWritable, HDFSTSRecord>.Context context)
        throws IOException, InterruptedException {
    long sensor1_value_sum = 0;
    long sensor2_value_sum = 0;
    double sensor3_value_sum = 0;
    long num = 0;
    for (MapWritable value : values) {
        num++;
        sensor1_value_sum += ((LongWritable) value.get(new Text(Constant.SENSOR_1))).get();
        sensor2_value_sum += ((LongWritable) value.get(new Text(Constant.SENSOR_2))).get();
        sensor3_value_sum += ((DoubleWritable) value.get(new Text(Constant.SENSOR_3))).get();
    }
    HDFSTSRecord tsRecord = new HDFSTSRecord(1L, key.toString());
    if (num != 0) {
        DataPoint dPoint1 = new LongDataPoint(Constant.SENSOR_1, sensor1_value_sum / num);
        DataPoint dPoint2 = new LongDataPoint(Constant.SENSOR_2, sensor2_value_sum / num);
        DataPoint dPoint3 = new DoubleDataPoint(Constant.SENSOR_3, sensor3_value_sum / num);
        tsRecord.addTuple(dPoint1);
        tsRecord.addTuple(dPoint2);
        tsRecord.addTuple(dPoint3);
    }
    context.write(NullWritable.get(), tsRecord);
}
Example #14
Source File: XmlDataValidationMapper.java From jumbune with GNU Lesser General Public License v3.0
private void cleanOutput(MapWritable mapErrorType) {
    Set<Map.Entry<Writable, Writable>> errorTypeEntrySet = mapErrorType.entrySet();
    for (Map.Entry<Writable, Writable> errorTypeMap : errorTypeEntrySet) {
        ArrayListWritable<XMLErrorWritable> errorList = (ArrayListWritable<XMLErrorWritable>) errorTypeMap.getValue();
        for (XMLErrorWritable error : errorList) {
            error = null;
        }
        errorList.clear();
    }
    errorTypeEntrySet.clear();
}
Example #15
Source File: TypedBytesWritableInput.java From hadoop with Apache License 2.0
public Class<? extends Writable> readType() throws IOException {
    Type type = in.readType();
    if (type == null) {
        return null;
    }
    switch (type) {
    case BYTES:
        return BytesWritable.class;
    case BYTE:
        return ByteWritable.class;
    case BOOL:
        return BooleanWritable.class;
    case INT:
        return VIntWritable.class;
    case LONG:
        return VLongWritable.class;
    case FLOAT:
        return FloatWritable.class;
    case DOUBLE:
        return DoubleWritable.class;
    case STRING:
        return Text.class;
    case VECTOR:
        return ArrayWritable.class;
    case MAP:
        return MapWritable.class;
    case WRITABLE:
        return Writable.class;
    default:
        throw new RuntimeException("unknown type");
    }
}
Example #16
Source File: CamusWrapper.java From HiveKa with Apache License 2.0
public CamusWrapper(R record, long timestamp, String server, String service) {
    this.record = record;
    this.timestamp = timestamp;
    this.partitionMap = new MapWritable();
    partitionMap.put(new Text("server"), new Text(server));
    partitionMap.put(new Text("service"), new Text(service));
}
Example #17
Source File: TypedBytesWritableInput.java From big-c with Apache License 2.0
public Class<? extends Writable> readType() throws IOException {
    Type type = in.readType();
    if (type == null) {
        return null;
    }
    switch (type) {
    case BYTES:
        return BytesWritable.class;
    case BYTE:
        return ByteWritable.class;
    case BOOL:
        return BooleanWritable.class;
    case INT:
        return VIntWritable.class;
    case LONG:
        return VLongWritable.class;
    case FLOAT:
        return FloatWritable.class;
    case DOUBLE:
        return DoubleWritable.class;
    case STRING:
        return Text.class;
    case VECTOR:
        return ArrayWritable.class;
    case MAP:
        return MapWritable.class;
    case WRITABLE:
        return Writable.class;
    default:
        throw new RuntimeException("unknown type");
    }
}
Example #18
Source File: TypedBytesWritableOutput.java From big-c with Apache License 2.0
public void writeMap(MapWritable mw) throws IOException {
    out.writeMapHeader(mw.size());
    for (Map.Entry<Writable, Writable> entry : mw.entrySet()) {
        write(entry.getKey());
        write(entry.getValue());
    }
}
Example #19
Source File: TypedBytesWritableOutput.java From big-c with Apache License 2.0
public void write(Writable w) throws IOException {
    if (w instanceof TypedBytesWritable) {
        writeTypedBytes((TypedBytesWritable) w);
    } else if (w instanceof BytesWritable) {
        writeBytes((BytesWritable) w);
    } else if (w instanceof ByteWritable) {
        writeByte((ByteWritable) w);
    } else if (w instanceof BooleanWritable) {
        writeBoolean((BooleanWritable) w);
    } else if (w instanceof IntWritable) {
        writeInt((IntWritable) w);
    } else if (w instanceof VIntWritable) {
        writeVInt((VIntWritable) w);
    } else if (w instanceof LongWritable) {
        writeLong((LongWritable) w);
    } else if (w instanceof VLongWritable) {
        writeVLong((VLongWritable) w);
    } else if (w instanceof FloatWritable) {
        writeFloat((FloatWritable) w);
    } else if (w instanceof DoubleWritable) {
        writeDouble((DoubleWritable) w);
    } else if (w instanceof Text) {
        writeText((Text) w);
    } else if (w instanceof ArrayWritable) {
        writeArray((ArrayWritable) w);
    } else if (w instanceof MapWritable) {
        writeMap((MapWritable) w);
    } else if (w instanceof SortedMapWritable) {
        writeSortedMap((SortedMapWritable) w);
    } else if (w instanceof Record) {
        writeRecord((Record) w);
    } else {
        writeWritable(w); // last resort
    }
}
Example #20
Source File: HashSelectorAndPartitionData.java From incubator-retired-pirk with Apache License 2.0
public static Tuple2<Integer,List<BigInteger>> hashSelectorAndFormPartitionsBigInteger(MapWritable dataElement,
        QuerySchema qSchema, DataSchema dSchema, QueryInfo queryInfo) throws Exception {
    // Pull the selector based on the query type
    String selector = QueryUtils.getSelectorByQueryType(dataElement, qSchema, dSchema);
    int hash = KeyedHash.hash(queryInfo.getHashKey(), queryInfo.getHashBitSize(), selector);
    logger.debug("selector = " + selector + " hash = " + hash);

    // Extract the data bits based on the query type
    // Partition by the given partitionSize
    List<BigInteger> hitValPartitions = QueryUtils.partitionDataElement(dataElement, qSchema, dSchema, queryInfo.getEmbedSelector());

    return new Tuple2<>(hash, hitValPartitions);
}
Example #21
Source File: ComputeStreamingResponse.java From incubator-retired-pirk with Apache License 2.0
/**
 * Method to perform the query given an input JavaDStream of JSON
 */
public void performQuery(JavaDStream<MapWritable> input) {
    logger.info("Performing query: ");

    // Process non-overlapping windows of data of duration windowLength seconds
    // If we are using queue streams, there is no need to window
    if (!useQueueStream) {
        input.window(Durations.seconds(windowLength), Durations.seconds(windowLength));
    }

    // Extract the selectors for each dataElement based upon the query type
    // and perform a keyed hash of the selectors
    JavaPairDStream<Integer,List<BigInteger>> selectorHashToDocRDD = input.mapToPair(new HashSelectorsAndPartitionData(bVars));

    // Group by hashed selector (row) -- can combine with the line above, separating for testing and benchmarking...
    JavaPairDStream<Integer,Iterable<List<BigInteger>>> selectorGroupRDD = selectorHashToDocRDD.groupByKey();

    // Calculate the encrypted row values for each row, emit <colNum, colVal> for each row
    JavaPairDStream<Long,BigInteger> encRowRDD = selectorGroupRDD.flatMapToPair(new EncRowCalc(accum, bVars));

    // Multiply the column values by colNum: emit <colNum, finalColVal> and write the final result object
    encryptedColumnCalc(encRowRDD);

    // Start the streaming computation
    start();
}
Example #22
Source File: HiveCassandraStandardColumnInputFormat.java From Hive-Cassandra with Apache License 2.0
@Override
public org.apache.hadoop.mapreduce.RecordReader<BytesWritable, MapWritable> createRecordReader(
        org.apache.hadoop.mapreduce.InputSplit arg0, TaskAttemptContext tac)
        throws IOException, InterruptedException {
    if (isTransposed
            && tac.getConfiguration().getBoolean(AbstractColumnSerDe.CASSANDRA_ENABLE_WIDEROW_ITERATOR, true)) {
        return new CassandraHiveRecordReader(new ColumnFamilyWideRowRecordReader(), isTransposed);
    } else {
        return new CassandraHiveRecordReader(new ColumnFamilyRecordReader(), isTransposed);
    }
}
Example #23
Source File: StringUtils.java From incubator-retired-pirk with Apache License 2.0
/**
 * Method to convert a MapWritable into a JSON string
 */
@SuppressWarnings("unchecked")
public static String mapWritableToString(MapWritable map) {
    // Convert to JSON and then write to a String - ensures JSON read-in compatibility
    JSONObject jsonObj = new JSONObject();
    for (Writable key : map.keySet()) {
        jsonObj.put(key.toString(), map.get(key).toString());
    }

    return jsonObj.toJSONString();
}
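A quick illustration of how this helper might be called (hypothetical usage, not part of the Pirk sources):

// Hypothetical usage of mapWritableToString.
MapWritable record = new MapWritable();
record.put(new Text("name"), new Text("alice"));
record.put(new Text("visits"), new IntWritable(3));

// Keys and values are stringified, so every JSON value is a string,
// e.g. {"name":"alice","visits":"3"}; key order is not guaranteed.
String json = StringUtils.mapWritableToString(record);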
Example #24
Source File: SolrHiveWriter.java From hive-solr with MIT License
@Override
public void write(Writable w) throws IOException {
    MapWritable map = (MapWritable) w;
    SolrInputDocument doc = new SolrInputDocument();
    for (final Map.Entry<Writable, Writable> entry : map.entrySet()) {
        String key = entry.getKey().toString();            // extract the key
        String value = entry.getValue().toString().trim(); // null values arrive as empty strings
        // Only fields with a non-empty value are pushed to Solr; empty values are skipped
        if (value.length() > 0) {
            doc.setField(key, value);
        }
    }
    // count.incrementAndGet();
    datas.add(doc);
    // Batch processing: submit once the batch reaches batchSize
    if (datas.size() == batchSize) {
        try {
            sc.add(datas);
            // sc.commit(); // do not commit here; wait for the flush
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // clear the accumulated batch
            datas.clear();
        }
    }
}
Example #25
Source File: ParquetExportMapper.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);

    Configuration conf = context.getConfiguration();

    // Instantiate a copy of the user's class to hold and parse the record.
    String recordClassName = conf.get(ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY);
    if (null == recordClassName) {
        throw new IOException("Export table class name ("
                + ExportJobBase.SQOOP_EXPORT_TABLE_CLASS_KEY + ") is not set!");
    }

    try {
        Class cls = Class.forName(recordClassName, true,
                Thread.currentThread().getContextClassLoader());
        recordImpl = (SqoopRecord) ReflectionUtils.newInstance(cls, conf);
    } catch (ClassNotFoundException cnfe) {
        throw new IOException(cnfe);
    }

    if (null == recordImpl) {
        throw new IOException("Could not instantiate object of type " + recordClassName);
    }

    columnTypes = DefaultStringifier.load(conf, AVRO_COLUMN_TYPES_MAP, MapWritable.class);
}
Example #26
Source File: TestJdbcExportJob.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Test
public void testAvroWithMoreColumnsSpecified() throws Exception {
    SqoopOptions opts = new SqoopOptions();
    opts.setExportDir("myexportdir");
    String[] columns = { "Age", "Name", "Gender", "Address" };
    opts.setColumns(columns);
    JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
    Job job = new Job();
    jdbcExportJob.configureInputFormat(job, null, null, null);
    assertEquals(asSetOfText("Age", "Name", "Gender"),
            DefaultStringifier.load(job.getConfiguration(),
                    AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
Example #27
Source File: TestJdbcExportJob.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Test
public void testAvroWithSomeColumnsSpecified() throws Exception {
    SqoopOptions opts = new SqoopOptions();
    opts.setExportDir("myexportdir");
    String[] columns = { "Age", "Name" };
    opts.setColumns(columns);
    JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
    Job job = new Job();
    jdbcExportJob.configureInputFormat(job, null, null, null);
    assertEquals(asSetOfText("Age", "Name"),
            DefaultStringifier.load(job.getConfiguration(),
                    AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
Example #28
Source File: TestJdbcExportJob.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Test
public void testAvroWithOneColumnSpecified() throws Exception {
    SqoopOptions opts = new SqoopOptions();
    opts.setExportDir("myexportdir");
    String[] columns = { "Gender" };
    opts.setColumns(columns);
    JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
    Job job = new Job();
    jdbcExportJob.configureInputFormat(job, null, null, null);
    assertEquals(asSetOfText("Gender"),
            DefaultStringifier.load(job.getConfiguration(),
                    AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
Example #29
Source File: TestJdbcExportJob.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Test
public void testAvroWithAllColumnsSpecified() throws Exception {
    SqoopOptions opts = new SqoopOptions();
    opts.setExportDir("myexportdir");
    String[] columns = { "Age", "Name", "Gender" };
    opts.setColumns(columns);
    JdbcExportJob jdbcExportJob = stubJdbcExportJob(opts, FileType.AVRO_DATA_FILE);
    Job job = new Job();
    jdbcExportJob.configureInputFormat(job, null, null, null);
    assertEquals(asSetOfText("Age", "Name", "Gender"),
            DefaultStringifier.load(job.getConfiguration(),
                    AvroExportMapper.AVRO_COLUMN_TYPES_MAP, MapWritable.class).keySet());
}
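The four tests above compare against asSetOfText, a test helper whose definition is not shown in this listing. A plausible reconstruction (hypothetical, included only for readability) would simply wrap each column name in a Hadoop Text:

// Hypothetical reconstruction of the omitted test helper: collects the given
// column names into a Set<Text> for comparison against the loaded MapWritable keys.
private static Set<Text> asSetOfText(String... values) {
    Set<Text> set = new HashSet<Text>();
    for (String value : values) {
        set.add(new Text(value));
    }
    return set;
}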
Example #30
Source File: TypedBytesWritableOutput.java From hadoop with Apache License 2.0
public void writeMap(MapWritable mw) throws IOException {
    out.writeMapHeader(mw.size());
    for (Map.Entry<Writable, Writable> entry : mw.entrySet()) {
        write(entry.getKey());
        write(entry.getValue());
    }
}