Java Code Examples for org.apache.hadoop.io.IntWritable#set()
The following examples show how to use org.apache.hadoop.io.IntWritable#set().
They are drawn from open source projects; the header above each example names the source file, project, and license.
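Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what set() does: IntWritable is a mutable box around a primitive int, and set() replaces the boxed value in place.

import org.apache.hadoop.io.IntWritable;

public class IntWritableSetDemo {
  public static void main(String[] args) {
    IntWritable w = new IntWritable();   // defaults to 0
    w.set(42);                           // replace the boxed value in place
    int v = w.get();                     // read it back as a primitive int
    System.out.println(v);               // prints 42
  }
}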
Example 1
Source File: TestJoinDatamerge.java From big-c with Apache License 2.0
public void map(IntWritable key, TupleWritable val, Context context)
    throws IOException, InterruptedException {
  int k = key.get();
  final String kvstr = "Unexpected tuple: " + stringify(key, val);
  assertTrue(kvstr, 0 == k % (srcs * srcs));
  for (int i = 0; i < val.size(); ++i) {
    final int vali = ((IntWritable) val.get(i)).get();
    assertTrue(kvstr, (vali - i) * srcs == 10 * k);
  }
  context.write(key, one);
  // If the user modifies the key or any of the values in the tuple, it
  // should not affect the rest of the join.
  key.set(-1);
  if (val.has(0)) {
    ((IntWritable) val.get(0)).set(0);
  }
}
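The comment in this test highlights the idiom that motivates most of the examples on this page: Hadoop reuses Writable instances across records, so code that needs to keep a key or value beyond the current call must copy it. A hedged sketch of that defensive copy using WritableUtils.clone() (the test above only verifies that mutation does not leak into the join; it does not itself clone):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.WritableUtils;

class KeyKeeper {
  private final Configuration conf = new Configuration();

  // Sketch: take a private copy of a reused key so that later set() calls
  // by the framework (or by tests like the one above) cannot change it.
  IntWritable keep(IntWritable reusedKey) {
    return WritableUtils.clone(reusedKey, conf);
  }
}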
Example 2
Source File: TaskTracker.java From hadoop-gpu with Apache License 2.0
/**
 * Queries the job tracker for a set of outputs ready to be copied.
 * @param fromEventId the first event ID we want to start from; this is
 *                    modified by the call to this method
 * @param jobClient the job tracker
 * @return a set of locations to copy outputs from
 * @throws IOException
 */
private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId,
    JobID jobId, InterTrackerProtocol jobClient) throws IOException {
  TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents(
      jobId, fromEventId.get(), probe_sample_size);
  // We are interested in map task completion events only, so store
  // only those.
  List<TaskCompletionEvent> recentMapEvents =
      new ArrayList<TaskCompletionEvent>();
  for (int i = 0; i < t.length; i++) {
    if (t[i].isMap) {
      recentMapEvents.add(t[i]);
    }
  }
  fromEventId.set(fromEventId.get() + t.length);
  return recentMapEvents;
}
Example 3
Source File: Counters.java From hadoop-gpu with Apache License 2.0
private static String getBlock(String str, char open, char close,
    IntWritable index) throws ParseException {
  StringBuilder split = new StringBuilder();
  int next = StringUtils.findNext(str, open, StringUtils.ESCAPE_CHAR,
      index.get(), split);
  split.setLength(0); // clear the buffer
  if (next >= 0) {
    ++next; // move over '('
    next = StringUtils.findNext(str, close, StringUtils.ESCAPE_CHAR,
        next, split);
    if (next >= 0) {
      ++next; // move over ')'
      index.set(next);
      return split.toString(); // found a block
    } else {
      throw new ParseException("Unexpected end of block", next);
    }
  }
  return null; // found nothing
}
Example 4
Source File: TestJoinDatamerge.java From hadoop with Apache License 2.0
public void map(IntWritable key, TupleWritable val, Context context)
    throws IOException, InterruptedException {
  int k = key.get();
  final String kvstr = "Unexpected tuple: " + stringify(key, val);
  assertTrue(kvstr, 0 == k % (srcs * srcs));
  for (int i = 0; i < val.size(); ++i) {
    final int vali = ((IntWritable) val.get(i)).get();
    assertTrue(kvstr, (vali - i) * srcs == 10 * k);
  }
  context.write(key, one);
  // If the user modifies the key or any of the values in the tuple, it
  // should not affect the rest of the join.
  key.set(-1);
  if (val.has(0)) {
    ((IntWritable) val.get(0)).set(0);
  }
}
Example 5
Source File: TestRecordReaderValueIterator.java From hudi with Apache License 2.0
@Override
public boolean next(IntWritable key, Text value) {
  if (currIndex >= entries.size()) {
    return false;
  }
  // Refill the caller-supplied key and value in place instead of allocating.
  key.set(entries.get(currIndex).getLeft());
  value.set(entries.get(currIndex).getRight());
  currIndex++;
  return true;
}
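The old mapred API contract behind this example is that the caller allocates the key and value once and next() refills them via set(). A sketch of the consuming side (a hypothetical helper, not from the hudi test):

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.RecordReader;

class ReaderLoop {
  // Sketch of the caller's side of the contract: the key and value are
  // allocated once and refilled by next() via set() on each iteration.
  static int countRecords(RecordReader<IntWritable, Text> reader)
      throws IOException {
    IntWritable key = reader.createKey();
    Text value = reader.createValue();
    int n = 0;
    while (reader.next(key, value)) {
      n++; // copy key/value here if they must outlive this iteration
    }
    return n;
  }
}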
Example 6
Source File: TaskTracker.java From hadoop-gpu with Apache License 2.0
private void addFreeSlot(IntWritable numFreeSlots, int maxSlots, String caller) {
  synchronized (numFreeSlots) {
    numFreeSlots.set(numFreeSlots.get() + 1);
    assert (numFreeSlots.get() <= maxSlots);
    LOG.info(caller + " : current free slots : " + numFreeSlots.get());
    numFreeSlots.notifyAll();
  }
}
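This works because a single IntWritable doubles as both the shared counter and the monitor guarding it. A consumer side would wait on the same object; the following is a hypothetical sketch for illustration, not code from TaskTracker:

// Hypothetical counterpart (not from TaskTracker): block until a slot is
// free, then claim it. Uses the same IntWritable as counter and monitor.
private void takeFreeSlot(IntWritable numFreeSlots) throws InterruptedException {
  synchronized (numFreeSlots) {
    while (numFreeSlots.get() <= 0) {
      numFreeSlots.wait(); // woken by notifyAll() in addFreeSlot()
    }
    numFreeSlots.set(numFreeSlots.get() - 1);
  }
}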
Example 7
Source File: TestDatamerge.java From hadoop-gpu with Apache License 2.0
private static Path[] writeSimpleSrc(Path testdir, Configuration conf,
    int srcs) throws IOException {
  SequenceFile.Writer out[] = null;
  Path[] src = new Path[srcs];
  try {
    out = createWriters(testdir, conf, srcs, src);
    final int capacity = srcs * 2 + 1;
    IntWritable key = new IntWritable();
    IntWritable val = new IntWritable();
    for (int k = 0; k < capacity; ++k) {
      for (int i = 0; i < srcs; ++i) {
        key.set(k % srcs == 0 ? k * srcs : k * srcs + i);
        val.set(10 * k + i);
        out[i].append(key, val);
        if (i == k) {
          // add duplicate key
          out[i].append(key, val);
        }
      }
    }
  } finally {
    if (out != null) {
      for (int i = 0; i < srcs; ++i) {
        if (out[i] != null) out[i].close();
      }
    }
  }
  return src;
}
Example 8
Source File: TestDatamerge.java From RDFS with Apache License 2.0
private static Path[] writeSimpleSrc(Path testdir, Configuration conf,
    int srcs) throws IOException {
  SequenceFile.Writer out[] = null;
  Path[] src = new Path[srcs];
  try {
    out = createWriters(testdir, conf, srcs, src);
    final int capacity = srcs * 2 + 1;
    IntWritable key = new IntWritable();
    IntWritable val = new IntWritable();
    for (int k = 0; k < capacity; ++k) {
      for (int i = 0; i < srcs; ++i) {
        key.set(k % srcs == 0 ? k * srcs : k * srcs + i);
        val.set(10 * k + i);
        out[i].append(key, val);
        if (i == k) {
          // add duplicate key
          out[i].append(key, val);
        }
      }
    }
  } finally {
    if (out != null) {
      for (int i = 0; i < srcs; ++i) {
        if (out[i] != null) out[i].close();
      }
    }
  }
  return src;
}
Example 9
Source File: KVDataGen.java From tez with Apache License 2.0
/**
 * Generates key/value pairs for the given number of keys.
 *
 * @param sorted whether data should be sorted by key
 * @param keys number of keys
 * @param repeatCount number of keys to be repeated
 * @return the generated list of key/value pairs
 */
public static List<KVPair> generateTestDataOfKeySize(boolean sorted, int keys,
    int repeatCount) {
  List<KVPair> data = new LinkedList<KVPair>();
  Random rnd = new Random();
  KVPair kvp = null;
  for (int i = 0; i < keys; i++) {
    String keyStr = (sorted) ? ("key" + i) : (rnd.nextLong() + "key" + i);
    Text key = new Text(keyStr);
    IntWritable value = new IntWritable(i + repeatCount);
    kvp = new KVPair(key, value);
    data.add(kvp);
    if ((repeatCount > 0) && (i % 2 == 0)) {
      // Repeat this key a random number of times. Note that the repeated
      // pairs share the same Text and IntWritable instances, so each set()
      // call also changes the value seen through the pairs added earlier.
      int count = rnd.nextInt(5);
      for (int j = 0; j < count; j++) {
        repeatCount++;
        value.set(i + rnd.nextInt());
        kvp = new KVPair(key, value);
        data.add(kvp);
      }
    }
  }
  // If we need to generate repeated keys, try to add some repeated keys
  // to the end of the file as well.
  if (repeatCount > 0 && kvp != null) {
    data.add(kvp);
    data.add(kvp);
  }
  return data;
}
Example 10
Source File: ShuffleScheduler.java From tez with Apache License 2.0
private int incrementAndGetFailureAttempt(InputAttemptIdentifier srcAttempt) {
  int failures = 1;
  if (failureCounts.containsKey(srcAttempt)) {
    IntWritable x = failureCounts.get(srcAttempt);
    x.set(x.get() + 1);
    failures = x.get();
  } else {
    failureCounts.put(srcAttempt, new IntWritable(1));
  }
  return failures;
}
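Because IntWritable is mutable, the counter is bumped in place with set() and never re-inserted into the map. For contrast, a boxed-Integer version of the same logic (a hypothetical rewrite, not from ShuffleScheduler) must allocate and put on every increment:

import java.util.HashMap;
import java.util.Map;

class FailureCounter<K> {
  private final Map<K, Integer> counts = new HashMap<K, Integer>();

  // Boxed-Integer equivalent of incrementAndGetFailureAttempt(): each call
  // creates a new Integer and re-inserts it, where IntWritable mutates in place.
  int incrementAndGet(K key) {
    Integer old = counts.get(key);
    int failures = (old == null) ? 1 : old + 1;
    counts.put(key, failures);
    return failures;
  }
}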
Example 11
Source File: TestMiniMRLocalFS.java From hadoop-gpu with Apache License 2.0
public boolean next(IntWritable key, Text value) throws IOException {
  if (index < past) {
    key.set(index);
    value.set(data[index]);
    index += 1;
    return true;
  }
  return false;
}
Example 12
Source File: ProbabilisticTopicModelBaseUDTF.java From incubator-hivemall with Apache License 2.0
protected void forwardModel() throws HiveException {
  final IntWritable topicIdx = new IntWritable();
  final Text word = new Text();
  final FloatWritable score = new FloatWritable();

  final Object[] forwardObjs = new Object[3];
  forwardObjs[0] = topicIdx;
  forwardObjs[1] = word;
  forwardObjs[2] = score;

  for (int k = 0; k < topics; k++) {
    topicIdx.set(k);

    final SortedMap<Float, List<String>> topicWords = model.getTopicWords(k);
    if (topicWords == null) {
      continue;
    }

    for (Map.Entry<Float, List<String>> e : topicWords.entrySet()) {
      score.set(e.getKey().floatValue());
      for (String v : e.getValue()) {
        word.set(v);
        forward(forwardObjs);
      }
    }
  }

  logger.info("Forwarded topic words each of " + topics + " topics");
}
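This example, like Examples 17-19 below, relies on the same reuse pattern: the Object[] row passed to forward() holds mutable Writables, so each iteration just calls set() instead of allocating new objects, and the consumer must not retain the row across calls. A distilled sketch of the pattern (names are illustrative, not from Hivemall):

import org.apache.hadoop.io.IntWritable;

class ForwardLoopSketch {
  interface RowSink { void forward(Object[] row); }

  // Distilled form of the reuse pattern: one mutable IntWritable is placed
  // in the output row once, then set() is called per iteration instead of
  // allocating a new object per row.
  static void emitRange(RowSink sink, int n) {
    final IntWritable idx = new IntWritable();
    final Object[] row = new Object[] { idx };
    for (int i = 0; i < n; i++) {
      idx.set(i); // mutate in place; the sink must not retain the row
      sink.forward(row);
    }
  }
}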
Example 13
Source File: TaskTracker.java From RDFS with Apache License 2.0
/**
 * Queries the job tracker for a set of outputs ready to be copied.
 * @param fromEventId the first event ID we want to start from; this is
 *                    modified by the call to this method
 * @param jobClient the job tracker
 * @return a set of locations to copy outputs from
 * @throws IOException
 */
private List<TaskCompletionEvent> queryJobTracker(IntWritable fromEventId,
    JobID jobId, InterTrackerProtocol jobClient) throws IOException {
  if (jobClient == null) {
    List<TaskCompletionEvent> empty = Collections.emptyList();
    return empty;
  }
  TaskCompletionEvent t[] = jobClient.getTaskCompletionEvents(
      jobId, fromEventId.get(), probe_sample_size);
  // We are interested in map task completion events only, so store
  // only those.
  List<TaskCompletionEvent> recentMapEvents =
      new ArrayList<TaskCompletionEvent>();
  for (int i = 0; i < t.length; i++) {
    if (t[i].isMap) {
      if (useTaskCompletionEventsStore) {
        // Try to get it from a store so that we don't have duplicate
        // instances in memory in the same JVM. This could happen if there
        // are multiple TTs and different reduce tasks from the same job
        // are running in each TT.
        recentMapEvents.add(getTceFromStore(t[i]));
      } else {
        recentMapEvents.add(t[i]);
      }
    }
  }
  fromEventId.set(fromEventId.get() + t.length);
  return recentMapEvents;
}
Example 14
Source File: CustomerFlowElement.java From WIFIProbe with Apache License 2.0
public void write(DataOutput dataOutput) throws IOException {
  Text text = new Text(wifiProb == null ? "" : wifiProb);
  text.write(dataOutput);

  // A single IntWritable instance is reused to serialize every int field.
  IntWritable intWritable = new IntWritable();
  intWritable.set(inNoOutWifi);
  intWritable.write(dataOutput);
  intWritable.set(inNoOutStore);
  intWritable.write(dataOutput);
  intWritable.set(outNoInWifi);
  intWritable.write(dataOutput);
  intWritable.set(outNoInStore);
  intWritable.write(dataOutput);
  intWritable.set(inAndOutWifi);
  intWritable.write(dataOutput);
  intWritable.set(inAndOutStore);
  intWritable.write(dataOutput);
  intWritable.set(stayInWifi);
  intWritable.write(dataOutput);
  intWritable.set(stayInStore);
  intWritable.write(dataOutput);

  DoubleWritable doubleWritable = new DoubleWritable();
  doubleWritable.set(jumpRate);
  doubleWritable.write(dataOutput);
  doubleWritable.set(deepVisit);
  doubleWritable.write(dataOutput);
  doubleWritable.set(inStoreRate);
  doubleWritable.write(dataOutput);
}
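The Writable contract requires a readFields() that consumes fields in exactly the order write() produced them. A hypothetical matching counterpart (CustomerFlowElement's actual readFields() is not shown on this page) might look like this:

// Hypothetical counterpart to the write() above; fields must be read back
// in exactly the order they were written.
public void readFields(DataInput dataInput) throws IOException {
  Text text = new Text();
  text.readFields(dataInput);
  this.wifiProb = text.toString();

  // The same IntWritable is reused to deserialize every int field.
  IntWritable intWritable = new IntWritable();
  intWritable.readFields(dataInput);
  this.inNoOutWifi = intWritable.get();
  intWritable.readFields(dataInput);
  this.inNoOutStore = intWritable.get();
  intWritable.readFields(dataInput);
  this.outNoInWifi = intWritable.get();
  intWritable.readFields(dataInput);
  this.outNoInStore = intWritable.get();
  intWritable.readFields(dataInput);
  this.inAndOutWifi = intWritable.get();
  intWritable.readFields(dataInput);
  this.inAndOutStore = intWritable.get();
  intWritable.readFields(dataInput);
  this.stayInWifi = intWritable.get();
  intWritable.readFields(dataInput);
  this.stayInStore = intWritable.get();

  DoubleWritable doubleWritable = new DoubleWritable();
  doubleWritable.readFields(dataInput);
  this.jumpRate = doubleWritable.get();
  doubleWritable.readFields(dataInput);
  this.deepVisit = doubleWritable.get();
  doubleWritable.readFields(dataInput);
  this.inStoreRate = doubleWritable.get();
}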
Example 15
Source File: CustomWritableWithChar.java From pxf with Apache License 2.0
@Override
public void write(DataOutput paramDataOutput) throws IOException {
  IntWritable localIntWritable = new IntWritable();
  localIntWritable.set(this.int1);
  localIntWritable.write(paramDataOutput);
  // set(int) accepts the char field via implicit widening, so the char is
  // serialized as a full IntWritable.
  localIntWritable.set(this.char1);
  localIntWritable.write(paramDataOutput);
}
Example 16
Source File: CustomWritableWithCircle.java From pxf with Apache License 2.0
@Override
public void write(DataOutput paramDataOutput) throws IOException {
  IntWritable localIntWritable = new IntWritable();
  localIntWritable.set(this.int1);
  localIntWritable.write(paramDataOutput);

  Text localText = new Text();
  localText.set(this.circle);
  localText.write(paramDataOutput);
}
Example 17
Source File: FactorizationMachineUDTF.java From incubator-hivemall with Apache License 2.0
private void forwardAsIntFeature(@Nonnull final FactorizationMachineModel model,
    final int factors) throws HiveException {
  final IntWritable f_idx = new IntWritable(0);
  final FloatWritable f_Wi = new FloatWritable(0.f);
  final FloatWritable[] f_Vi = HiveUtils.newFloatArray(factors, 0.f);

  final Object[] forwardObjs = new Object[3];
  forwardObjs[0] = f_idx;
  forwardObjs[1] = f_Wi;
  forwardObjs[2] = null;

  // W0
  f_idx.set(0);
  f_Wi.set(model.getW0());
  // V0 is null
  forward(forwardObjs);

  // Wi, Vif (i starts from 1..P)
  forwardObjs[2] = Arrays.asList(f_Vi);
  for (int i = model.getMinIndex(), maxIdx = model.getMaxIndex(); i <= maxIdx; i++) {
    final float[] vi = model.getV(i, false);
    if (vi == null) {
      continue;
    }
    f_idx.set(i);
    // set Wi
    final float w = model.getW(i);
    f_Wi.set(w);
    // set Vif
    for (int f = 0; f < factors; f++) {
      float v = vi[f];
      f_Vi[f].set(v);
    }
    forward(forwardObjs);
  }
}
Example 18
Source File: OnlineMatrixFactorizationUDTF.java From incubator-hivemall with Apache License 2.0
@Override
public void close() throws HiveException {
  if (model != null) {
    if (count == 0) {
      this.model = null; // help GC
      return;
    }
    if (iterations > 1) {
      runIterativeTraining(iterations);
    }
    final IntWritable idx = new IntWritable();
    final FloatWritable[] Pu = HiveUtils.newFloatArray(factor, 0.f);
    final FloatWritable[] Qi = HiveUtils.newFloatArray(factor, 0.f);
    final FloatWritable Bu = new FloatWritable();
    final FloatWritable Bi = new FloatWritable();
    final Object[] forwardObj;
    if (updateMeanRating) {
      assert useBiasClause;
      float meanRating = model.getMeanRating();
      FloatWritable mu = new FloatWritable(meanRating);
      forwardObj = new Object[] {idx, Pu, Qi, Bu, Bi, mu};
    } else {
      if (useBiasClause) {
        forwardObj = new Object[] {idx, Pu, Qi, Bu, Bi};
      } else {
        forwardObj = new Object[] {idx, Pu, Qi};
      }
    }
    int numForwarded = 0;
    for (int i = model.getMinIndex(), maxIdx = model.getMaxIndex(); i <= maxIdx; i++) {
      idx.set(i);
      Rating[] userRatings = model.getUserVector(i);
      if (userRatings == null) {
        forwardObj[1] = null;
      } else {
        forwardObj[1] = Pu;
        copyTo(userRatings, Pu);
      }
      Rating[] itemRatings = model.getItemVector(i);
      if (itemRatings == null) {
        forwardObj[2] = null;
      } else {
        forwardObj[2] = Qi;
        copyTo(itemRatings, Qi);
      }
      if (useBiasClause) {
        Bu.set(model.getUserBias(i));
        Bi.set(model.getItemBias(i));
      }
      forward(forwardObj);
      numForwarded++;
    }
    this.model = null; // help GC
    logger.info("Forwarded the prediction model of " + numForwarded
        + " rows. [totalErrors=" + cvState.getTotalErrors() + ", lastLosses="
        + cvState.getCumulativeLoss() + ", #trainingExamples=" + count + "]");
  }
}
Example 19
Source File: FieldAwareFactorizationMachineUDTF.java From incubator-hivemall with Apache License 2.0
@Override
protected void forwardModel() throws HiveException {
  this._model = null;
  this._fieldList = null;
  this._sumVfX = null;

  final int factors = _factors;
  final IntWritable idx = new IntWritable();
  final FloatWritable Wi = new FloatWritable(0.f);
  final FloatWritable[] Vi = HiveUtils.newFloatArray(factors, 0.f);
  final List<FloatWritable> ViObj = Arrays.asList(Vi);

  final Object[] forwardObjs = new Object[4];
  String modelId = HadoopUtils.getUniqueTaskIdString();
  forwardObjs[0] = new Text(modelId);
  forwardObjs[1] = idx;
  forwardObjs[2] = Wi;
  forwardObjs[3] = null; // Vi

  // W0
  idx.set(0);
  Wi.set(_ffmModel.getW0());
  forward(forwardObjs);

  final Entry entryW = new Entry(_ffmModel._buf, 1);
  final Entry entryV = new Entry(_ffmModel._buf, factors);
  final float[] Vf = new float[factors];
  for (Int2LongMap.Entry e : Fastutil.fastIterable(_ffmModel._map)) {
    // set i
    final int i = e.getIntKey();
    idx.set(i);

    final long offset = e.getLongValue();
    if (Entry.isEntryW(i)) { // set Wi
      entryW.setOffset(offset);
      float w = entryW.getW();
      if (w == 0.f) {
        continue; // skip w_i = 0
      }
      Wi.set(w);
      forwardObjs[2] = Wi;
      forwardObjs[3] = null;
    } else { // set Vif
      entryV.setOffset(offset);
      entryV.getV(Vf);
      for (int f = 0; f < factors; f++) {
        Vi[f].set(Vf[f]);
      }
      forwardObjs[2] = null;
      forwardObjs[3] = ViObj;
    }
    forward(forwardObjs);
  }
}
Example 20
Source File: SleepJob.java From hadoop-book with Apache License 2.0
public RecordReader<IntWritable, IntWritable> getRecordReader(
    InputSplit ignored, JobConf conf, Reporter reporter) throws IOException {
  final int count = conf.getInt("sleep.job.map.sleep.count", 1);
  if (count < 0) {
    throw new IOException("Invalid map count: " + count);
  }
  final int redcount = conf.getInt("sleep.job.reduce.sleep.count", 1);
  if (redcount < 0) {
    throw new IOException("Invalid reduce count: " + redcount);
  }
  final int emitPerMapTask = (redcount * conf.getNumReduceTasks());
  return new RecordReader<IntWritable, IntWritable>() {
    private int records = 0;
    private int emitCount = 0;

    public boolean next(IntWritable key, IntWritable value) throws IOException {
      key.set(emitCount);
      // Distribute emitPerMapTask evenly across the map records: the first
      // (emitPerMapTask % count) records emit one extra.
      int emit = emitPerMapTask / count;
      if ((emitPerMapTask) % count > records) {
        ++emit;
      }
      emitCount += emit;
      value.set(emit);
      return records++ < count;
    }

    public IntWritable createKey() {
      return new IntWritable();
    }

    public IntWritable createValue() {
      return new IntWritable();
    }

    public long getPos() throws IOException {
      return records;
    }

    public void close() throws IOException {
    }

    public float getProgress() throws IOException {
      return records / ((float) count);
    }
  };
}