Java Code Examples for org.apache.hadoop.io.FloatWritable#get()
The following examples show how to use org.apache.hadoop.io.FloatWritable#get().
Each example is taken from an open source project; the original source file and project are noted above each example.
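Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name is illustrative) of what get() does: it returns the primitive float wrapped by a FloatWritable.

import org.apache.hadoop.io.FloatWritable;

public class FloatWritableGetDemo {
    public static void main(String[] args) {
        FloatWritable fw = new FloatWritable(3.14f);   // wrap a primitive float
        float value = fw.get();                        // get() unwraps the primitive
        fw.set(value * 2.0f);                          // Writables are mutable and reusable
        System.out.println(fw.get());                  // prints 6.28
    }
}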
Example 1
Source File: NodeDumper.java From anthelion with Apache License 2.0
/**
 * Flips and collects the url and numeric sort value.
 */
public void reduce(FloatWritable key, Iterator<Text> values,
        OutputCollector<Text, FloatWritable> output, Reporter reporter)
        throws IOException {

    // take the negative of the negative to get original value, sometimes 0
    // value are a little weird
    float val = key.get();
    FloatWritable number = new FloatWritable(val == 0 ? 0 : -val);
    long numCollected = 0;

    // collect all values, this time with the url as key
    while (values.hasNext() && (numCollected < topn)) {
        Text url = WritableUtils.clone(values.next(), conf);
        output.collect(url, number);
        numCollected++;
    }
}
Example 2
Source File: NodeDumper.java From nutch-htmlunit with Apache License 2.0
/**
 * Flips and collects the url and numeric sort value.
 */
public void reduce(FloatWritable key, Iterator<Text> values,
        OutputCollector<Text, FloatWritable> output, Reporter reporter)
        throws IOException {

    // take the negative of the negative to get original value, sometimes 0
    // value are a little weird
    float val = key.get();
    FloatWritable number = new FloatWritable(val == 0 ? 0 : -val);
    long numCollected = 0;

    // collect all values, this time with the url as key
    while (values.hasNext() && (numCollected < topn)) {
        Text url = WritableUtils.clone(values.next(), conf);
        output.collect(url, number);
        numCollected++;
    }
}
Example 3
Source File: ArgminKLDistanceUDAF.java From incubator-hivemall with Apache License 2.0
public boolean iterate(FloatWritable mean, FloatWritable covar) {
    if (mean == null || covar == null) {
        return true;
    }
    if (partial == null) {
        this.partial = new PartialResult();
    }
    final float covar_f = covar.get();
    if (covar_f == 0.f) { // avoid null division
        return true;
    }
    partial.sum_mean_div_covar += (mean.get() / covar_f);
    partial.sum_inv_covar += (1.f / covar_f);
    return true;
}
Example 4
Source File: ToSparseFeaturesUDF.java From incubator-hivemall with Apache License 2.0
@Nullable
public List<String> evaluate(@Nullable final List<FloatWritable> features,
        @Nullable String biasName) {
    if (features == null) {
        return null;
    }
    final int size = features.size();
    if (size == 0) {
        return Collections.emptyList();
    }
    final StringBuilder buf = new StringBuilder(64);
    final ArrayList<String> list = new ArrayList<String>(size);
    for (int i = 0; i < size; i++) {
        final FloatWritable o = features.get(i);
        if (o != null) {
            final String s;
            final float v = o.get();
            if (biasName != null) {
                s = buf.append(biasName).append(':').append(v).toString();
            } else {
                s = buf.append(i).append(':').append(v).toString();
            }
            list.add(s);
            StringUtils.clear(buf);
        }
    }
    return list;
}
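A hypothetical call (not part of the Hivemall sources; it assumes the UDF has a no-arg constructor, and the variable names are illustrative) clarifies the output format: each non-null entry becomes an "index:value" string built from FloatWritable#get(), and null entries are skipped.

List<FloatWritable> dense = java.util.Arrays.asList(
        new FloatWritable(0.5f), null, new FloatWritable(2.0f));
List<String> sparse = new ToSparseFeaturesUDF().evaluate(dense, null);
// sparse is ["0:0.5", "2:2.0"]; with a non-null biasName, that name replaces the index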
Example 5
Source File: MapGetSumUDF.java From incubator-hivemall with Apache License 2.0
public DoubleWritable evaluate(Map<IntWritable, FloatWritable> map, List<IntWritable> keys) {
    double sum = 0d;
    for (IntWritable k : keys) {
        FloatWritable v = map.get(k);
        if (v != null) {
            sum += (double) v.get();
        }
    }
    return val(sum);
}
Example 6
Source File: thetaREDUCE.java From MLHadoop with Apache License 2.0
public void reduce(Text key, Iterable<FloatWritable> values, Context context)
        throws IOException, InterruptedException {
    float sum = 0;
    int count = 0;
    for (FloatWritable value : values) {
        sum += value.get();
        count++;
    }
    context.write(key, new FloatWritable(sum / count));
}
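For context, a minimal driver sketch (assumed, not shown in the MLHadoop example; the job name is illustrative) of how a reducer like this would typically be registered, with FloatWritable as the output value type:

Configuration conf = new Configuration();
Job job = Job.getInstance(conf, "theta-average");
job.setReducerClass(thetaREDUCE.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(FloatWritable.class);
// a mapper emitting (Text, FloatWritable) pairs would be configured here as well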
Example 7
Source File: CustomWritable.java From pxf with Apache License 2.0
@Override
public void readFields(DataInput in) throws IOException {
    // 0. Timestamp
    Text tms_text = new Text(tms);
    tms_text.readFields(in);
    tms = tms_text.toString();

    // 1. integers
    IntWritable intw = new IntWritable();
    for (int i = 0; i < num.length; i++) {
        intw.readFields(in);
        num[i] = intw.get();
    }
    intw.readFields(in);
    int1 = intw.get();
    intw.readFields(in);
    int2 = intw.get();

    // 2. strings
    Text txt = new Text();
    for (int i = 0; i < strings.length; i++) {
        txt.readFields(in);
        strings[i] = txt.toString();
    }
    txt.readFields(in);
    st1 = txt.toString();

    // 3. doubles
    DoubleWritable dw = new DoubleWritable();
    for (int i = 0; i < dubs.length; i++) {
        dw.readFields(in);
        dubs[i] = dw.get();
    }
    dw.readFields(in);
    db = dw.get();

    // 4. floats
    FloatWritable fw = new FloatWritable();
    for (int i = 0; i < fts.length; i++) {
        fw.readFields(in);
        fts[i] = fw.get();
    }
    fw.readFields(in);
    ft = fw.get();

    // 5. longs
    LongWritable lw = new LongWritable();
    for (int i = 0; i < lngs.length; i++) {
        lw.readFields(in);
        lngs[i] = lw.get();
    }
    lw.readFields(in);
    lng = lw.get();

    // 6. booleans
    BooleanWritable bw = new BooleanWritable();
    for (int i = 0; i < bools.length; ++i) {
        bw.readFields(in);
        bools[i] = bw.get();
    }
    bw.readFields(in);
    bool = bw.get();

    // 7. shorts
    ShortWritable sw = new ShortWritable();
    for (int i = 0; i < shrts.length; ++i) {
        sw.readFields(in);
        shrts[i] = sw.get();
    }
    sw.readFields(in);
    shrt = sw.get();

    // 8. bytes
    BytesWritable btsw = new BytesWritable();
    btsw.readFields(in);
    byte[] buffer = btsw.getBytes();
    bts = new byte[btsw.getLength()];
    for (int i = 0; i < btsw.getLength(); i++) {
        bts[i] = buffer[i];
    }
}
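Not shown here, but a Writable's matching write(DataOutput) method must emit fields in exactly the same order readFields() consumes them. A sketch (assumed, not the actual pxf source; 'out' stands for the DataOutput parameter) of the float section would be:

FloatWritable fw = new FloatWritable();
for (float f : fts) {
    fw.set(f);       // wrap each primitive float
    fw.write(out);   // serialize in the same order readFields() expects
}
fw.set(ft);
fw.write(out);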
Example 8
Source File: PipesNonJavaInputFormat.java From hadoop with Apache License 2.0
public synchronized boolean next(FloatWritable key, NullWritable value)
        throws IOException {
    progress = key.get();
    return true;
}
Example 9
Source File: PipesNonJavaInputFormat.java From big-c with Apache License 2.0
public synchronized boolean next(FloatWritable key, NullWritable value)
        throws IOException {
    progress = key.get();
    return true;
}
Example 10
Source File: AdaptiveFetchSchedule.java From anthelion with Apache License 2.0
@Override
public CrawlDatum setFetchSchedule(Text url, CrawlDatum datum, long prevFetchTime,
        long prevModifiedTime, long fetchTime, long modifiedTime, int state) {
    super.setFetchSchedule(url, datum, prevFetchTime, prevModifiedTime, fetchTime,
            modifiedTime, state);
    float interval = datum.getFetchInterval();
    long refTime = fetchTime;
    if (datum.getMetaData().containsKey(Nutch.WRITABLE_FIXED_INTERVAL_KEY)) {
        // Is fetch interval preset in CrawlDatum MD? Then use preset interval
        FloatWritable customIntervalWritable =
                (FloatWritable) (datum.getMetaData().get(Nutch.WRITABLE_FIXED_INTERVAL_KEY));
        interval = customIntervalWritable.get();
    } else {
        if (modifiedTime <= 0)
            modifiedTime = fetchTime;
        switch (state) {
            case FetchSchedule.STATUS_MODIFIED:
                interval *= (1.0f - DEC_RATE);
                break;
            case FetchSchedule.STATUS_NOTMODIFIED:
                interval *= (1.0f + INC_RATE);
                break;
            case FetchSchedule.STATUS_UNKNOWN:
                break;
        }
        if (SYNC_DELTA) {
            // try to synchronize with the time of change
            long delta = (fetchTime - modifiedTime) / 1000L;
            if (delta > interval)
                interval = delta;
            refTime = fetchTime - Math.round(delta * SYNC_DELTA_RATE * 1000);
        }
        if (interval < MIN_INTERVAL) {
            interval = MIN_INTERVAL;
        } else if (interval > MAX_INTERVAL) {
            interval = MAX_INTERVAL;
        }
    }
    datum.setFetchInterval(interval);
    datum.setFetchTime(refTime + Math.round(interval * 1000.0));
    datum.setModifiedTime(modifiedTime);
    return datum;
}
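The fixed-interval branch only fires if upstream code has stored a FloatWritable in the CrawlDatum metadata under the same key. A hedged sketch of how that value would be set (not part of this file; fixedIntervalSeconds is an illustrative variable name):

// pin the re-fetch interval for this URL to a fixed number of seconds
datum.getMetaData().put(Nutch.WRITABLE_FIXED_INTERVAL_KEY,
        new FloatWritable(fixedIntervalSeconds));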
Example 11
Source File: PipesNonJavaInputFormat.java From RDFS with Apache License 2.0
public synchronized boolean next(FloatWritable key, NullWritable value)
        throws IOException {
    progress = key.get();
    return true;
}
Example 12
Source File: AdaptiveFetchSchedule.java From nutch-htmlunit with Apache License 2.0
@Override
public CrawlDatum setFetchSchedule(Text url, CrawlDatum datum, long prevFetchTime,
        long prevModifiedTime, long fetchTime, long modifiedTime, int state) {
    super.setFetchSchedule(url, datum, prevFetchTime, prevModifiedTime, fetchTime,
            modifiedTime, state);
    float interval = datum.getFetchInterval();
    long refTime = fetchTime;

    // https://issues.apache.org/jira/browse/NUTCH-1430
    interval = (interval == 0) ? defaultInterval : interval;

    if (datum.getMetaData().containsKey(Nutch.WRITABLE_FIXED_INTERVAL_KEY)) {
        // Is fetch interval preset in CrawlDatum MD? Then use preset interval
        FloatWritable customIntervalWritable =
                (FloatWritable) (datum.getMetaData().get(Nutch.WRITABLE_FIXED_INTERVAL_KEY));
        interval = customIntervalWritable.get();
    } else {
        if (modifiedTime <= 0)
            modifiedTime = fetchTime;
        switch (state) {
            case FetchSchedule.STATUS_MODIFIED:
                interval *= (1.0f - DEC_RATE);
                break;
            case FetchSchedule.STATUS_NOTMODIFIED:
                interval *= (1.0f + INC_RATE);
                break;
            case FetchSchedule.STATUS_UNKNOWN:
                break;
        }
        if (SYNC_DELTA) {
            // try to synchronize with the time of change
            long delta = (fetchTime - modifiedTime) / 1000L;
            if (delta > interval)
                interval = delta;
            refTime = fetchTime - Math.round(delta * SYNC_DELTA_RATE * 1000);
        }
        if (interval < MIN_INTERVAL) {
            interval = MIN_INTERVAL;
        } else if (interval > MAX_INTERVAL) {
            interval = MAX_INTERVAL;
        }
    }
    datum.setFetchInterval(interval);
    datum.setFetchTime(refTime + Math.round(interval * 1000.0));
    datum.setModifiedTime(modifiedTime);
    return datum;
}
Example 13
Source File: PipesNonJavaInputFormat.java From hadoop-gpu with Apache License 2.0
public synchronized boolean next(FloatWritable key, NullWritable value)
        throws IOException {
    progress = key.get();
    return true;
}