Java Code Examples for java.util.HashMap#merge()
The following examples show how to use java.util.HashMap#merge().
Each example is taken from an open-source project; the source file and license are noted above the snippet.
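Before the project examples, here is a minimal, self-contained sketch of what merge() does (the class and variable names are illustrative only, not taken from any of the projects below): if the key is absent or mapped to null, the given value is stored; otherwise the remapping function combines the existing and the new value, and a null result removes the entry.

import java.util.HashMap;
import java.util.Map;

public class MergeSketch {
    public static void main(String[] args) {
        Map<String, Long> counts = new HashMap<>();

        for (String word : new String[] {"a", "b", "a", "c", "a"}) {
            // Key absent: stores 1L. Key present: combines old and new value with Long::sum.
            counts.merge(word, 1L, Long::sum);
        }

        // A remapping function that returns null removes the entry.
        counts.merge("b", 0L, (oldValue, newValue) -> null);

        System.out.println(counts); // prints {a=3, c=1} (iteration order may vary)
    }
}

The same counting idiom, merge(key, 1, Integer::sum) or merge(key, 1L, Long::sum), appears in several of the examples below.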
Example 1
Source File: DocWordSplitCount.java From Alink with Apache License 2.0
public void eval(String content) {
    if (null == content || content.length() == 0) {
        return;
    }
    String[] words = content.split(this.delimiter);
    HashMap<String, Long> map = new HashMap<>(0);
    for (String word : words) {
        if (word.length() > 0) {
            // Count each non-empty word; Long::sum adds 1 to the existing count.
            map.merge(word, 1L, Long::sum);
        }
    }
    for (Map.Entry<String, Long> entry : map.entrySet()) {
        collect(Row.of(entry.getKey(), entry.getValue()));
    }
}
Example 2
Source File: Lightfield.java From cineast with MIT License
/**
 * This method represents the last step that's executed when processing a query. A list of partial-results
 * (DistanceElements) returned by the lookup stage is processed based on some internal method and finally
 * converted to a list of ScoreElements. The filtered list of ScoreElements is returned by the feature module
 * during retrieval.
 *
 * @param partialResults List of partial results returned by the lookup stage.
 * @param qc A ReadableQueryConfig object that contains query-related configuration parameters.
 * @return List of final results. Is supposed to be de-duplicated and the number of items should not exceed
 *         the number of items per module.
 */
@Override
protected List<ScoreElement> postprocessQuery(List<SegmentDistanceElement> partialResults, ReadableQueryConfig qc) {
    /* Perform search for each extracted feature and adjust scores. */
    HashMap<String, DistanceElement> map = new HashMap<>();
    for (DistanceElement result : partialResults) {
        map.merge(result.getId(), result, (v1, v2) -> {
            if (v1.getDistance() < v2.getDistance()) {
                return v1;
            } else {
                return v2;
            }
        });
    }

    /* Add results to list and return list of results. */
    final CorrespondenceFunction correspondence = qc.getCorrespondenceFunction().orElse(this.correspondence);
    return ScoreElement.filterMaximumScores(map.entrySet().stream().map((e) -> e.getValue().toScore(correspondence)));
}
Example 3
Source File: LatencyMetricGroupSummaryCollector.java From cassandra-exporter with Apache License 2.0
@Override
public MBeanGroupMetricFamilyCollector merge(final MBeanGroupMetricFamilyCollector rawOther) {
    if (!(rawOther instanceof LatencyMetricGroupSummaryCollector)) {
        throw new IllegalStateException();
    }
    final LatencyMetricGroupSummaryCollector other = (LatencyMetricGroupSummaryCollector) rawOther;

    // Copy this collector's groups, then merge in the other collector's groups key by key.
    final HashMap<Labels, LatencyMetricGroup> newLatencyMetricGroups = new HashMap<>(latencyMetricGroups);
    for (final Map.Entry<Labels, LatencyMetricGroup> group : other.latencyMetricGroups.entrySet()) {
        newLatencyMetricGroups.merge(group.getKey(), group.getValue(), LatencyMetricGroup::merge);
    }

    return new LatencyMetricGroupSummaryCollector(name, help, newLatencyMetricGroups);
}
Example 4
Source File: soln.java From HackerRank-solutions with MIT License
public static void main(String[] args) {
    HashMap<Integer, Integer> map = new HashMap<>();
    ArrayDeque<Integer> deque = new ArrayDeque<>();
    Scanner scan = new Scanner(System.in);
    int n = scan.nextInt();
    int m = scan.nextInt();
    int max = 0;
    for (int i = 0; i < n; i++) {
        /* Remove old value (if necessary) */
        if (i >= m) {
            int old = deque.removeFirst();
            if (map.get(old) == 1) {
                map.remove(old);
            } else {
                map.merge(old, -1, Integer::sum);
            }
        }

        /* Add new value */
        int num = scan.nextInt();
        deque.addLast(num);
        map.merge(num, 1, Integer::sum);
        max = Math.max(max, map.size());

        /* If all integers are unique, we have found our largest
           possible answer, so we can break out of loop */
        if (max == m) {
            break;
        }
    }
    scan.close();
    System.out.println(max);
}
Example 5
Source File: DocCountVectorizerModelMapper.java From Alink with Apache License 2.0
public static SparseVector predictSparseVector(String content, double minTF,
                                               HashMap<String, Tuple2<Integer, Double>> wordIdWeight,
                                               FeatureType featureType, int featureNum) {
    HashMap<String, Integer> wordCount = new HashMap<>(0);
    String[] tokens = content.split(NLPConstant.WORD_DELIMITER);
    double minTermCount = minTF >= 1.0 ? minTF : minTF * tokens.length;
    double tokenRatio = 1.0 / tokens.length;
    for (String token : tokens) {
        if (wordIdWeight.containsKey(token)) {
            wordCount.merge(token, 1, Integer::sum);
        }
    }
    int[] indexes = new int[wordCount.size()];
    double[] values = new double[indexes.length];
    int pos = 0;
    for (Map.Entry<String, Integer> entry : wordCount.entrySet()) {
        double count = entry.getValue();
        if (count >= minTermCount) {
            Tuple2<Integer, Double> idWeight = wordIdWeight.get(entry.getKey());
            indexes[pos] = idWeight.f0;
            values[pos++] = featureType.featureValueFunc.apply(idWeight.f1, count, tokenRatio);
        }
    }
    return new SparseVector(featureNum, Arrays.copyOf(indexes, pos), Arrays.copyOf(values, pos));
}
Example 6
Source File: DocHashCountVectorizerModelMapper.java From Alink with Apache License 2.0
@Override
protected Object predictResult(Object input) {
    if (null == input) {
        return null;
    }
    HashMap<Integer, Integer> wordCount = new HashMap<>(0);
    String content = (String) input;
    String[] tokens = content.split(NLPConstant.WORD_DELIMITER);
    double minTermCount = model.minTF >= 1.0 ? model.minTF : model.minTF * tokens.length;
    double tokenRatio = 1.0 / tokens.length;
    for (String token : tokens) {
        int hashValue = Math.abs(HASH.hashUnencodedChars(token).asInt());
        int index = Math.floorMod(hashValue, model.numFeatures);
        if (model.idfMap.containsKey(index)) {
            wordCount.merge(index, 1, Integer::sum);
        }
    }
    int[] indexes = new int[wordCount.size()];
    double[] values = new double[indexes.length];
    int pos = 0;
    for (Map.Entry<Integer, Integer> entry : wordCount.entrySet()) {
        double count = entry.getValue();
        if (count >= minTermCount) {
            indexes[pos] = entry.getKey();
            values[pos++] = featureType.featureValueFunc.apply(model.idfMap.get(entry.getKey()), count, tokenRatio);
        }
    }
    return new SparseVector(model.numFeatures, Arrays.copyOf(indexes, pos), Arrays.copyOf(values, pos));
}
Example 7
Source File: ResultsCommand.java From commons-rng with Apache License 2.0
/**
 * Gets the systematic failures (tests that fail in every test result).
 *
 * @param results Results.
 * @return the systematic failures
 */
private static List<String> getSystematicFailures(List<TestResult> results) {
    final HashMap<String, Integer> map = new HashMap<>();
    for (final TestResult result : results) {
        // Ignore partial results
        if (!result.isComplete()) {
            continue;
        }
        // Some named tests can fail more than once on different statistics.
        // For example TestU01 BigCrush LongestHeadRun can output in the summary:
        // 86  LongestHeadRun, r = 0        eps
        // 86  LongestHeadRun, r = 0        1 - eps1
        // This will be counted as 2 failed tests. For the purpose of systematic
        // failures the name of the test is the same and should be counted once.
        final HashSet<String> unique = new HashSet<>(result.getFailedTests());
        for (String test : unique) {
            map.merge(test, 1, (i, j) -> i + j);
        }
    }
    final int completeCount = (int) results.stream().filter(TestResult::isComplete).count();
    final List<String> list = map.entrySet().stream()
        .filter(e -> e.getValue() == completeCount)
        .map(Entry::getKey)
        .collect(Collectors.toCollection((Supplier<List<String>>) ArrayList::new));
    // Special case for PractRand. Add the maximum RNG output length before failure.
    // This is because some PractRand tests may not be counted as systematic failures
    // as they have not been run to the same output length due to earlier failure of
    // another test.
    final int max = getMaxLengthExponent(results);
    if (max != 0) {
        list.add(bytesToString(max));
    }
    return list;
}
Example 8
Source File: CENS.java From cineast with MIT License
/**
 * This method represents the last step that's executed when processing a query. A list of partial-results
 * (DistanceElements) returned by the lookup stage is processed based on some internal method and finally
 * converted to a list of ScoreElements. The filtered list of ScoreElements is returned by the feature module
 * during retrieval.
 *
 * @param partialResults List of partial results returned by the lookup stage.
 * @param qc A ReadableQueryConfig object that contains query-related configuration parameters.
 * @return List of final results. Is supposed to be de-duplicated and the number of items should not exceed
 *         the number of items per module.
 */
@Override
protected List<ScoreElement> postprocessQuery(List<SegmentDistanceElement> partialResults, ReadableQueryConfig qc) {
    /* Prepare map to build a unique set of results. */
    final HashMap<String, DistanceElement> map = new HashMap<>();
    for (DistanceElement hit : partialResults) {
        map.merge(hit.getId(), hit, (o, n) -> o.getDistance() > n.getDistance() ? n : o);
    }

    /* Prepare final list of results. */
    final CorrespondenceFunction correspondence = qc.getCorrespondenceFunction().orElse(this.correspondence);
    return ScoreElement.filterMaximumScores(map.entrySet().stream().map((e) -> e.getValue().toScore(correspondence)));
}
Example 9
Source File: AudioFingerprint.java From cineast with MIT License
/**
 * This method represents the last step that's executed when processing a query. A list of partial-results
 * (DistanceElements) returned by the lookup stage is processed based on some internal method and finally
 * converted to a list of ScoreElements. The filtered list of ScoreElements is returned by the feature module
 * during retrieval.
 *
 * @param partialResults List of partial results returned by the lookup stage.
 * @param qc A ReadableQueryConfig object that contains query-related configuration parameters.
 * @return List of final results. Is supposed to be de-duplicated and the number of items should not exceed
 *         the number of items per module.
 */
@Override
protected List<ScoreElement> postprocessQuery(List<SegmentDistanceElement> partialResults, ReadableQueryConfig qc) {
    /* Prepare empty list of results. */
    final ArrayList<ScoreElement> results = new ArrayList<>();
    final HashMap<String, DistanceElement> map = new HashMap<>();

    /* Merge into map for final results; select the minimum distance. */
    for (DistanceElement result : partialResults) {
        map.merge(result.getId(), result, (d1, d2) -> {
            if (d1.getDistance() > d2.getDistance()) {
                return d2;
            } else {
                return d1;
            }
        });
    }

    /* Return immediately if no partial results are available. */
    if (map.isEmpty()) {
        return results;
    }

    /* Prepare final results. */
    final CorrespondenceFunction fkt = qc.getCorrespondenceFunction().orElse(this.correspondence);
    map.forEach((key, value) -> results.add(value.toScore(fkt)));
    return ScoreElement.filterMaximumScores(results.stream());
}
Example 10
Source File: AverageHPCP.java From cineast with MIT License
/**
 * This method represents the last step that's executed when processing a query. A list of partial-results
 * (DistanceElements) returned by the lookup stage is processed based on some internal method and finally
 * converted to a list of ScoreElements. The filtered list of ScoreElements is returned by the feature module
 * during retrieval.
 *
 * @param partialResults List of partial results returned by the lookup stage.
 * @param qc A ReadableQueryConfig object that contains query-related configuration parameters.
 * @return List of final results. Is supposed to be de-duplicated and the number of items should not exceed
 *         the number of items per module.
 */
@Override
protected List<ScoreElement> postprocessQuery(List<SegmentDistanceElement> partialResults, ReadableQueryConfig qc) {
    /* Prepare helper data-structures. */
    final HashMap<String, DistanceElement> map = new HashMap<>();

    /* Set QueryConfig and extract correspondence function. */
    for (DistanceElement hit : partialResults) {
        map.merge(hit.getId(), hit, (o, n) -> new SegmentDistanceElement(hit.getId(), (o.getDistance() + n.getDistance()) / 2));
    }

    /* Prepare final result-set. */
    final CorrespondenceFunction fkt = qc.getCorrespondenceFunction().orElse(this.correspondence);
    return ScoreElement.filterMaximumScores(map.entrySet().stream().map(e -> e.getValue().toScore(fkt)));
}
Example 11
Source File: HPCPShingle.java From cineast with MIT License
/**
 * This method represents the last step that's executed when processing a query. A list of partial-results
 * (DistanceElements) returned by the lookup stage is processed based on some internal method and finally
 * converted to a list of ScoreElements. The filtered list of ScoreElements is returned by the feature module
 * during retrieval.
 *
 * @param partialResults List of partial results returned by the lookup stage.
 * @param qc A ReadableQueryConfig object that contains query-related configuration parameters.
 * @return List of final results. Is supposed to be de-duplicated and the number of items should not exceed
 *         the number of items per module.
 */
@Override
protected List<ScoreElement> postprocessQuery(List<SegmentDistanceElement> partialResults, ReadableQueryConfig qc) {
    /* Prepare helper data-structures. */
    final HashMap<String, DistanceElement> map = new HashMap<>();
    for (DistanceElement hit : partialResults) {
        if (hit.getDistance() <= this.distanceThreshold) {
            map.merge(hit.getId(), hit, (o, n) -> new SegmentDistanceElement(o.getId(), (o.getDistance() + n.getDistance()) / 2));
        }
    }

    /* Prepare final result-set. */
    final CorrespondenceFunction fkt = qc.getCorrespondenceFunction().orElse(this.correspondence);
    return ScoreElement.filterMaximumScores(map.entrySet().stream().map(e -> e.getValue().toScore(fkt)));
}