Java Code Examples for com.google.common.collect.HashMultiset#add()
The following examples show how to use com.google.common.collect.HashMultiset#add(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
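At its core, HashMultiset#add(E) records one more occurrence of an element, while count(Object) reports how many occurrences have been recorded so far. A minimal, self-contained sketch of that behavior (class name and values are purely illustrative, not taken from the projects below):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class HashMultisetAddBasics {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("apple");   // first occurrence
        words.add("apple");   // second occurrence
        words.add("banana");

        System.out.println(words.count("apple"));   // 2
        System.out.println(words.count("banana"));  // 1
        System.out.println(words.size());           // 3 (total occurrences, duplicates included)
    }
}

The examples that follow show the same calls in real projects.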
Example 1
Source File: AbstractVisualPart.java From gef with Eclipse Public License 2.0
@Override
public void attachAnchored(IVisualPart<? extends Node> anchored) {
    // determine the viewer before adding the anchored
    IViewer oldViewer = getViewer();
    // register if we obtain a link to the viewer
    HashMultiset<IVisualPart<? extends Node>> newAnchoreds = HashMultiset.create(anchoreds);
    newAnchoreds.add(anchored);
    IViewer newViewer = determineViewer(getParent(), newAnchoreds);
    // unregister from old viewer in case we were registered (oldViewer != null)
    // and the viewer changes (newViewer != oldViewer)
    if (oldViewer != null && newViewer != oldViewer) {
        oldViewer.unsetAdapter(this);
    }
    // attach anchored (and fire change notifications)
    anchoreds.add(anchored);
    // if we obtain a link to the viewer then register at new viewer
    if (newViewer != null && newViewer != oldViewer) {
        newViewer.setAdapter(this, String.valueOf(System.identityHashCode(this)));
    }
}
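The pattern above copies the current anchoreds into a fresh multiset with HashMultiset.create(anchoreds), calls add() on the copy to probe what the new viewer would be, and only afterwards mutates the real collection. A standalone sketch of that copy-then-add behavior (names and values are illustrative, not GEF API):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class CopyThenAddDemo {
    public static void main(String[] args) {
        Multiset<String> anchoreds = HashMultiset.create();
        anchoreds.add("partA");

        // HashMultiset.create(iterable) copies the current counts...
        Multiset<String> newAnchoreds = HashMultiset.create(anchoreds);
        // ...so adding to the copy does not affect the original multiset.
        newAnchoreds.add("partB");

        System.out.println(anchoreds.count("partB"));    // 0
        System.out.println(newAnchoreds.count("partB")); // 1
    }
}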
Example 2
Source File: VectorizerMain.java From ner-sequencelearning with Apache License 2.0
private String[] prepareNGramDictionary(QGram qgram) throws IOException {
    final HashMultiset<String> set = HashMultiset.create();
    try (BufferedReader reader = new BufferedReader(new FileReader(inputFilePath))) {
        String line;
        while ((line = reader.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }
            String[] split = SPLIT_PATTERN.split(line);
            String tkn = cleanToken(split[0]);
            Map<String, Integer> profile = qgram.getProfile(tkn);
            for (Map.Entry<String, Integer> entry : profile.entrySet()) {
                //noinspection ResultOfMethodCallIgnored
                set.add(entry.getKey(), entry.getValue());
            }
        }
    }
    // do some naive word statistics cut-off
    return set.entrySet()
            .stream()
            .filter(e -> e.getCount() > MIN_CHAR_NGRAM_OCCURRENCE)
            .map(Multiset.Entry::getElement)
            .sorted()
            .toArray(String[]::new);
}
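Here set.add(entry.getKey(), entry.getValue()) uses the two-argument overload, which raises the element's count by the given number of occurrences in one call; the entrySet() stream then keeps only n-grams whose count exceeds MIN_CHAR_NGRAM_OCCURRENCE. A reduced sketch of that count-then-cut-off idea (the threshold and the sample n-grams are made up):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class NGramCutoffDemo {
    private static final int MIN_OCCURRENCE = 2; // illustrative threshold

    public static void main(String[] args) {
        Multiset<String> grams = HashMultiset.create();
        // add(element, occurrences) increases the count by 'occurrences' at once
        grams.add("ab", 3);
        grams.add("bc", 1);
        grams.add("cd", 2);

        String[] kept = grams.entrySet().stream()
                .filter(e -> e.getCount() > MIN_OCCURRENCE)
                .map(Multiset.Entry::getElement)
                .sorted()
                .toArray(String[]::new);

        // only "ab" survives the cut-off (count 3 > 2)
        System.out.println(String.join(",", kept));
    }
}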
Example 3
Source File: ClassesThatTestsExistTest.java From ArchUnit with Apache License 2.0
private Multiset<String> getSyntaxElements() {
    HashMultiset<String> result = HashMultiset.create();
    for (Method method : ClassesThat.class.getMethods()) {
        result.add(method.getName());
    }
    return result;
}
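Returning a Multiset rather than a List makes later comparisons order-independent while still distinguishing overloaded method names that occur more than once: two HashMultisets are equal exactly when every element has the same count in both. A small sketch of that property (the method names are illustrative):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetEqualityDemo {
    public static void main(String[] args) {
        Multiset<String> expected = HashMultiset.create();
        expected.add("areAnnotatedWith"); // overloaded name: appears twice
        expected.add("areAnnotatedWith");
        expected.add("haveSimpleName");

        Multiset<String> actual = HashMultiset.create();
        actual.add("haveSimpleName");     // different insertion order
        actual.add("areAnnotatedWith");
        actual.add("areAnnotatedWith");

        // equal because the per-element counts match, regardless of order
        System.out.println(expected.equals(actual)); // true
    }
}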
Example 4
Source File: TestHardAffinityFragmentParallelizer.java From dremio-oss with Apache License 2.0
@Test
public void multiNodeCluster2() throws Exception {
    final Wrapper wrapper = newWrapper(200, 1, 20,
        ImmutableList.of(
            new EndpointAffinity(N1_EP2, 0.15, true, 50),
            new EndpointAffinity(N2_EP2, 0.15, true, 50),
            new EndpointAffinity(N3_EP1, 0.10, true, 50),
            new EndpointAffinity(N4_EP2, 0.20, true, 50),
            new EndpointAffinity(N1_EP1, 0.20, true, 50)));
    INSTANCE.parallelizeFragment(wrapper, newParameters(1, 5, 20), null);

    // Expect the fragment parallelization to be 20 because:
    // 1. the cost (200) is above the threshold (SLICE_TARGET_DEFAULT) (which gives 200/1=200 width) and
    // 2. Number of mandatory node assignments are 5 (current width 200 satisfies the requirement)
    // 3. max fragment width is 20 which limits the width
    assertEquals(20, wrapper.getWidth());

    final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
    assertEquals(20, assignedEps.size());
    final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
    for (final NodeEndpoint ep : assignedEps) {
        counts.add(ep);
    }
    // Each node gets at max 5.
    assertTrue(counts.count(N1_EP2) <= 5);
    assertTrue(counts.count(N2_EP2) <= 5);
    assertTrue(counts.count(N3_EP1) <= 5);
    assertTrue(counts.count(N4_EP2) <= 5);
    assertTrue(counts.count(N1_EP1) <= 5);
}
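The multiset serves purely as a counter here: each assigned endpoint is added once per assignment, and count() then checks that no node exceeded its cap of five. A trimmed-down sketch of the same counting pattern using plain strings (the endpoints and the cap are illustrative):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

import java.util.Arrays;
import java.util.List;

public class AssignmentCountDemo {
    public static void main(String[] args) {
        List<String> assigned = Arrays.asList(
                "n1", "n2", "n1", "n3", "n2", "n1"); // pretend endpoint assignments
        Multiset<String> counts = HashMultiset.create();
        for (String ep : assigned) {
            counts.add(ep); // one occurrence per assignment
        }

        int maxPerNode = 5; // illustrative cap
        for (String node : counts.elementSet()) {
            // verify no node was assigned more than the cap allows
            if (counts.count(node) > maxPerNode) {
                throw new AssertionError(node + " exceeded the cap");
            }
        }
        System.out.println(counts); // e.g. [n1 x 3, n2 x 2, n3] (iteration order not guaranteed)
    }
}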
Example 5
Source File: TestHardAffinityFragmentParallelizer.java From dremio-oss with Apache License 2.0
@Test
public void multiNodeClusterNonNormalizedAffinities() throws Exception {
    final Wrapper wrapper = newWrapper(2000, 1, 250,
        ImmutableList.of(
            new EndpointAffinity(N1_EP2, 15, true, 50),
            new EndpointAffinity(N2_EP2, 15, true, 50),
            new EndpointAffinity(N3_EP1, 10, true, 50),
            new EndpointAffinity(N4_EP2, 20, true, 50),
            new EndpointAffinity(N1_EP1, 20, true, 50)));
    INSTANCE.parallelizeFragment(wrapper, newParameters(100, 20, 80), null);

    // Expect the fragment parallelization to be 20 because:
    // 1. the cost (2000) is above the threshold (SLICE_TARGET_DEFAULT) (which gives 2000/100=20 width) and
    // 2. Number of mandatory node assignments are 5 (current width 200 satisfies the requirement)
    // 3. max width per node is 20 which limits the width to 100, but existing width (20) is already less
    assertEquals(20, wrapper.getWidth());

    final List<NodeEndpoint> assignedEps = wrapper.getAssignedEndpoints();
    assertEquals(20, assignedEps.size());
    final HashMultiset<NodeEndpoint> counts = HashMultiset.create();
    for (final NodeEndpoint ep : assignedEps) {
        counts.add(ep);
    }
    // Each node gets at max 5.
    assertThat(counts.count(N1_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
    assertThat(counts.count(N2_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
    assertThat(counts.count(N3_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
    assertThat(counts.count(N4_EP2), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
    assertThat(counts.count(N1_EP1), CoreMatchers.allOf(greaterThan(1), lessThanOrEqualTo(5)));
}
Example 6
Source File: LinkArrayWritable.java From wikireverse with MIT License
public String getMostUsedArticleCasing() {
    HashMultiset<String> articleNames = HashMultiset.create();
    String result;

    for (Writable writable : super.get()) {
        LinkWritable link = (LinkWritable) writable;
        articleNames.add(link.getArticle().toString());
    }

    ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(articleNames);
    result = (String) sorted.elementSet().toArray()[0];

    return result;
}
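After every spelling of an article name has been added, Multisets.copyHighestCountFirst() produces an ImmutableMultiset whose iteration order is highest count first, so the first element of its elementSet() is the most frequently used casing. A minimal sketch of that most-frequent-element idiom (the sample strings are made up):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;

public class MostFrequentCasingDemo {
    public static void main(String[] args) {
        Multiset<String> names = HashMultiset.create();
        names.add("Main Page");
        names.add("Main page");
        names.add("Main Page"); // "Main Page" now has the highest count

        ImmutableMultiset<String> sorted = Multisets.copyHighestCountFirst(names);
        // elementSet() of the copy iterates in descending-count order
        String mostUsed = sorted.elementSet().iterator().next();
        System.out.println(mostUsed); // Main Page
    }
}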
Example 7
Source File: TestCombineFileInputFormat.java From hadoop with Apache License 2.0
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
    // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on
    // both nodes. The grouping ensures that both nodes get splits instead of
    // just the first node
    DummyInputFormat inFormat = new DummyInputFormat();
    int numBlocks = 12;
    long totLength = 0;
    long blockSize = 100;
    long maxSize = 200;
    long minSizeNode = 50;
    long minSizeRack = 50;
    String[] locations = { "h1", "h2" };
    String[] racks = new String[0];
    Path path = new Path("hdfs://file");

    OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
    for (int i = 0; i < numBlocks; ++i) {
        blocks[i] = new OneBlockInfo(path, i * blockSize, blockSize, locations, racks);
        totLength += blockSize;
    }

    List<InputSplit> splits = new ArrayList<InputSplit>();
    HashMap<String, Set<String>> rackToNodes = new HashMap<String, Set<String>>();
    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();
    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();
    HashMap<String, Set<OneBlockInfo>> nodeToBlocks = new HashMap<String, Set<OneBlockInfo>>();

    OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes, nodeToBlocks, rackToNodes);

    inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,
        maxSize, minSizeNode, minSizeRack, splits);

    int expectedSplitCount = (int) (totLength / maxSize);
    assertEquals(expectedSplitCount, splits.size());

    HashMultiset<String> nodeSplits = HashMultiset.create();
    for (int i = 0; i < expectedSplitCount; ++i) {
        InputSplit inSplit = splits.get(i);
        assertEquals(maxSize, inSplit.getLength());
        assertEquals(1, inSplit.getLocations().length);
        nodeSplits.add(inSplit.getLocations()[0]);
    }
    assertEquals(3, nodeSplits.count(locations[0]));
    assertEquals(3, nodeSplits.count(locations[1]));
}
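The test adds the single host of each split to nodeSplits and then uses count() to assert that the six splits were shared evenly between h1 and h2. When reading such assertions it helps to keep the different size notions apart; a short sketch (hosts and numbers are illustrative):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class SplitCountDemo {
    public static void main(String[] args) {
        Multiset<String> nodeSplits = HashMultiset.create();
        String[] hosts = { "h1", "h2", "h1", "h2", "h1", "h2" }; // 6 splits, alternating hosts
        for (String host : hosts) {
            nodeSplits.add(host);
        }

        System.out.println(nodeSplits.size());              // 6: total occurrences (splits)
        System.out.println(nodeSplits.elementSet().size()); // 2: distinct hosts
        System.out.println(nodeSplits.count("h1"));         // 3: splits placed on h1
    }
}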
Example 8
Source File: TestCombineFileInputFormat.java From big-c with Apache License 2.0
@Test
public void testNodeInputSplit() throws IOException, InterruptedException {
    // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on
    // both nodes. The grouping ensures that both nodes get splits instead of
    // just the first node
    DummyInputFormat inFormat = new DummyInputFormat();
    int numBlocks = 12;
    long totLength = 0;
    long blockSize = 100;
    long maxSize = 200;
    long minSizeNode = 50;
    long minSizeRack = 50;
    String[] locations = { "h1", "h2" };
    String[] racks = new String[0];
    Path path = new Path("hdfs://file");

    OneBlockInfo[] blocks = new OneBlockInfo[numBlocks];
    for (int i = 0; i < numBlocks; ++i) {
        blocks[i] = new OneBlockInfo(path, i * blockSize, blockSize, locations, racks);
        totLength += blockSize;
    }

    List<InputSplit> splits = new ArrayList<InputSplit>();
    HashMap<String, Set<String>> rackToNodes = new HashMap<String, Set<String>>();
    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();
    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();
    HashMap<String, Set<OneBlockInfo>> nodeToBlocks = new HashMap<String, Set<OneBlockInfo>>();

    OneFileInfo.populateBlockInfo(blocks, rackToBlocks, blockToNodes, nodeToBlocks, rackToNodes);

    inFormat.createSplits(nodeToBlocks, blockToNodes, rackToBlocks, totLength,
        maxSize, minSizeNode, minSizeRack, splits);

    int expectedSplitCount = (int) (totLength / maxSize);
    assertEquals(expectedSplitCount, splits.size());

    HashMultiset<String> nodeSplits = HashMultiset.create();
    for (int i = 0; i < expectedSplitCount; ++i) {
        InputSplit inSplit = splits.get(i);
        assertEquals(maxSize, inSplit.getLength());
        assertEquals(1, inSplit.getLocations().length);
        nodeSplits.add(inSplit.getLocations()[0]);
    }
    assertEquals(3, nodeSplits.count(locations[0]));
    assertEquals(3, nodeSplits.count(locations[1]));
}