Java Code Examples for com.google.common.collect.Iterators#getOnlyElement()
The following examples show how to use com.google.common.collect.Iterators#getOnlyElement().
Each example is taken from an open source project; the source file, project, and license are noted above it.
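Before the project examples, here is a minimal standalone sketch of getOnlyElement's contract (the class name and sample values are invented for illustration): it returns the lone element of an iterator, the two-argument overload falls back to a default when the iterator is empty, and an iterator with more than one element causes an IllegalArgumentException.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;

public class GetOnlyElementSketch {
    public static void main(String[] args) {
        // Exactly one element: the element is returned.
        String only = Iterators.getOnlyElement(ImmutableList.of("a").iterator());
        System.out.println(only); // prints "a"

        // Empty iterator with the two-argument overload: the default is returned instead of throwing.
        String fallback = Iterators.getOnlyElement(ImmutableList.<String>of().iterator(), "none");
        System.out.println(fallback); // prints "none"

        // An empty iterator without a default throws NoSuchElementException,
        // and more than one element throws IllegalArgumentException.
        try {
            Iterators.getOnlyElement(ImmutableList.of("a", "b").iterator());
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected.getClass().getSimpleName());
        }
    }
}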
Example 1
Source File: HTMLArtifactTest.java From blueocean-plugin with MIT License | 6 votes |
@Test
public void resolveArtifact() throws Exception {
    WorkflowJob p = j.createProject(WorkflowJob.class, "project");
    URL resource = Resources.getResource(getClass(), "HTMLArtifactTest.jenkinsfile");
    String jenkinsFile = Resources.toString(resource, Charsets.UTF_8);
    p.setDefinition(new CpsFlowDefinition(jenkinsFile, true));
    p.save();

    Run r = p.scheduleBuild2(0).waitForStart();
    j.waitForCompletion(r);

    BluePipeline bluePipeline = (BluePipeline) BluePipelineFactory.resolve(p);
    BlueArtifactContainer artifacts = bluePipeline.getLatestRun().getArtifacts();

    Assert.assertEquals(1, Iterators.size(artifacts.iterator()));
    BlueArtifact artifact = Iterators.getOnlyElement(artifacts.iterator());
    Assert.assertEquals("/blue/rest/organizations/jenkins/pipelines/project/runs/1/artifacts/io.jenkins.blueocean.htmlpublisher.HTMLArtifact%253AMy%252520Cool%252520report/", artifact.getLink().getHref());
    Assert.assertEquals("My Cool report", artifact.getName());
    Assert.assertEquals("My Cool report", artifact.getPath());
    Assert.assertNotNull(artifact.getUrl());
    Assert.assertEquals(-1, artifact.getSize());
    Assert.assertFalse(artifact.isDownloadable());
}
Example 2
Source File: TraversalPlanFactoryImpl.java From grakn with GNU Affero General Public License v3.0 | 6 votes |
private double getLogInstanceCount(Fragment fragment) {
    // set the weight of the node as a starting point based on log(number of this node)
    double logInstanceCount;
    // this is to make sure we don't end up with log(0)
    double shardLoadFactor = 0.25;
    if (fragment instanceof LabelFragment) {
        // only LabelFragment (corresponding to type vertices) can be sharded
        LabelFragment labelFragment = (LabelFragment) fragment;
        Label label = Iterators.getOnlyElement(labelFragment.labels().iterator());
        // TODO: this manipulation is to retain the previous behaviour, we need to update the query planner
        // to remove the sharding threshold dependency and make this more granular
        double instanceCount = (conceptManager.getSchemaConcept(label).subs()
                .mapToLong(schemaConcept -> keyspaceStatistics.count(conceptManager, schemaConcept.label()))
                .sum() / shardingThreshold + shardLoadFactor) * shardingThreshold;
        logInstanceCount = Math.log(instanceCount);
    } else {
        logInstanceCount = -1D;
    }
    return logInstanceCount;
}
Example 3
Source File: DamGeneratorTest.java From SeaCloudsPlatform with Apache License 2.0 | 6 votes |
@SuppressWarnings("unchecked") private void testMonitoringConfigurationPolicy(Map<String, Object> monitoringConfigurationGroup, String policyId, String policyType) { assertTrue(monitoringConfigurationGroup.containsKey(DamGenerator.POLICIES)); List<Map<String, Object>> policies = (List<Map<String, Object>>) monitoringConfigurationGroup.get(DamGenerator.POLICIES); assertEquals(policies.size(), 1); Map<String, Object> policy = Iterators.getOnlyElement(policies.iterator()); Map<String, Object> policyValues = (Map<String, Object>) policy.get(policyId); assertTrue(policyValues.containsKey(DamGenerator.ID)); assertFalse(Strings.isBlank((String) policyValues.get(DamGenerator.ID))); assertTrue(policyValues.containsKey(DamGenerator.TYPE)); assertTrue(Strings.isBlank((String) policyValues.get(policyType))); assertTrue(monitoringConfigurationGroup.containsKey(DamGenerator.MEMBERS)); List<String> members = (List<String>) monitoringConfigurationGroup.get(DamGenerator.MEMBERS); assertEquals(members.size(), 1); assertEquals(Iterators.getOnlyElement(members.iterator()), DamGenerator.APPLICATION); }
Example 4
Source File: FunctionAssertions.java From presto with Apache License 2.0 | 5 votes |
public void assertCachedInstanceHasBoundedRetainedSize(String projection) {
    requireNonNull(projection, "projection is null");

    Expression projectionExpression = createExpression(session, projection, metadata, INPUT_TYPES);
    RowExpression projectionRowExpression = toRowExpression(session, projectionExpression);
    PageProcessor processor = compiler.compilePageProcessor(Optional.empty(), ImmutableList.of(projectionRowExpression)).get();

    // This is a heuristic to detect whether the retained size of cachedInstance is bounded.
    // * The test runs at least 1000 iterations.
    // * The test passes if max retained size doesn't refresh after
    //   4x the number of iterations when max was last updated.
    // * The test fails if retained size reaches 1MB.
    // Note that 1MB is arbitrarily chosen and may be increased if a function implementation
    // legitimately needs more.
    long maxRetainedSize = 0;
    int maxIterationCount = 0;
    for (int iterationCount = 0; iterationCount < Math.max(1000, maxIterationCount * 4); iterationCount++) {
        Iterator<Optional<Page>> output = processor.process(
                session.toConnectorSession(),
                new DriverYieldSignal(),
                newSimpleAggregatedMemoryContext().newLocalMemoryContext(PageProcessor.class.getSimpleName()),
                SOURCE_PAGE);
        // consume the iterator
        Iterators.getOnlyElement(output);

        long retainedSize = processor.getProjections().stream()
                .mapToLong(this::getRetainedSizeOfCachedInstance)
                .sum();
        if (retainedSize > maxRetainedSize) {
            maxRetainedSize = retainedSize;
            maxIterationCount = iterationCount;
        }

        if (maxRetainedSize >= 1048576) {
            fail(format("The retained size of cached instance of function invocation is likely unbounded: %s", projection));
        }
    }
}
Example 5
Source File: TransactionImpl.java From fluo with Apache License 2.0 | 5 votes |
@Override
public Iterator<Result> handleUnknown(CommitData cd, Iterator<Result> results) throws Exception {
    // the code for handing this is synchronous and needs to be handled in another thread pool
    // TODO - how do we do the above without return a CF?

    long commitTs = getStats().getCommitTs();
    Result result = Iterators.getOnlyElement(results);
    Status ms = result.getStatus();

    while (ms == Status.UNKNOWN) {
        // TODO async
        TxInfo txInfo = TxInfo.getTransactionInfo(env, cd.prow, cd.pcol, startTs);

        switch (txInfo.status) {
            case COMMITTED:
                if (txInfo.commitTs != commitTs) {
                    throw new IllegalStateException(
                            cd.prow + " " + cd.pcol + " " + txInfo.commitTs + "!=" + commitTs);
                }
                ms = Status.ACCEPTED;
                break;
            case LOCKED:
                // TODO async
                ConditionalMutation delLockMutation = result.getMutation();
                ms = cd.cw.write(delLockMutation).getStatus();
                break;
            default:
                ms = Status.REJECTED;
        }
    }

    Result newResult = new Result(ms, result.getMutation(), result.getTabletServer());
    return Collections.singletonList(newResult).iterator();
}
Example 6
Source File: MergedTargetNode.java From buck with Apache License 2.0 | 5 votes |
private MergedTargetNode(UnflavoredBuildTarget buildTarget, ImmutableList<TargetNode<?>> nodes) {
    Preconditions.checkArgument(!nodes.isEmpty());
    for (TargetNode<?> node : nodes) {
        Preconditions.checkArgument(
                node.getBuildTarget().getUnflavoredBuildTarget().equals(buildTarget));
    }

    this.buildTarget = buildTarget;
    this.nodes = nodes;

    // All nodes with the same unflavored view must have the same rule type
    this.ruleType =
            Iterators.getOnlyElement(nodes.stream().map(TargetNode::getRuleType).distinct().iterator());
}
Example 7
Source File: DexFileMergerTest.java From bazel with Apache License 2.0 | 5 votes |
private void assertSingleDexOutput(int expectedClassCount, Path outputArchive, String dexFileName)
        throws IOException {
    try (ZipFile output = new ZipFile(outputArchive.toFile())) {
        ZipEntry entry = Iterators.getOnlyElement(Iterators.forEnumeration(output.entries()));
        assertThat(entry.getName()).isEqualTo(dexFileName);
        Dex dex = new Dex(output.getInputStream(entry));
        assertThat(dex.classDefs()).hasSize(expectedClassCount);
    }
}
Example 8
Source File: TitanEventualGraphTest.java From titan1withtp3.1 with Apache License 2.0 | 5 votes |
/**
 * Tests that timestamped edges can be updated
 */
@Test
public void testTimestampedEdgeUpdates() {
    clopen(option(GraphDatabaseConfiguration.STORE_META_TIMESTAMPS, "edgestore"), true,
            option(GraphDatabaseConfiguration.STORE_META_TTL, "edgestore"), true);
    // Transaction 1: Init graph with two vertices and one edge
    TitanTransaction tx = graph.buildTransaction().commitTime(Instant.ofEpochSecond(100)).start();
    TitanVertex v1 = tx.addVertex();
    TitanVertex v2 = tx.addVertex();
    Edge e = v1.addEdge("related", v2);
    e.property("time", 25);
    tx.commit();

    tx = graph.buildTransaction().commitTime(Instant.ofEpochSecond(200)).start();
    v1 = tx.getVertex(v1.longId());
    assertNotNull(v1);
    e = Iterators.getOnlyElement(v1.edges(Direction.OUT, "related"));
    assertNotNull(e);
    assertEquals(Integer.valueOf(25), e.value("time"));
    e.property("time", 125);
    tx.commit();

    tx = graph.buildTransaction().commitTime(Instant.ofEpochSecond(300)).start();
    v1 = tx.getVertex(v1.longId());
    assertNotNull(v1);
    e = Iterators.getOnlyElement(v1.edges(Direction.OUT, "related"));
    assertEquals(Integer.valueOf(125), e.value("time"));
    e.remove();
    tx.commit();
}
Example 9
Source File: ValidateStreamingAggregations.java From presto with Apache License 2.0 | 5 votes |
@Override
public Void visitAggregation(AggregationNode node, Void context) {
    if (node.getPreGroupedSymbols().isEmpty()) {
        return null;
    }

    StreamProperties properties = derivePropertiesRecursively(node.getSource(), metadata, session, types, typeAnalyzer);

    List<LocalProperty<Symbol>> desiredProperties = ImmutableList.of(new GroupingProperty<>(node.getPreGroupedSymbols()));
    Iterator<Optional<LocalProperty<Symbol>>> matchIterator = LocalProperties.match(properties.getLocalProperties(), desiredProperties).iterator();
    Optional<LocalProperty<Symbol>> unsatisfiedRequirement = Iterators.getOnlyElement(matchIterator);
    checkArgument(unsatisfiedRequirement.isEmpty(), "Streaming aggregation with input not grouped on the grouping keys");
    return null;
}
Example 10
Source File: RedundantDeltaTest.java From emodb with Apache License 2.0 | 4 votes |
@Test
public void testPartialCompactionWithRedundancy() throws Exception {
    InMemoryDataReaderDAO dataDao = new InMemoryDataReaderDAO();
    InMemoryTableDAO tableDao = new InMemoryTableDAO();

    DefaultDataStore store = new DefaultDataStore(new DatabusEventWriterRegistry(), tableDao, dataDao, dataDao,
            new NullSlowQueryLog(), new DiscardingExecutorService(), new InMemoryHistoryStore(),
            Optional.<URI>absent(), new InMemoryCompactionControlSource(), Conditions.alwaysFalse(),
            new DiscardingAuditWriter(), new InMemoryMapStore<>(), new MetricRegistry(), Clock.systemUTC());

    TableOptions options = new TableOptionsBuilder().setPlacement("default").build();
    store.createTable(TABLE, options, Collections.<String, Object>emptyMap(), newAudit("create table"));
    Table table = tableDao.get(TABLE);

    // Set the full consistency timestamp before the first delta
    dataDao.setFullConsistencyTimestamp(1408977300000L);

    // Create an update where the last four updates are redundant
    UUID unique0 = TimeUUIDs.uuidForTimeMillis(1408977310000L);
    UUID unique1 = TimeUUIDs.uuidForTimeMillis(1408977320000L);
    UUID redund0 = TimeUUIDs.uuidForTimeMillis(1408977330000L);
    UUID redund1 = TimeUUIDs.uuidForTimeMillis(1408977340000L);
    UUID redund2 = TimeUUIDs.uuidForTimeMillis(1408977350000L);
    UUID redund3 = TimeUUIDs.uuidForTimeMillis(1408977360000L);

    store.update(TABLE, KEY, unique0, Deltas.fromString("{\"name\":\"Bob\"}"), newAudit("submit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, unique1, Deltas.fromString("{\"name\":\"Ted\"}"), newAudit("resubmit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, redund0, Deltas.fromString("{\"name\":\"Ted\"}"), newAudit("resubmit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, redund1, Deltas.fromString("{\"name\":\"Ted\"}"), newAudit("resubmit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, redund2, Deltas.fromString("{\"name\":\"Ted\"}"), newAudit("resubmit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, redund3, Deltas.fromString("{\"name\":\"Ted\"}"), newAudit("resubmit"), WriteConsistency.STRONG);

    // Set the full consistency timestamp such that no compaction will take place
    dataDao.setFullConsistencyTimestamp(1408977300000L);
    store.compact(TABLE, KEY, null, ReadConsistency.STRONG, WriteConsistency.STRONG);

    Record record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);
    assertFalse(record.passOneIterator().hasNext());
    assertEquals(ImmutableList.of(unique0, unique1, redund0, redund1, redund2, redund3), toChangeIds(record.passTwoIterator()));

    // Set the full consistency timestamp so that only the first two redundant records are compacted
    dataDao.setFullConsistencyTimestamp(1408977345000L);
    store.compact(TABLE, KEY, null, ReadConsistency.STRONG, WriteConsistency.STRONG);

    record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);
    Map.Entry<DeltaClusteringKey, Compaction> compactionEntry = Iterators.getOnlyElement(record.passOneIterator());
    Compaction compaction = compactionEntry.getValue();
    assertEquals(unique0, compaction.getFirst());
    assertEquals(redund1, compaction.getCutoff());
    assertEquals(unique1, compaction.getLastMutation());
    assertEquals(ImmutableList.of(unique0, unique1, redund0, redund1, redund2, redund3, compactionEntry.getKey().getChangeId()),
            toChangeIds(record.passTwoIterator()));

    assertRedundantDelta(store, TABLE, KEY, redund0);
    assertRedundantDelta(store, TABLE, KEY, redund1);
    assertRedundantDelta(store, TABLE, KEY, redund2);
    assertRedundantDelta(store, TABLE, KEY, redund3);

    // Repeat again such that all deltas are compacted
    dataDao.setFullConsistencyTimestamp(TimeUUIDs.getTimeMillis(TimeUUIDs.getNext(compactionEntry.getKey().getChangeId())) + 2000L);
    store.compact(TABLE, KEY, null, ReadConsistency.STRONG, WriteConsistency.STRONG);

    record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);

    // We still keep the last compaction around since the new owning compaction will be out of FCT.
    int numOfCompactions = Iterators.advance(record.passOneIterator(), 10);
    assertEquals(numOfCompactions, 2, "Expect 2 compactions. The more recent is the effective one, "
            + "but we defer the owned compaction until later");

    UUID oldCompactionKey = compactionEntry.getKey().getChangeId();
    Map.Entry<DeltaClusteringKey, Compaction> latestCompactionEntry = Iterators.getOnlyElement(
            Iterators.filter(record.passOneIterator(), input -> !input.getKey().getChangeId().equals(oldCompactionKey)));
    compaction = latestCompactionEntry.getValue();
    assertEquals(unique0, compaction.getFirst());
    assertEquals(redund3, compaction.getCutoff());
    assertEquals(unique1, compaction.getLastMutation());
    assertEquals(ImmutableList.of(redund2, redund3, oldCompactionKey, latestCompactionEntry.getKey().getChangeId()),
            toChangeIds(record.passTwoIterator()));

    assertRedundantDelta(store, TABLE, KEY, redund0);
    assertRedundantDelta(store, TABLE, KEY, redund1);
    assertRedundantDelta(store, TABLE, KEY, redund2);
    assertRedundantDelta(store, TABLE, KEY, redund3);
}
Example 11
Source File: BeanConstraintValidatorFactory.java From cm_ext with Apache License 2.0 | 4 votes |
private String getBeanName(Class<?> key) {
    String[] beanNames = beanFactory.getBeanNamesForType(key);
    return Iterators.getOnlyElement(Iterators.forArray(beanNames));
}
Example 12
Source File: AbstractRelationModifier.java From SeaCloudsPlatform with Apache License 2.0 | 4 votes |
private String getNotHostRequirement(Map<String, Object> requirement) {
    checkState(requirement.size() == 1, "Not valid Requirement, too properties "
            + "for non-Host requirement. Expected a map with just an entry");
    return Iterators.getOnlyElement(requirement.keySet().iterator());
}
Example 13
Source File: RedundantDeltaTest.java From emodb with Apache License 2.0 | 4 votes |
@Test
public void testPartialCompactionWithNoRedundancy() throws Exception {
    InMemoryDataReaderDAO dataDao = new InMemoryDataReaderDAO();
    InMemoryTableDAO tableDao = new InMemoryTableDAO();

    DefaultDataStore store = new DefaultDataStore(new DatabusEventWriterRegistry(), tableDao, dataDao, dataDao,
            new NullSlowQueryLog(), new DiscardingExecutorService(), new InMemoryHistoryStore(),
            Optional.<URI>absent(), new InMemoryCompactionControlSource(), Conditions.alwaysFalse(),
            new DiscardingAuditWriter(), new InMemoryMapStore<>(), new MetricRegistry(), Clock.systemUTC());

    TableOptions options = new TableOptionsBuilder().setPlacement("default").build();
    store.createTable(TABLE, options, Collections.<String, Object>emptyMap(), newAudit("create table"));
    Table table = tableDao.get(TABLE);

    // Set the full consistency timestamp before the first delta
    dataDao.setFullConsistencyTimestamp(1408977300000L);

    // Create an update where there are no redundant deltas
    DeltaClusteringKey unique0 = new DeltaClusteringKey(TimeUUIDs.uuidForTimeMillis(1408977310000L), 1);
    DeltaClusteringKey unique1 = new DeltaClusteringKey(TimeUUIDs.uuidForTimeMillis(1408977320000L), 1);
    DeltaClusteringKey unique2 = new DeltaClusteringKey(TimeUUIDs.uuidForTimeMillis(1408977330000L), 1);
    DeltaClusteringKey unique3 = new DeltaClusteringKey(TimeUUIDs.uuidForTimeMillis(1408977340000L), 1);

    store.update(TABLE, KEY, unique0.getChangeId(), Deltas.fromString("{\"name\":\"Bob\"}"), newAudit("submit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, unique1.getChangeId(), Deltas.fromString("{\"name\":\"Carol\"}"), newAudit("resubmit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, unique2.getChangeId(), Deltas.fromString("{\"name\":\"Ted\"}"), newAudit("resubmit"), WriteConsistency.STRONG);
    store.update(TABLE, KEY, unique3.getChangeId(), Deltas.fromString("{\"name\":\"Alice\"}"), newAudit("resubmit"), WriteConsistency.STRONG);

    // Set the full consistency timestamp such that no compaction will take place
    dataDao.setFullConsistencyTimestamp(1408977300000L);
    store.compact(TABLE, KEY, null, ReadConsistency.STRONG, WriteConsistency.STRONG);

    Record record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);
    assertFalse(record.passOneIterator().hasNext());
    assertEquals(ImmutableList.of(unique0, unique1, unique2, unique3), toClusteringKeys(record.passTwoIterator()));

    // Set the full consistency timestamp so that only the first records are compacted
    dataDao.setFullConsistencyTimestamp(1408977325000L);
    store.compact(TABLE, KEY, null, ReadConsistency.STRONG, WriteConsistency.STRONG);

    record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);
    Map.Entry<DeltaClusteringKey, Compaction> compactionEntry = Iterators.getOnlyElement(record.passOneIterator());
    Compaction compaction = compactionEntry.getValue();
    assertEquals(unique0.getChangeId(), compaction.getFirst());
    assertEquals(unique1.getChangeId(), compaction.getCutoff());
    assertEquals(unique1.getChangeId(), compaction.getLastMutation());
    // Deltas will not get deleted since compaction is still out of FCT. For this test, we don't need the deltas to be deleted.
    assertEquals(toClusteringKeys(record.passTwoIterator()), ImmutableList.of(unique0, unique1, unique2, unique3, compactionEntry.getKey()));

    // Repeat again such that all deltas are compacted
    dataDao.setFullConsistencyTimestamp(TimeUUIDs.getTimeMillis(TimeUUIDs.getNext(compactionEntry.getKey().getChangeId())) + 2000L);
    store.compact(TABLE, KEY, null, ReadConsistency.STRONG, WriteConsistency.STRONG);

    record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);

    // We still keep the last compaction around since the new owning compaction will be out of FCT.
    int numOfCompactions = Iterators.advance(record.passOneIterator(), 10);
    assertEquals(numOfCompactions, 2, "Expect 2 compactions. The more recent is the effective one, "
            + "but we defer the owned compaction until later");

    DeltaClusteringKey oldCompactionKey = compactionEntry.getKey();
    record = dataDao.read(new Key(table, KEY), ReadConsistency.STRONG);

    Map.Entry<DeltaClusteringKey, Compaction> latestCompactionEntry = Iterators.getOnlyElement(
            Iterators.filter(record.passOneIterator(), input -> !input.getKey().equals(oldCompactionKey)));
    compaction = latestCompactionEntry.getValue();
    assertEquals(unique0.getChangeId(), compaction.getFirst());
    assertEquals(unique3.getChangeId(), compaction.getCutoff());
    assertEquals(unique3.getChangeId(), compaction.getLastMutation());
    assertEquals(toClusteringKeys(record.passTwoIterator()),
            ImmutableList.of(unique2, unique3, oldCompactionKey, latestCompactionEntry.getKey()),
            "Expecting unique2, and unique3 deltas");
}
Example 14
Source File: GraqlSteps.java From grakn with GNU Affero General Public License v3.0 | 4 votes |
@Given("transaction is initialised") public void transaction_is_initialised() { session = Iterators.getOnlyElement(ConnectionSteps.sessions.iterator()); tx = session.transaction(Transaction.Type.WRITE); assertTrue(tx.isOpen()); }
Example 15
Source File: TransactionImpl.java From fluo with Apache License 2.0 | 4 votes |
@Override
public Iterator<Result> handleUnknown(CommitData cd, Iterator<Result> results) throws Exception {

    Result result = Iterators.getOnlyElement(results);
    Status mutationStatus = result.getStatus();

    // TODO convert this code to async
    while (mutationStatus == Status.UNKNOWN) {
        TxInfo txInfo = TxInfo.getTransactionInfo(env, cd.prow, cd.pcol, startTs);

        switch (txInfo.status) {
            case LOCKED:
                return Collections
                        .singleton(
                                new Result(Status.ACCEPTED, result.getMutation(), result.getTabletServer()))
                        .iterator();
            case ROLLED_BACK:
                return Collections
                        .singleton(
                                new Result(Status.REJECTED, result.getMutation(), result.getTabletServer()))
                        .iterator();
            case UNKNOWN:
                // TODO async
                Result newResult = cd.cw.write(result.getMutation());
                mutationStatus = newResult.getStatus();

                if (mutationStatus != Status.UNKNOWN) {
                    return Collections.singleton(newResult).iterator();
                }

                // TODO handle case were data other tx has lock
                break;
            case COMMITTED:
            default:
                throw new IllegalStateException(
                        "unexpected tx state " + txInfo.status + " " + cd.prow + " " + cd.pcol);
        }
    }

    // TODO
    throw new IllegalStateException();
}
Example 16
Source File: TransactionImpl.java From fluo with Apache License 2.0 | 4 votes |
@Override
public boolean processResults(CommitData cd, Iterator<Result> results) throws Exception {
    Result result = Iterators.getOnlyElement(results);
    return result.getStatus() == Status.ACCEPTED;
}