Java Code Examples for java.util.Map.Entry#getValue()
The following examples show how to use java.util.Map.Entry#getValue(). Each example notes the project and source file it was taken from, along with its license.
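Before diving into the project examples, here is the core pattern in isolation: getValue() reads the value side of a key/value pair while iterating a map's entrySet(), which avoids a second get(key) lookup per key. A minimal self-contained sketch (the map contents are made up for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

public class EntryGetValueDemo {
    public static void main(String[] args) {
        Map<String, Integer> wordCounts = new HashMap<>();
        wordCounts.put("alpha", 3);
        wordCounts.put("beta", 5);

        // Iterating the entry set yields key and value together,
        // so no extra get(key) lookup is needed per iteration.
        for (Entry<String, Integer> entry : wordCounts.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}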
Example 1
Source File: MarshallerServiceClassRegistry.java From dawnsci with Eclipse Public License 1.0
private void populateClassMap(IClassRegistry registry) throws ClassRegistryDuplicateIdException {
    Map<String, Class<?>> registryIdToClassMap = registry.getIdToClassMap();
    for (Entry<String, Class<?>> entry : registryIdToClassMap.entrySet()) {
        // put() returns the previous value for this id, if there was one
        Class<?> prevVal = idToClassMap.put(entry.getKey(), entry.getValue());
        if ((prevVal != null) && (prevVal != entry.getValue())) {
            String message = "Class id clash with registry: " + registry.toString()
                    + ". Previous value for id " + entry.getKey() + " is " + prevVal
                    + ". Overwriting value is: " + entry.getValue() + ".";
            throw new ClassRegistryDuplicateIdException(message);
        }
    }
}
Example 2
Source File: ValidatorUtils.java From lams with GNU General Public License v2.0
/**
 * Makes a deep copy of a <code>FastHashMap</code> if the values
 * are <code>Msg</code>, <code>Arg</code>, or <code>Var</code>.
 * Otherwise it is a shallow copy.
 *
 * @param map <code>FastHashMap</code> to copy.
 * @return FastHashMap A copy of the <code>FastHashMap</code> that was passed in.
 * @deprecated This method is not part of Validator's public API. Validator
 * will use it internally until FastHashMap references are removed. Use
 * copyMap() instead.
 */
@Deprecated
public static FastHashMap copyFastHashMap(FastHashMap map) {
    FastHashMap results = new FastHashMap();

    @SuppressWarnings("unchecked") // FastHashMap is not generic
    Iterator<Entry<String, ?>> i = map.entrySet().iterator();
    while (i.hasNext()) {
        Entry<String, ?> entry = i.next();
        String key = entry.getKey();
        Object value = entry.getValue();
        if (value instanceof Msg) {
            results.put(key, ((Msg) value).clone());
        } else if (value instanceof Arg) {
            results.put(key, ((Arg) value).clone());
        } else if (value instanceof Var) {
            results.put(key, ((Var) value).clone());
        } else {
            results.put(key, value);
        }
    }

    results.setFast(true);
    return results;
}
Example 3
Source File: AmbariConfigurationMonitor.java From knox with Apache License 2.0
/**
 * Add cluster configuration data to the monitor, which it will use when
 * determining if configuration has changed.
 *
 * @param cluster         An AmbariCluster object.
 * @param discoveryConfig The discovery configuration associated with the cluster.
 */
void addClusterConfigVersions(AmbariCluster cluster, ServiceDiscoveryConfig discoveryConfig) {
    String clusterName = cluster.getName();

    // Register the cluster discovery configuration for the monitor connections
    persistDiscoveryConfiguration(clusterName, discoveryConfig);
    addDiscoveryConfig(clusterName, discoveryConfig);

    // Build the set of configuration versions
    Map<String, String> configVersions = new HashMap<>();
    Map<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfigs = cluster.getServiceConfigurations();
    for (Entry<String, Map<String, AmbariCluster.ServiceConfiguration>> serviceConfig : serviceConfigs.entrySet()) {
        Map<String, AmbariCluster.ServiceConfiguration> configTypeVersionMap = serviceConfig.getValue();
        for (AmbariCluster.ServiceConfiguration config : configTypeVersionMap.values()) {
            String configType = config.getType();
            String version = config.getVersion();
            configVersions.put(configType, version);
        }
    }

    persistClusterVersionData(discoveryConfig.getAddress(), clusterName, configVersions);
    addClusterConfigVersions(discoveryConfig.getAddress(), clusterName, configVersions);
}
Example 4
Source File: SaltApiRunPostResponse.java From cloudbreak with Apache License 2.0
protected Object grainsResponse(String body) throws IOException {
    ApplyResponse applyResponse = new ApplyResponse();
    List<Map<String, JsonNode>> responseList = new ArrayList<>();
    Map<String, JsonNode> hostMap = new HashMap<>();
    for (Entry<String, CloudVmMetaDataStatus> stringCloudVmMetaDataStatusEntry : instanceMap.entrySet()) {
        CloudVmMetaDataStatus cloudVmMetaDataStatus = stringCloudVmMetaDataStatusEntry.getValue();
        if (InstanceStatus.STARTED == cloudVmMetaDataStatus.getCloudVmInstanceStatus().getStatus()) {
            String privateIp = cloudVmMetaDataStatus.getMetaData().getPrivateIp();
            String hostname = "host-" + privateIp.replace(".", "-") + ".example.com";
            if (grains.containsKey(hostname)) {
                Matcher argMatcher = Pattern.compile(".*(arg=([^&]+)).*").matcher(body);
                if (argMatcher.matches()) {
                    hostMap.put(hostname, objectMapper.valueToTree(grains.get(hostname).get(argMatcher.group(2))));
                }
            }
        }
    }
    responseList.add(hostMap);
    applyResponse.setResult(responseList);
    return objectMapper.writeValueAsString(applyResponse);
}
Example 5
Source File: RebuildBreakdownServiceImpl.java From sakai with Educational Community License v2.0
/**
 * rebuildToolSessions() expects to find a Map of Maps. The outer map contains
 * the ToolSession IDs, and for each ToolSession ID the inner map contains the
 * attributes of that tool session.
 *
 * @param mySession      a Session that can be resolved to a MySession, giving access
 *                       to the ToolSession property
 * @param toolSessionMap a serialized map of maps, the outer map containing the
 *                       ToolSession ID and the inner map containing the details of the ToolSession
 */
private void rebuildToolSessions(MySession mySession, Map<String, Serializable> toolSessionMap) {
    for (Entry<String, Serializable> entry : toolSessionMap.entrySet()) {
        String toolSessionKey = entry.getKey();
        // if a tool session doesn't exist for this key, a new one will be created automatically
        MyLittleSession toolSession = (MyLittleSession) mySession.getToolSession(toolSessionKey);
        Serializable serializable = entry.getValue();
        if (!(serializable instanceof Map)) {
            log.warn("inner object for toolSession [" + toolSessionKey + "] should be [Map], found ["
                    + serializable.getClass().getName() + "]");
            continue;
        }
        @SuppressWarnings("unchecked")
        Map<String, Serializable> toolAttributes = (Map<String, Serializable>) serializable;
        processMLSessionMap(toolSession, toolAttributes);
    }
}
Example 6
Source File: NetworkConfig.java From fabric-sdk-java with Apache License 2.0
private static Properties extractProperties(JsonObject json, String fieldName) {
    Properties props = new Properties();

    // Extract any other grpc options
    JsonObject options = getJsonObject(json, fieldName);
    if (options != null) {
        for (Entry<String, JsonValue> entry : options.entrySet()) {
            String key = entry.getKey();
            JsonValue value = entry.getValue();
            props.setProperty(key, getJsonValue(value));
        }
    }

    return props;
}
Example 7
Source File: BasicObjectSerializer.java From beakerx with Apache License 2.0
protected boolean isPrimitiveTypeMap(Object o) {
    if (!(o instanceof Map<?, ?>))
        return false;
    Map<?, ?> m = (Map<?, ?>) o;
    Set<?> eset = m.entrySet();
    for (Object entry : eset) {
        Entry<?, ?> e = (Entry<?, ?>) entry;
        if (e.getValue() != null && !isPrimitiveType(e.getValue().getClass().getName()))
            return false;
    }
    return true;
}
Example 8
Source File: EQtlPermutationTranscriptionFactorAnalysisV2.java From systemsgenetics with GNU General Public License v3.0
public int getTotalHits(HashMap<String, Integer> countsMap) {
    int total = 0;
    Iterator<Entry<String, Integer>> hitsIterator = countsMap.entrySet().iterator();
    while (hitsIterator.hasNext()) {
        Entry<String, Integer> next = hitsIterator.next();
        total += next.getValue();
    }
    return total;
}
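As an aside, on Java 8 and later the same total can be computed without touching Entry at all, by streaming the map's values. A brief equivalent sketch of getTotalHits:

public int getTotalHits(HashMap<String, Integer> countsMap) {
    // Sums all values; equivalent to iterating entrySet() and calling getValue()
    return countsMap.values().stream().mapToInt(Integer::intValue).sum();
}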
Example 9
Source File: AbstractWALRoller.java From hbase with Apache License 2.0
/**
 * we need to check low replication in period, see HBASE-18132
 */
private void checkLowReplication(long now) {
    try {
        for (Entry<WAL, Boolean> entry : walNeedsRoll.entrySet()) {
            WAL wal = entry.getKey();
            boolean needRollAlready = entry.getValue();
            if (needRollAlready || !(wal instanceof AbstractFSWAL)) {
                continue;
            }
            ((AbstractFSWAL<?>) wal).checkLogLowReplication(checkLowReplicationInterval);
        }
    } catch (Throwable e) {
        LOG.warn("Failed checking low replication", e);
    }
}
Example 10
Source File: DirectoryWatcher.java From git-as-svn with GNU General Public License v2.0
private boolean handleFailToReset(@NotNull WatchKey key) {
    Path path = (Path) key.watchable();
    key.cancel();
    for (Iterator<Entry<WatchKey, Path>> it = map.entrySet().iterator(); it.hasNext(); ) {
        Entry<WatchKey, Path> e = it.next();
        WatchKey eKey = e.getKey();
        Path ePath = e.getValue();
        if (ePath.getParent().equals(path)) {
            eKey.cancel();
            it.remove();
        }
    }
    return path.equals(basePath);
}
Example 11
Source File: RootNode.java From birt with Eclipse Public License 1.0
private IDBNode[] fetchChildrenFromMap(Map<String, TablesAndProcedures> schemas, int maxSchemaCount) {
    List<SchemaNode> result = new ArrayList<SchemaNode>();
    int count = 0;
    for (Entry<String, TablesAndProcedures> entry : schemas.entrySet()) {
        if (count >= maxSchemaCount) {
            break;
        }
        SchemaNode schema = new SchemaNode(entry.getKey());
        TablesAndProcedures tap = entry.getValue();
        List<IDBNode> children = new ArrayList<IDBNode>();
        children.addAll(Arrays.asList(tap.getTables()));
        if (tap.getProcedureCount() > 0) {
            ProcedureFlagNode procedureFlagNode = new ProcedureFlagNode(entry.getKey());
            procedureFlagNode.setChildren(tap.getProcedures());
            children.add(procedureFlagNode);
        }
        schema.setChildren(children.toArray(new IDBNode[0]));
        result.add(schema);
        ++count;
    }
    if (result.size() == 1 && result.get(0).getSchemaName() == null) {
        // schemas are not supported; return the children directly
        return result.get(0).getChildren();
    } else {
        Collections.sort(result);
        return result.toArray(new IDBNode[0]);
    }
}
Example 12
Source File: VotedPerceptron.java From SEAL with Apache License 2.0
public void addExample(SID instanceID, SparseVector featureVector, double label) {
    for (Entry<SID, Cell> entry : featureVector) {
        SID featureID = entry.getKey();
        double value = entry.getValue().value;
        addExample(instanceID, featureID, value, label);
    }
}
Example 13
Source File: StudentCurricularPlanPropaeudeuticsEnrolmentManager.java From fenixedu-academic with GNU Lesser General Public License v3.0
@Override
protected void performEnrolments(Map<EnrolmentResultType, List<IDegreeModuleToEvaluate>> degreeModulesToEnrolMap) {
    for (final Entry<EnrolmentResultType, List<IDegreeModuleToEvaluate>> entry : degreeModulesToEnrolMap.entrySet()) {
        for (final IDegreeModuleToEvaluate degreeModuleToEvaluate : entry.getValue()) {
            if (degreeModuleToEvaluate.isEnroling()
                    && degreeModuleToEvaluate.getDegreeModule().isCurricularCourse()) {
                final CurricularCourse curricularCourse =
                        (CurricularCourse) degreeModuleToEvaluate.getDegreeModule();
                checkIDegreeModuleToEvaluate(curricularCourse);
                new Enrolment(getStudentCurricularPlan(), degreeModuleToEvaluate.getCurriculumGroup(),
                        curricularCourse, getExecutionSemester(), EnrollmentCondition.VALIDATED,
                        getResponsiblePerson().getUsername());
            }
        }
    }
    getRegistration().updateEnrolmentDate(getExecutionYear());
}
Example 14
Source File: Form.java From olat with Apache License 2.0
/**
 * Internal helper to clear the temporary request parameter and file maps. Will delete all
 * uploaded files if they have not been removed by the responsible FormItem.
 */
private void doClearRequestParameterAndMultipartData() {
    for (Entry<String, File> entry : requestMultipartFiles.entrySet()) {
        File tmpFile = entry.getValue();
        if (tmpFile.exists()) {
            tmpFile.delete();
        }
    }
    requestMultipartFiles.clear();
    requestMultipartFileNames.clear();
    requestMultipartFileMimeTypes.clear();
    requestParams.clear();
    requestError = REQUEST_ERROR_NO_ERROR;
}
Example 15
Source File: CachedRecipeData.java From GregTech with GNU Lesser General Public License v3.0
private boolean consumeRecipeItems(boolean simulate) {
    for (Entry<ItemStackKey, Integer> entry : requiredItems.entrySet()) {
        ItemStackKey itemStackKey = entry.getKey();
        if (itemSourceList.extractItem(itemStackKey, entry.getValue(), simulate) != entry.getValue()) {
            return false;
        }
    }
    return true;
}
Example 16
Source File: VetoableChangeSupport.java From openjdk-8 with GNU General Public License v2.0
/**
 * @serialData Null terminated list of <code>VetoableChangeListeners</code>.
 * <p>
 * At serialization time we skip non-serializable listeners and
 * only serialize the serializable listeners.
 */
private void writeObject(ObjectOutputStream s) throws IOException {
    Hashtable<String, VetoableChangeSupport> children = null;
    VetoableChangeListener[] listeners = null;
    synchronized (this.map) {
        for (Entry<String, VetoableChangeListener[]> entry : this.map.getEntries()) {
            String property = entry.getKey();
            if (property == null) {
                listeners = entry.getValue();
            } else {
                if (children == null) {
                    children = new Hashtable<>();
                }
                VetoableChangeSupport vcs = new VetoableChangeSupport(this.source);
                vcs.map.set(null, entry.getValue());
                children.put(property, vcs);
            }
        }
    }
    ObjectOutputStream.PutField fields = s.putFields();
    fields.put("children", children);
    fields.put("source", this.source);
    fields.put("vetoableChangeSupportSerializedDataVersion", 2);
    s.writeFields();

    if (listeners != null) {
        for (VetoableChangeListener l : listeners) {
            if (l instanceof Serializable) {
                s.writeObject(l);
            }
        }
    }
    s.writeObject(null);
}
Example 17
Source File: TezOperDependencyParallelismEstimator.java From spork with Apache License 2.0
@Override
public int estimateParallelism(TezOperPlan plan, TezOperator tezOper, Configuration conf) throws IOException {
    if (tezOper.isVertexGroup()) {
        return -1;
    }

    boolean intermediateReducer = TezCompilerUtil.isIntermediateReducer(tezOper);

    // TODO: If map opts and reduce opts are same estimate higher parallelism
    // for tasks based on the count of number of map tasks else be conservative as now
    maxTaskCount = conf.getInt(PigReducerEstimator.MAX_REDUCER_COUNT_PARAM,
            PigReducerEstimator.DEFAULT_MAX_REDUCER_COUNT_PARAM);

    // If parallelism is set explicitly, respect it
    if (!intermediateReducer && tezOper.getRequestedParallelism() != -1) {
        return tezOper.getRequestedParallelism();
    }

    // If we have already estimated parallelism, use that one
    if (tezOper.getEstimatedParallelism() != -1) {
        return tezOper.getEstimatedParallelism();
    }

    List<TezOperator> preds = plan.getPredecessors(tezOper);
    if (preds == null) {
        throw new IOException("Cannot estimate parallelism for source vertex");
    }

    double estimatedParallelism = 0;

    for (Entry<OperatorKey, TezEdgeDescriptor> entry : tezOper.inEdges.entrySet()) {
        TezOperator pred = getPredecessorWithKey(plan, tezOper, entry.getKey().toString());

        // Don't include broadcast edge, broadcast edge is used for
        // replicated join (covered in TezParallelismFactorVisitor.visitFRJoin)
        // and sample/scalar (does not impact parallelism)
        if (entry.getValue().dataMovementType == DataMovementType.SCATTER_GATHER
                || entry.getValue().dataMovementType == DataMovementType.ONE_TO_ONE) {
            double predParallelism = pred.getEffectiveParallelism();
            if (predParallelism == -1) {
                throw new IOException("Cannot estimate parallelism for "
                        + tezOper.getOperatorKey().toString()
                        + ", effective parallelism for predecessor "
                        + tezOper.getOperatorKey().toString() + " is -1");
            }

            // For cases like Union we can just limit to sum of pred vertices parallelism
            boolean applyFactor = !tezOper.isUnion();
            if (pred.plan != null && applyFactor) { // pred.plan can be null if it is a VertexGroup
                TezParallelismFactorVisitor parallelismFactorVisitor =
                        new TezParallelismFactorVisitor(pred.plan, tezOper.getOperatorKey().toString());
                parallelismFactorVisitor.visit();
                predParallelism = predParallelism * parallelismFactorVisitor.getFactor();
            }
            estimatedParallelism += predParallelism;
        }
    }

    int roundedEstimatedParallelism = (int) Math.ceil(estimatedParallelism);

    if (intermediateReducer && tezOper.isOverrideIntermediateParallelism()) {
        // Estimated reducers should not be more than the configured limit
        roundedEstimatedParallelism = Math.min(roundedEstimatedParallelism, maxTaskCount);
        int userSpecifiedParallelism = pc.defaultParallel;
        if (tezOper.getRequestedParallelism() != -1) {
            userSpecifiedParallelism = tezOper.getRequestedParallelism();
        }
        int intermediateParallelism = Math.max(userSpecifiedParallelism, roundedEstimatedParallelism);
        if (userSpecifiedParallelism != -1
                && (intermediateParallelism > 200 && intermediateParallelism > (2 * userSpecifiedParallelism))) {
            // Estimated reducers shall not be more than 2x of requested parallelism
            // if greater than 200 and we are overriding user specified values
            intermediateParallelism = 2 * userSpecifiedParallelism;
        }
        roundedEstimatedParallelism = intermediateParallelism;
    } else {
        roundedEstimatedParallelism = Math.min(roundedEstimatedParallelism, maxTaskCount);
    }

    return roundedEstimatedParallelism;
}
Example 18
Source File: ShortCircuitCache.java From hadoop with Apache License 2.0
/**
 * Run the CacheCleaner thread.
 *
 * Whenever a thread requests a ShortCircuitReplica object, we will make
 * sure it gets one. That ShortCircuitReplica object can then be re-used
 * when another thread requests a ShortCircuitReplica object for the same
 * block. So in that sense, there is no maximum size to the cache.
 *
 * However, when a ShortCircuitReplica object is unreferenced by the
 * thread(s) that are using it, it becomes evictable. There are two
 * separate eviction lists-- one for mmaped objects, and another for
 * non-mmaped objects. We do this in order to avoid having the regular
 * files kick the mmaped files out of the cache too quickly. Reusing
 * an already-existing mmap gives a huge performance boost, since the
 * page table entries don't have to be re-populated. Both the mmap
 * and non-mmap evictable lists have maximum sizes and maximum lifespans.
 */
@Override
public void run() {
    ShortCircuitCache.this.lock.lock();
    try {
        if (ShortCircuitCache.this.closed) return;
        long curMs = Time.monotonicNow();
        if (LOG.isDebugEnabled()) {
            LOG.debug(this + ": cache cleaner running at " + curMs);
        }
        int numDemoted = demoteOldEvictableMmaped(curMs);
        int numPurged = 0;
        Long evictionTimeNs = Long.valueOf(0);
        while (true) {
            Entry<Long, ShortCircuitReplica> entry = evictable.ceilingEntry(evictionTimeNs);
            if (entry == null) break;
            evictionTimeNs = entry.getKey();
            long evictionTimeMs = TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
            if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs) break;
            ShortCircuitReplica replica = entry.getValue();
            if (LOG.isTraceEnabled()) {
                LOG.trace("CacheCleaner: purging " + replica + ": "
                        + StringUtils.getStackTrace(Thread.currentThread()));
            }
            purge(replica);
            numPurged++;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug(this + ": finishing cache cleaner run started at " + curMs
                    + ". Demoted " + numDemoted + " mmapped replicas; "
                    + "purged " + numPurged + " replicas.");
        }
    } finally {
        ShortCircuitCache.this.lock.unlock();
    }
}
Example 19
Source File: ShardShardRecordProcessorCheckpointerTest.java From amazon-kinesis-client with Apache License 2.0
/**
 * A utility function to simplify various sequences of intermixed updates to the checkpointer, and calls to
 * checkpoint() and checkpoint(sequenceNumber). Takes a map where the key is a new sequence number to set in the
 * checkpointer and the value is a CheckpointAction indicating an action to take:
 * NONE -> Set the sequence number, don't do anything else
 * NO_SEQUENCE_NUMBER -> Set the sequence number and call checkpoint()
 * WITH_SEQUENCE_NUMBER -> Set the sequence number and call checkpoint(sequenceNumber) with that sequence number
 *
 * @param processingCheckpointer
 * @param checkpointValueAndAction
 *            A map describing which checkpoint value to set in the checkpointer, and what action to take
 * @throws Exception
 */
private void testMixedCheckpointCalls(ShardRecordProcessorCheckpointer processingCheckpointer,
        LinkedHashMap<String, CheckpointAction> checkpointValueAndAction,
        CheckpointerType checkpointerType) throws Exception {

    for (Entry<String, CheckpointAction> entry : checkpointValueAndAction.entrySet()) {
        PreparedCheckpointer preparedCheckpoint = null;
        ExtendedSequenceNumber lastCheckpointValue = processingCheckpointer.lastCheckpointValue();

        if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) {
            // Before shard end, we will pretend to do what we expect the shutdown task to do
            processingCheckpointer.sequenceNumberAtShardEnd(
                    processingCheckpointer.largestPermittedCheckpointValue());
        }

        // Advance the largest checkpoint and check that it is updated.
        processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey()));
        assertThat("Expected the largest checkpoint value to be updated after setting it",
                processingCheckpointer.largestPermittedCheckpointValue(),
                equalTo(new ExtendedSequenceNumber(entry.getKey())));

        switch (entry.getValue()) {
        case NONE:
            // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as
            // when this block started then continue to the next instruction
            assertThat("Expected the last checkpoint value to stay the same if we didn't checkpoint",
                    processingCheckpointer.lastCheckpointValue(), equalTo(lastCheckpointValue));
            continue;
        case NO_SEQUENCE_NUMBER:
            switch (checkpointerType) {
            case CHECKPOINTER:
                processingCheckpointer.checkpoint();
                break;
            case PREPARED_CHECKPOINTER:
                preparedCheckpoint = processingCheckpointer.prepareCheckpoint();
                preparedCheckpoint.checkpoint();
            case PREPARE_THEN_CHECKPOINTER:
                preparedCheckpoint = processingCheckpointer.prepareCheckpoint();
                processingCheckpointer.checkpoint(
                        preparedCheckpoint.pendingCheckpoint().sequenceNumber(),
                        preparedCheckpoint.pendingCheckpoint().subSequenceNumber());
            }
            break;
        case WITH_SEQUENCE_NUMBER:
            switch (checkpointerType) {
            case CHECKPOINTER:
                processingCheckpointer.checkpoint(entry.getKey());
                break;
            case PREPARED_CHECKPOINTER:
                preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey());
                preparedCheckpoint.checkpoint();
            case PREPARE_THEN_CHECKPOINTER:
                preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey());
                processingCheckpointer.checkpoint(
                        preparedCheckpoint.pendingCheckpoint().sequenceNumber(),
                        preparedCheckpoint.pendingCheckpoint().subSequenceNumber());
            }
            break;
        }

        // We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date
        assertThat("Expected the last checkpoint value to change after checkpointing",
                processingCheckpointer.lastCheckpointValue(),
                equalTo(new ExtendedSequenceNumber(entry.getKey())));
        assertThat("Expected the largest checkpoint value to remain the same since the last set",
                processingCheckpointer.largestPermittedCheckpointValue(),
                equalTo(new ExtendedSequenceNumber(entry.getKey())));

        assertThat(checkpoint.getCheckpoint(shardId), equalTo(new ExtendedSequenceNumber(entry.getKey())));
        assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(),
                equalTo(new ExtendedSequenceNumber(entry.getKey())));
        assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue());
    }
}
Example 20
Source File: AbstractMap.java From j2objc with Apache License 2.0
/**
 * Creates an entry representing the same mapping as the
 * specified entry.
 *
 * @param entry the entry to copy
 */
public SimpleEntry(Entry<? extends K, ? extends V> entry) {
    this.key = entry.getKey();
    this.value = entry.getValue();
}
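As a closing note, AbstractMap.SimpleEntry is also handy for building standalone key/value pairs outside of any map, and the copy constructor above duplicates an existing mapping. A small usage sketch (the values are illustrative):

import java.util.AbstractMap;
import java.util.Map.Entry;

public class SimpleEntryDemo {
    public static void main(String[] args) {
        Entry<String, Integer> original = new AbstractMap.SimpleEntry<>("answer", 42);
        // The copy constructor shown above duplicates the mapping
        Entry<String, Integer> copy = new AbstractMap.SimpleEntry<>(original);
        System.out.println(copy.getKey() + " -> " + copy.getValue()); // answer -> 42
    }
}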