Java Code Examples for org.apache.helix.HelixDataAccessor#getChildValuesMap()
The following examples show how to use org.apache.helix.HelixDataAccessor#getChildValuesMap().
Each example is taken from the Apache Helix project; the source file is noted above each example.
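getChildValuesMap() reads all child ZNodes under the parent path identified by a PropertyKey and returns their deserialized records as a map keyed by child name (for example, instance name or resource name). The sketch below shows the basic pattern; it is not taken from the Helix sources, and the class and method names (GetChildValuesMapSketch, printLiveInstances) are illustrative. It assumes an already-connected HelixManager and uses the two-argument overload, whose boolean (throwException in the Helix API) controls whether an incomplete read throws rather than returning partial data.

import java.util.Map;

import org.apache.helix.HelixDataAccessor;
import org.apache.helix.HelixManager;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.LiveInstance;

public class GetChildValuesMapSketch {
  // Minimal sketch: list the live instances of the cluster the manager is connected to.
  static void printLiveInstances(HelixManager manager) {
    HelixDataAccessor accessor = manager.getHelixDataAccessor();
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();

    // One call fetches every LiveInstance child; keys are instance names.
    Map<String, LiveInstance> liveInstances =
        accessor.getChildValuesMap(keyBuilder.liveInstances(), true);

    for (Map.Entry<String, LiveInstance> entry : liveInstances.entrySet()) {
      System.out.println(entry.getKey() + " -> session " + entry.getValue().getEphemeralOwner());
    }
  }
}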
Example 1
Source File: JobQueuesResource.java From helix with Apache License 2.0
StringRepresentation getHostedEntitiesRepresentation(String clusterName)
    throws JsonGenerationException, JsonMappingException, IOException {
  // Get all resources
  ZkClient zkClient =
      ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  HelixDataAccessor accessor =
      ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Map<String, HelixProperty> resourceConfigMap =
      accessor.getChildValuesMap(keyBuilder.resourceConfigs());

  // Create the result
  ZNRecord hostedEntitiesRecord = new ZNRecord("JobQueues");

  // Filter out non-workflow resources
  Iterator<Map.Entry<String, HelixProperty>> it = resourceConfigMap.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<String, HelixProperty> e = it.next();
    HelixProperty resource = e.getValue();
    Map<String, String> simpleFields = resource.getRecord().getSimpleFields();
    boolean isTerminable = resource.getRecord()
        .getBooleanField(WorkflowConfig.WorkflowConfigProperty.Terminable.name(), true);
    if (!simpleFields.containsKey(WorkflowConfig.WorkflowConfigProperty.TargetState.name())
        || !simpleFields.containsKey(WorkflowConfig.WorkflowConfigProperty.Dag.name())
        || isTerminable) {
      it.remove();
    }
  }

  // Populate the result
  List<String> allResources = Lists.newArrayList(resourceConfigMap.keySet());
  hostedEntitiesRecord.setListField("JobQueues", allResources);

  StringRepresentation representation = new StringRepresentation(
      ClusterRepresentationUtil.ZNRecordToJson(hostedEntitiesRecord), MediaType.APPLICATION_JSON);
  return representation;
}
Example 2
Source File: WorkflowsResource.java From helix with Apache License 2.0
StringRepresentation getHostedEntitiesRepresentation(String clusterName)
    throws JsonGenerationException, JsonMappingException, IOException {
  // Get all resources
  ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
  HelixDataAccessor accessor =
      ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Map<String, HelixProperty> resourceConfigMap =
      accessor.getChildValuesMap(keyBuilder.resourceConfigs());

  // Create the result
  ZNRecord hostedEntitiesRecord = new ZNRecord("Workflows");

  // Filter out non-workflow resources
  Iterator<Map.Entry<String, HelixProperty>> it = resourceConfigMap.entrySet().iterator();
  while (it.hasNext()) {
    Map.Entry<String, HelixProperty> e = it.next();
    HelixProperty resource = e.getValue();
    Map<String, String> simpleFields = resource.getRecord().getSimpleFields();
    if (!simpleFields.containsKey(WorkflowConfig.WorkflowConfigProperty.TargetState.name())
        || !simpleFields.containsKey(WorkflowConfig.WorkflowConfigProperty.Dag.name())) {
      it.remove();
    }
  }

  // Populate the result
  List<String> allResources = Lists.newArrayList(resourceConfigMap.keySet());
  hostedEntitiesRecord.setListField("WorkflowList", allResources);

  StringRepresentation representation = new StringRepresentation(
      ClusterRepresentationUtil.ZNRecordToJson(hostedEntitiesRecord), MediaType.APPLICATION_JSON);
  return representation;
}
Example 3
Source File: ClusterStateVerifier.java From helix with Apache License 2.0
private boolean verifyMasterNbInExtView(HelixDataAccessor accessor) {
  Builder keyBuilder = accessor.keyBuilder();

  Map<String, IdealState> idealStates =
      accessor.getChildValuesMap(keyBuilder.idealStates(), true);
  if (idealStates == null || idealStates.size() == 0) {
    LOG.info("No resource idealState");
    return true;
  }

  Map<String, ExternalView> extViews =
      accessor.getChildValuesMap(keyBuilder.externalViews(), true);
  if (extViews == null || extViews.size() < idealStates.size()) {
    LOG.info("No externalViews | externalView.size() < idealState.size()");
    return false;
  }

  for (String resource : extViews.keySet()) {
    int partitions = idealStates.get(resource).getNumPartitions();
    Map<String, Map<String, String>> instanceStateMap =
        extViews.get(resource).getRecord().getMapFields();
    if (instanceStateMap.size() < partitions) {
      LOG.info("Number of externalViews (" + instanceStateMap.size() + ") < partitions ("
          + partitions + ")");
      return false;
    }

    for (String partition : instanceStateMap.keySet()) {
      boolean foundMaster = false;
      for (String instance : instanceStateMap.get(partition).keySet()) {
        if (instanceStateMap.get(partition).get(instance).equalsIgnoreCase("MASTER")) {
          foundMaster = true;
          break;
        }
      }
      if (!foundMaster) {
        LOG.info("No MASTER for partition: " + partition);
        return false;
      }
    }
  }
  return true;
}
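Note the two call shapes: Examples 1 and 2 use the single-argument getChildValuesMap(key), while Example 3 passes a second boolean. In recent Helix releases the single-argument form is deprecated in favor of the boolean overload (the parameter is named throwException in the Helix sources), which makes the accessor throw on an incomplete read instead of silently returning a partial map; verify the exact behavior against your Helix version. A side-by-side sketch:

// Older single-argument form (Examples 1, 2, 6, and 10):
Map<String, HelixProperty> configs =
    accessor.getChildValuesMap(keyBuilder.resourceConfigs());

// Boolean overload (Examples 3, 4, 5, 7, 8, and 9): throw rather than
// return a partial map when some child reads fail.
Map<String, HelixProperty> strictConfigs =
    accessor.getChildValuesMap(keyBuilder.resourceConfigs(), true);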
Example 4
Source File: PropertyCache.java From helix with Apache License 2.0
private void doSimpleCacheRefresh(final HelixDataAccessor accessor) {
  _objCache = accessor.getChildValuesMap(_keyFuncs.getRootKey(accessor), true);
  _objMap = new HashMap<>(_objCache);
}
Example 5
Source File: WorkflowDispatcher.java From helix with Apache License 2.0
/**
 * Create a new workflow based on an existing one
 * @param manager connection to Helix
 * @param origWorkflowName the name of the existing workflow
 * @param newWorkflowName the name of the new workflow
 * @param newStartTime a provided start time that deviates from the desired start time
 * @return the cloned workflow, or null if there was a problem cloning the existing one
 */
public static Workflow cloneWorkflow(HelixManager manager, String origWorkflowName,
    String newWorkflowName, Date newStartTime) {
  // Read all resources, including the workflow and jobs of interest
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Map<String, HelixProperty> resourceConfigMap =
      accessor.getChildValuesMap(keyBuilder.resourceConfigs(), true);
  if (!resourceConfigMap.containsKey(origWorkflowName)) {
    LOG.error("No such workflow named " + origWorkflowName);
    return null;
  }
  if (resourceConfigMap.containsKey(newWorkflowName)) {
    LOG.error("Workflow with name " + newWorkflowName + " already exists!");
    return null;
  }

  // Create a new workflow with a new name
  Map<String, String> workflowConfigsMap =
      resourceConfigMap.get(origWorkflowName).getRecord().getSimpleFields();
  WorkflowConfig.Builder workflowConfigBlder = WorkflowConfig.Builder.fromMap(workflowConfigsMap);

  // Set the schedule, if applicable
  if (newStartTime != null) {
    ScheduleConfig scheduleConfig = ScheduleConfig.oneTimeDelayedStart(newStartTime);
    workflowConfigBlder.setScheduleConfig(scheduleConfig);
  }
  workflowConfigBlder.setTerminable(true);

  WorkflowConfig workflowConfig = workflowConfigBlder.build();
  JobDag jobDag = workflowConfig.getJobDag();
  Map<String, Set<String>> parentsToChildren = jobDag.getParentsToChildren();

  Workflow.Builder workflowBuilder = new Workflow.Builder(newWorkflowName);
  workflowBuilder.setWorkflowConfig(workflowConfig);

  // Add each job back as long as the original exists
  Set<String> namespacedJobs = jobDag.getAllNodes();
  for (String namespacedJob : namespacedJobs) {
    if (resourceConfigMap.containsKey(namespacedJob)) {
      // Copy over job-level and task-level configs
      String job = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedJob);
      HelixProperty jobConfig = resourceConfigMap.get(namespacedJob);
      Map<String, String> jobSimpleFields = jobConfig.getRecord().getSimpleFields();

      JobConfig.Builder jobCfgBuilder = JobConfig.Builder.fromMap(jobSimpleFields);
      jobCfgBuilder.setWorkflow(newWorkflowName); // overwrite workflow name

      Map<String, Map<String, String>> rawTaskConfigMap = jobConfig.getRecord().getMapFields();
      List<TaskConfig> taskConfigs = Lists.newLinkedList();
      for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
        TaskConfig taskConfig = TaskConfig.Builder.from(rawTaskConfig);
        taskConfigs.add(taskConfig);
      }
      jobCfgBuilder.addTaskConfigs(taskConfigs);
      workflowBuilder.addJob(job, jobCfgBuilder);

      // Add dag dependencies
      Set<String> children = parentsToChildren.get(namespacedJob);
      if (children != null) {
        for (String namespacedChild : children) {
          String child = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedChild);
          workflowBuilder.addParentChildDependency(job, child);
        }
      }
    }
  }
  return workflowBuilder.build();
}
Example 6
Source File: DeprecatedTaskRebalancer.java From helix with Apache License 2.0
/**
 * Create a new workflow based on an existing one
 * @param manager connection to Helix
 * @param origWorkflowName the name of the existing workflow
 * @param newWorkflowName the name of the new workflow
 * @param newStartTime a provided start time that deviates from the desired start time
 * @return the cloned workflow, or null if there was a problem cloning the existing one
 */
private Workflow cloneWorkflow(HelixManager manager, String origWorkflowName,
    String newWorkflowName, Date newStartTime) {
  // Read all resources, including the workflow and jobs of interest
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Map<String, HelixProperty> resourceConfigMap =
      accessor.getChildValuesMap(keyBuilder.resourceConfigs());
  if (!resourceConfigMap.containsKey(origWorkflowName)) {
    LOG.error("No such workflow named " + origWorkflowName);
    return null;
  }
  if (resourceConfigMap.containsKey(newWorkflowName)) {
    LOG.error("Workflow with name " + newWorkflowName + " already exists!");
    return null;
  }

  // Create a new workflow with a new name
  HelixProperty workflowConfig = resourceConfigMap.get(origWorkflowName);
  Map<String, String> wfSimpleFields = workflowConfig.getRecord().getSimpleFields();
  JobDag jobDag =
      JobDag.fromJson(wfSimpleFields.get(WorkflowConfig.WorkflowConfigProperty.Dag.name()));
  Map<String, Set<String>> parentsToChildren = jobDag.getParentsToChildren();
  Workflow.Builder builder = new Workflow.Builder(newWorkflowName);

  // Set the workflow expiry
  builder.setExpiry(
      Long.parseLong(wfSimpleFields.get(WorkflowConfig.WorkflowConfigProperty.Expiry.name())));

  // Set the schedule, if applicable
  ScheduleConfig scheduleConfig;
  if (newStartTime != null) {
    scheduleConfig = ScheduleConfig.oneTimeDelayedStart(newStartTime);
  } else {
    scheduleConfig = WorkflowConfig.parseScheduleFromConfigMap(wfSimpleFields);
  }
  if (scheduleConfig != null) {
    builder.setScheduleConfig(scheduleConfig);
  }

  // Add each job back as long as the original exists
  Set<String> namespacedJobs = jobDag.getAllNodes();
  for (String namespacedJob : namespacedJobs) {
    if (resourceConfigMap.containsKey(namespacedJob)) {
      // Copy over job-level and task-level configs
      String job = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedJob);
      HelixProperty jobConfig = resourceConfigMap.get(namespacedJob);
      Map<String, String> jobSimpleFields = jobConfig.getRecord().getSimpleFields();
      jobSimpleFields.put(JobConfig.JobConfigProperty.WorkflowID.name(), newWorkflowName); // overwrite workflow name
      for (Map.Entry<String, String> e : jobSimpleFields.entrySet()) {
        builder.addConfig(job, e.getKey(), e.getValue());
      }
      Map<String, Map<String, String>> rawTaskConfigMap = jobConfig.getRecord().getMapFields();
      List<TaskConfig> taskConfigs = Lists.newLinkedList();
      for (Map<String, String> rawTaskConfig : rawTaskConfigMap.values()) {
        TaskConfig taskConfig = TaskConfig.Builder.from(rawTaskConfig);
        taskConfigs.add(taskConfig);
      }
      builder.addTaskConfigs(job, taskConfigs);

      // Add dag dependencies
      Set<String> children = parentsToChildren.get(namespacedJob);
      if (children != null) {
        for (String namespacedChild : children) {
          String child = TaskUtil.getDenamespacedJobName(origWorkflowName, namespacedChild);
          builder.addParentChildDependency(job, child);
        }
      }
    }
  }
  return builder.build();
}
Example 7
Source File: TestMessagePartitionStateMismatch.java From helix with Apache License 2.0
@Test
public void testStateMismatch() throws InterruptedException {
  // String controllerName = CONTROLLER_PREFIX + "_0";
  HelixManager manager = _controller; // _startCMResultMap.get(controllerName)._manager;
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  Builder kb = accessor.keyBuilder();
  ExternalView ev = accessor.getProperty(kb.externalView(TEST_DB));
  Map<String, LiveInstance> liveinstanceMap =
      accessor.getChildValuesMap(accessor.keyBuilder().liveInstances(), true);

  for (String instanceName : liveinstanceMap.keySet()) {
    String sessionid = liveinstanceMap.get(instanceName).getEphemeralOwner();
    for (String partition : ev.getPartitionSet()) {
      if (ev.getStateMap(partition).containsKey(instanceName)) {
        String uuid = UUID.randomUUID().toString();
        Message message = new Message(MessageType.STATE_TRANSITION, uuid);
        boolean rand = new Random().nextInt(10) > 5;
        if (ev.getStateMap(partition).get(instanceName).equals("MASTER")) {
          message.setSrcName(manager.getInstanceName());
          message.setTgtName(instanceName);
          message.setMsgState(MessageState.NEW);
          message.setPartitionName(partition);
          message.setResourceName(TEST_DB);
          message.setFromState(rand ? "SLAVE" : "OFFLINE");
          message.setToState(rand ? "MASTER" : "SLAVE");
          message.setTgtSessionId(sessionid);
          message.setSrcSessionId(manager.getSessionId());
          message.setStateModelDef("MasterSlave");
          message.setStateModelFactoryName("DEFAULT");
        } else if (ev.getStateMap(partition).get(instanceName).equals("SLAVE")) {
          message.setSrcName(manager.getInstanceName());
          message.setTgtName(instanceName);
          message.setMsgState(MessageState.NEW);
          message.setPartitionName(partition);
          message.setResourceName(TEST_DB);
          message.setFromState(rand ? "MASTER" : "OFFLINE");
          message.setToState("SLAVE"); // original read rand ? "SLAVE" : "SLAVE" - both branches yield SLAVE
          message.setTgtSessionId(sessionid);
          message.setSrcSessionId(manager.getSessionId());
          message.setStateModelDef("MasterSlave");
          message.setStateModelFactoryName("DEFAULT");
        }
        accessor.setProperty(accessor.keyBuilder().message(instanceName, message.getMsgId()),
            message);
      }
    }
  }
  Thread.sleep(3000);
  ExternalView ev2 = accessor.getProperty(kb.externalView(TEST_DB));
  Assert.assertTrue(ev.equals(ev2));
}
Example 8
Source File: MockHelixTaskExecutor.java From helix with Apache License 2.0
void checkDuplicatedMessages(List<Message> messages) {
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  PropertyKey path = keyBuilder.currentStates(manager.getInstanceName(), manager.getSessionId());
  Map<String, CurrentState> currentStateMap = accessor.getChildValuesMap(path, true);

  Set<String> seenPartitions = new HashSet<>();
  for (Message message : messages) {
    if (message.getMsgType().equals(Message.MessageType.STATE_TRANSITION.name())) {
      String resource = message.getResourceName();
      String partition = message.getPartitionName();

      String key = resource + "-" + partition;
      if (seenPartitions.contains(key)) {
        // Duplicated message received for this resource:partition pair
        duplicatedMessages++;
      }
      seenPartitions.add(key);

      String toState = message.getToState();
      String state = null;
      if (currentStateMap.containsKey(resource)) {
        CurrentState currentState = currentStateMap.get(resource);
        state = currentState.getState(partition);
      }

      if (toState.equals(state) && message.getMsgState() == Message.MessageState.NEW) {
        // Extra message: partition is already in the target state
        extraStateTransition++;
      }

      String messageTarget =
          getMessageTarget(message.getResourceName(), message.getPartitionName());

      if (message.getMsgState() == Message.MessageState.NEW
          && _messageTaskMap.containsKey(messageTarget)) {
        String taskId = _messageTaskMap.get(messageTarget);
        MessageTaskInfo messageTaskInfo = _taskMap.get(taskId);
        Message existingMsg = messageTaskInfo.getTask().getMessage();
        // Compare message IDs with equals(); the original used !=, which compares
        // String references rather than values
        if (!existingMsg.getMsgId().equals(message.getMsgId())) {
          // Duplicated message: a state transition to toState is already in progress
          duplicatedMessagesInProgress++;
        }
      }
    }
  }
}
Example 9
Source File: TestAssignableInstanceManagerControllerSwitch.java From helix with Apache License 2.0
/**
 * Tests the duality of two AssignableInstanceManager instances to model the
 * situation where there is a Controller switch and AssignableInstanceManager is
 * built back from scratch.
 * @throws InterruptedException
 */
@Test
public void testControllerSwitch() throws InterruptedException {
  setupAndRunJobs();

  Map<String, LiveInstance> liveInstanceMap = new HashMap<>();
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<>();

  RoutingTableProvider routingTableProvider = new RoutingTableProvider(_manager);
  Collection<LiveInstance> liveInstances = routingTableProvider.getLiveInstances();
  for (LiveInstance liveInstance : liveInstances) {
    String instanceName = liveInstance.getInstanceName();
    liveInstanceMap.put(instanceName, liveInstance);
    instanceConfigMap.put(instanceName,
        _gSetupTool.getClusterManagementTool().getInstanceConfig(CLUSTER_NAME, instanceName));
  }

  // Get ClusterConfig
  ClusterConfig clusterConfig = _manager.getConfigAccessor().getClusterConfig(CLUSTER_NAME);

  // Initialize TaskDataCache
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  TaskDataCache taskDataCache = new TaskDataCache(CLUSTER_NAME);
  Map<String, ResourceConfig> resourceConfigMap =
      accessor.getChildValuesMap(accessor.keyBuilder().resourceConfigs(), true);

  // Wait for the job pipeline
  Thread.sleep(1000);
  taskDataCache.refresh(accessor, resourceConfigMap);

  // Create prev manager and build
  AssignableInstanceManager prevAssignableInstanceManager = new AssignableInstanceManager();
  prevAssignableInstanceManager.buildAssignableInstances(clusterConfig, taskDataCache,
      liveInstanceMap, instanceConfigMap);
  Map<String, AssignableInstance> prevAssignableInstanceMap =
      new HashMap<>(prevAssignableInstanceManager.getAssignableInstanceMap());
  Map<String, TaskAssignResult> prevTaskAssignResultMap =
      new HashMap<>(prevAssignableInstanceManager.getTaskAssignResultMap());

  // Generate a new AssignableInstanceManager
  taskDataCache.refresh(accessor, resourceConfigMap);
  AssignableInstanceManager newAssignableInstanceManager = new AssignableInstanceManager();
  newAssignableInstanceManager.buildAssignableInstances(clusterConfig, taskDataCache,
      liveInstanceMap, instanceConfigMap);
  Map<String, AssignableInstance> newAssignableInstanceMap =
      new HashMap<>(newAssignableInstanceManager.getAssignableInstanceMap());
  Map<String, TaskAssignResult> newTaskAssignResultMap =
      new HashMap<>(newAssignableInstanceManager.getTaskAssignResultMap());

  // Compare prev and new - they should match up exactly
  Assert.assertEquals(prevAssignableInstanceMap.size(), newAssignableInstanceMap.size());
  Assert.assertEquals(prevTaskAssignResultMap.size(), newTaskAssignResultMap.size());
  for (Map.Entry<String, AssignableInstance> assignableInstanceEntry : newAssignableInstanceMap
      .entrySet()) {
    String instance = assignableInstanceEntry.getKey();
    Assert.assertEquals(prevAssignableInstanceMap.get(instance).getCurrentAssignments(),
        assignableInstanceEntry.getValue().getCurrentAssignments());
    Assert.assertEquals(prevAssignableInstanceMap.get(instance).getTotalCapacity(),
        assignableInstanceEntry.getValue().getTotalCapacity());
    Assert.assertEquals(prevAssignableInstanceMap.get(instance).getUsedCapacity(),
        assignableInstanceEntry.getValue().getUsedCapacity());
  }
  for (Map.Entry<String, TaskAssignResult> taskAssignResultEntry : newTaskAssignResultMap
      .entrySet()) {
    String taskID = taskAssignResultEntry.getKey();
    Assert.assertEquals(prevTaskAssignResultMap.get(taskID).toString(),
        taskAssignResultEntry.getValue().toString());
  }

  // Shut down RoutingTableProvider so periodic update gets shut down
  routingTableProvider.shutdown();
}
Example 10
Source File: InstancesResource.java From helix with Apache License 2.0
StringRepresentation getInstancesRepresentation(String clusterName)
    throws JsonGenerationException, JsonMappingException, IOException {
  ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
  HelixDataAccessor accessor =
      ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);

  Map<String, LiveInstance> liveInstancesMap =
      accessor.getChildValuesMap(accessor.keyBuilder().liveInstances());
  Map<String, InstanceConfig> instanceConfigsMap =
      accessor.getChildValuesMap(accessor.keyBuilder().instanceConfigs());

  Map<String, List<String>> tagInstanceLists = new TreeMap<String, List<String>>();

  for (String instanceName : instanceConfigsMap.keySet()) {
    boolean isAlive = liveInstancesMap.containsKey(instanceName);
    instanceConfigsMap.get(instanceName).getRecord().setSimpleField("Alive", isAlive + "");
    InstanceConfig config = instanceConfigsMap.get(instanceName);
    for (String tag : config.getTags()) {
      if (!tagInstanceLists.containsKey(tag)) {
        tagInstanceLists.put(tag, new LinkedList<String>());
      }
      if (!tagInstanceLists.get(tag).contains(instanceName)) {
        tagInstanceLists.get(tag).add(instanceName);
      }
    }
  }

  // Wrap raw data into an object, then serialize it
  List<ZNRecord> recordList = Lists.newArrayList();
  for (InstanceConfig instanceConfig : instanceConfigsMap.values()) {
    recordList.add(instanceConfig.getRecord());
  }
  ListInstancesWrapper wrapper = new ListInstancesWrapper();
  wrapper.instanceInfo = recordList;
  wrapper.tagInfo = tagInstanceLists;

  StringRepresentation representation = new StringRepresentation(
      ClusterRepresentationUtil.ObjectToJson(wrapper), MediaType.APPLICATION_JSON);
  return representation;
}