Java Code Examples for org.apache.helix.PropertyKey#Builder
The following examples show how to use org.apache.helix.PropertyKey#Builder.
You can go to the original project or source file by following the links above each example.
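Most of the examples below follow the same basic pattern: obtain a PropertyKey.Builder (either constructed directly from a cluster name or returned by a HelixDataAccessor), use it to build a typed PropertyKey for a well-known ZNode path, and pass that key to an accessor. The following is a minimal sketch of that pattern; the cluster name, resource name, and the commented-out accessor calls are placeholders for illustration, not part of any example below.

import org.apache.helix.PropertyKey;

public class PropertyKeyBuilderSketch {
  public static void main(String[] args) {
    // Placeholder names for illustration only.
    String clusterName = "MyCluster";
    String resourceName = "MyResource";

    // A builder can be constructed directly from the cluster name...
    PropertyKey.Builder keyBuilder = new PropertyKey.Builder(clusterName);

    // ...or obtained from an existing HelixDataAccessor, as most examples below do:
    // PropertyKey.Builder keyBuilder = accessor.keyBuilder();

    // Each builder method returns a typed PropertyKey pointing at a cluster ZNode path.
    PropertyKey idealStateKey = keyBuilder.idealStates(resourceName);
    PropertyKey liveInstancesKey = keyBuilder.liveInstances();
    System.out.println(idealStateKey.getPath());
    System.out.println(liveInstancesKey.getPath());

    // Keys are then passed to a data accessor to read or write the property, e.g.:
    // accessor.getProperty(idealStateKey);
    // accessor.setProperty(idealStateKey, idealState);
  }
}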
Example 1
Source File: ZKHelixAdmin.java From helix with Apache License 2.0 | 6 votes |
@Override
public void addCustomizedStateConfig(String clusterName,
    CustomizedStateConfig customizedStateConfig) {
  logger.info("Add CustomizedStateConfig to cluster {}, CustomizedStateConfig is {}", clusterName,
      customizedStateConfig.toString());

  if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
    throw new HelixException("cluster " + clusterName + " is not setup yet");
  }

  CustomizedStateConfig.Builder builder =
      new CustomizedStateConfig.Builder(customizedStateConfig);
  CustomizedStateConfig customizedStateConfigFromBuilder = builder.build();

  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();

  accessor.setProperty(keyBuilder.customizedStateConfig(), customizedStateConfigFromBuilder);
}
Example 2
Source File: ZKHelixAdmin.java From helix with Apache License 2.0 | 6 votes |
@Override
public void addCloudConfig(String clusterName, CloudConfig cloudConfig) {
  logger.info("Add CloudConfig to cluster {}, CloudConfig is {}.", clusterName,
      cloudConfig.toString());

  if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
    throw new HelixException("cluster " + clusterName + " is not setup yet");
  }

  CloudConfig.Builder builder = new CloudConfig.Builder(cloudConfig);
  CloudConfig cloudConfigBuilder = builder.build();

  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  accessor.setProperty(keyBuilder.cloudConfig(), cloudConfigBuilder);
}
Example 3
Source File: ErrorResource.java From helix with Apache License 2.0 | 6 votes |
StringRepresentation getInstanceErrorsRepresentation(String clusterName, String instanceName,
    String resourceGroup) throws JsonGenerationException, JsonMappingException, IOException {
  ZkClient zkClient = (ZkClient) getContext().getAttributes().get(RestAdminApplication.ZKCLIENT);
  String instanceSessionId =
      ClusterRepresentationUtil.getInstanceSessionId(zkClient, clusterName, instanceName);
  Builder keyBuilder = new PropertyKey.Builder(clusterName);

  String message = ClusterRepresentationUtil.getInstancePropertiesAsString(zkClient, clusterName,
      keyBuilder.stateTransitionErrors(instanceName, instanceSessionId, resourceGroup),
      // instanceSessionId
      // + "__"
      // + resourceGroup,
      MediaType.APPLICATION_JSON);

  StringRepresentation representation =
      new StringRepresentation(message, MediaType.APPLICATION_JSON);
  return representation;
}
Example 4
Source File: TestNoThrottleDisabledPartitions.java From helix with Apache License 2.0 | 6 votes |
/**
 * Set throttle limits only for load balance so that none of them would happen.
 */
private void setThrottleConfigForLoadBalance() {
  PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
  ClusterConfig clusterConfig = _accessor.getProperty(_accessor.keyBuilder().clusterConfig());
  clusterConfig.setResourcePriorityField("Name");

  List<StateTransitionThrottleConfig> throttleConfigs = new ArrayList<>();
  // Add throttling at cluster-level
  throttleConfigs.add(
      new StateTransitionThrottleConfig(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
          StateTransitionThrottleConfig.ThrottleScope.CLUSTER, 0));
  // Add throttling at instance level
  throttleConfigs.add(
      new StateTransitionThrottleConfig(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
          StateTransitionThrottleConfig.ThrottleScope.INSTANCE, 0));

  clusterConfig.setStateTransitionThrottleConfigs(throttleConfigs);
  _accessor.setProperty(keyBuilder.clusterConfig(), clusterConfig);
}
Example 5
Source File: ZKHelixAdmin.java From helix with Apache License 2.0 | 6 votes |
@Override
public void removeTypeFromCustomizedStateConfig(String clusterName, String type) {
  logger.info("Remove type {} to CustomizedStateConfig of cluster {}", type, clusterName);

  if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
    throw new HelixException("cluster " + clusterName + " is not setup yet");
  }

  CustomizedStateConfig.Builder builder =
      new CustomizedStateConfig.Builder(_configAccessor.getCustomizedStateConfig(clusterName));

  if (!builder.getAggregationEnabledTypes().contains(type)) {
    throw new HelixException(
        "Type " + type + " is missing from the CustomizedStateConfig of cluster " + clusterName);
  }

  builder.removeAggregationEnabledType(type);
  CustomizedStateConfig customizedStateConfigFromBuilder = builder.build();

  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();

  accessor.setProperty(keyBuilder.customizedStateConfig(), customizedStateConfigFromBuilder);
}
Example 6
Source File: ControllerLeaderLocatorTest.java From incubator-pinot with Apache License 2.0 | 6 votes |
@Test
public void testNoControllerLeader() {
  HelixManager helixManager = mock(HelixManager.class);
  HelixDataAccessor helixDataAccessor = mock(HelixDataAccessor.class);

  // Mock that there is no helix leader.
  when(helixManager.getHelixDataAccessor()).thenReturn(helixDataAccessor);
  PropertyKey.Builder keyBuilder = mock(PropertyKey.Builder.class);
  when(helixDataAccessor.keyBuilder()).thenReturn(keyBuilder);
  PropertyKey controllerLeader = mock(PropertyKey.class);
  when(keyBuilder.controllerLeader()).thenReturn(controllerLeader);
  when(helixDataAccessor.getProperty(controllerLeader)).thenReturn(null);

  // Lead controller resource disabled.
  ConfigAccessor configAccessor = mock(ConfigAccessor.class);
  ResourceConfig resourceConfig = mock(ResourceConfig.class);
  when(helixManager.getConfigAccessor()).thenReturn(configAccessor);
  when(configAccessor.getResourceConfig(any(), any())).thenReturn(resourceConfig);
  when(resourceConfig.getSimpleConfig(anyString())).thenReturn("false");

  // Create Controller Leader Locator
  FakeControllerLeaderLocator.create(helixManager);
  ControllerLeaderLocator controllerLeaderLocator = FakeControllerLeaderLocator.getInstance();

  Assert.assertNull(controllerLeaderLocator.getControllerLeader(testTable));
}
Example 7
Source File: TestPreferenceListAsQueue.java From helix with Apache License 2.0 | 6 votes |
/**
 * Update an ideal state so that partitions will have an instance removed from their preference
 * lists
 * @param accessor
 * @param instanceName
 * @param resourceName
 * @param partitionName
 */
private void removeInstanceFromPreferences(HelixDataAccessor accessor, final String instanceName,
    final String resourceName, final String partitionName) {
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  String idealStatePath = keyBuilder.idealStates(resourceName).getPath();

  synchronized (_prefListHistory) {
    // Updater for ideal state
    final List<String> prefList = Lists.newLinkedList();
    DataUpdater<ZNRecord> idealStateUpdater = currentData -> {
      List<String> preferenceList = currentData.getListField(partitionName);
      int numReplicas =
          Integer.valueOf(currentData.getSimpleField(IdealStateProperty.REPLICAS.toString()));
      List<String> newPrefList =
          removeInstanceFromPreferenceList(preferenceList, instanceName, numReplicas);
      currentData.setListField(partitionName, newPrefList);
      prefList.clear();
      prefList.addAll(newPrefList);
      return currentData;
    };

    List<DataUpdater<ZNRecord>> updaters = Lists.newArrayList();
    updaters.add(idealStateUpdater);
    accessor.updateChildren(Collections.singletonList(idealStatePath), updaters,
        AccessOption.PERSISTENT);
    _prefListHistory.add(prefList);
  }
}
Example 8
Source File: ParticipantManager.java From helix with Apache License 2.0 | 6 votes |
public ParticipantManager(HelixManager manager, RealmAwareZkClient zkclient, int sessionTimeout,
    LiveInstanceInfoProvider liveInstanceInfoProvider, List<PreConnectCallback> preConnectCallbacks,
    final String sessionId, HelixManagerProperty helixManagerProperty) {
  _zkclient = zkclient;
  _manager = manager;
  _clusterName = manager.getClusterName();
  _instanceName = manager.getInstanceName();
  _keyBuilder = new PropertyKey.Builder(_clusterName);
  _sessionId = sessionId;
  _sessionTimeout = sessionTimeout;
  _configAccessor = manager.getConfigAccessor();
  _instanceType = manager.getInstanceType();
  _helixAdmin = manager.getClusterManagmentTool();
  _dataAccessor = (ZKHelixDataAccessor) manager.getHelixDataAccessor();
  _messagingService = (DefaultMessagingService) manager.getMessagingService();
  _stateMachineEngine = manager.getStateMachineEngine();
  _liveInstanceInfoProvider = liveInstanceInfoProvider;
  _preConnectCallbacks = preConnectCallbacks;
  _helixManagerProperty = helixManagerProperty;
}
Example 9
Source File: ControllerManagerHelper.java From helix with Apache License 2.0 | 6 votes |
public void removeListenersFromController(GenericHelixController controller) {
  PropertyKey.Builder keyBuilder = new PropertyKey.Builder(_manager.getClusterName());
  /**
   * reset generic-controller
   */
  _manager.removeListener(keyBuilder.idealStates(), controller);
  _manager.removeListener(keyBuilder.liveInstances(), controller);
  _manager.removeListener(keyBuilder.customizedStateConfig(), controller);
  _manager.removeListener(keyBuilder.clusterConfig(), controller);
  _manager.removeListener(keyBuilder.resourceConfigs(), controller);
  _manager.removeListener(keyBuilder.instanceConfigs(), controller);
  _manager.removeListener(keyBuilder.controller(), controller);

  /**
   * reset controller message listener and unregister all message handlers
   */
  _manager.removeListener(keyBuilder.controllerMessages(), _messagingService.getExecutor());
}
Example 10
Source File: ZKHelixAdmin.java From helix with Apache License 2.0 | 5 votes |
@Override
public void addClusterToGrandCluster(String clusterName, String grandCluster) {
  logger.info("Add cluster {} to grand cluster {}.", clusterName, grandCluster);

  if (!ZKUtil.isClusterSetup(grandCluster, _zkClient)) {
    throw new HelixException("Grand cluster " + grandCluster + " is not setup yet");
  }

  if (!ZKUtil.isClusterSetup(clusterName, _zkClient)) {
    throw new HelixException("Cluster " + clusterName + " is not setup yet");
  }

  IdealState idealState = new IdealState(clusterName);

  idealState.setNumPartitions(1);
  idealState.setStateModelDefRef("LeaderStandby");
  idealState.setRebalanceMode(RebalanceMode.FULL_AUTO);
  idealState.setRebalancerClassName(DelayedAutoRebalancer.class.getName());
  idealState.setRebalanceStrategy(CrushEdRebalanceStrategy.class.getName());
  // TODO: Give user an option, say from RestAPI to config the number of replicas.
  idealState.setReplicas(Integer.toString(DEFAULT_SUPERCLUSTER_REPLICA));
  idealState.getRecord().setListField(clusterName, new ArrayList<String>());

  List<String> controllers = getInstancesInCluster(grandCluster);
  if (controllers.size() == 0) {
    throw new HelixException("Grand cluster " + grandCluster + " has no instances");
  }

  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(grandCluster, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  accessor.setProperty(keyBuilder.idealStates(idealState.getResourceName()), idealState);
}
Example 11
Source File: ZKHelixAdmin.java From helix with Apache License 2.0 | 5 votes |
@Override
public void removeCustomizedStateConfig(String clusterName) {
  logger.info("Remove CustomizedStateConfig from cluster {}.", clusterName);

  ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();

  accessor.removeProperty(keyBuilder.customizedStateConfig());
}
Example 12
Source File: JobQueueResource.java From helix with Apache License 2.0 | 5 votes |
StringRepresentation getHostedEntitiesRepresentation(String clusterName, String jobQueueName)
    throws Exception {
  ZkClient zkClient =
      ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  HelixDataAccessor accessor =
      ClusterRepresentationUtil.getClusterDataAccessor(zkClient, clusterName);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);

  // Get job queue config
  // TODO: fix this to use workflowConfig.
  ResourceConfig jobQueueConfig = accessor.getProperty(keyBuilder.resourceConfig(jobQueueName));

  // Get job queue context
  WorkflowContext ctx = taskDriver.getWorkflowContext(jobQueueName);

  // Create the result
  ZNRecord hostedEntitiesRecord = new ZNRecord(jobQueueName);
  if (jobQueueConfig != null) {
    hostedEntitiesRecord.merge(jobQueueConfig.getRecord());
  }
  if (ctx != null) {
    hostedEntitiesRecord.merge(ctx.getRecord());
  }

  StringRepresentation representation =
      new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(hostedEntitiesRecord),
          MediaType.APPLICATION_JSON);
  return representation;
}
Example 13
Source File: ZKHelixAdmin.java From helix with Apache License 2.0 | 5 votes |
@Override
public void removeCloudConfig(String clusterName) {
  logger.info("Remove Cloud Config for cluster {}.", clusterName);

  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();

  accessor.removeProperty(keyBuilder.cloudConfig());
}
Example 14
Source File: ZkTestBase.java From helix with Apache License 2.0 | 5 votes |
@Override
public boolean verify() {
  BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_zkClient);
  HelixDataAccessor accessor = new ZKHelixDataAccessor(_clusterName, baseAccessor);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  ExternalView externalView = accessor.getProperty(keyBuilder.externalView(_resourceName));

  // verify external view empty
  if (externalView != null) {
    for (String partition : externalView.getPartitionSet()) {
      Map<String, String> stateMap = externalView.getStateMap(partition);
      if (stateMap != null && !stateMap.isEmpty()) {
        LOG.error("External view not empty for " + partition);
        return false;
      }
    }
  }

  // verify current state empty
  List<String> liveParticipants = accessor.getChildNames(keyBuilder.liveInstances());
  for (String participant : liveParticipants) {
    List<String> sessionIds = accessor.getChildNames(keyBuilder.sessions(participant));
    for (String sessionId : sessionIds) {
      CurrentState currentState =
          accessor.getProperty(keyBuilder.currentState(participant, sessionId, _resourceName));
      Map<String, String> partitionStateMap = currentState.getPartitionStateMap();
      if (partitionStateMap != null && !partitionStateMap.isEmpty()) {
        LOG.error("Current state not empty for " + participant);
        return false;
      }
    }
  }
  return true;
}
Example 15
Source File: TestHelixTaskExecutor.java From helix with Apache License 2.0 | 4 votes |
@Test()
public void testDuplicatedMessage() throws InterruptedException {
  System.out.println("START TestHelixTaskExecutor.testDuplicatedMessage()");
  HelixTaskExecutor executor = new HelixTaskExecutor();
  HelixManager manager = new MockClusterManager();
  HelixDataAccessor dataAccessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();

  TestStateTransitionHandlerFactory stateTransitionFactory =
      new TestStateTransitionHandlerFactory(Message.MessageType.STATE_TRANSITION.name(), 1000);
  executor.registerMessageHandlerFactory(Message.MessageType.STATE_TRANSITION.name(),
      stateTransitionFactory);

  NotificationContext changeContext = new NotificationContext(manager);
  List<Message> msgList = new ArrayList<Message>();

  int nMsgs = 3;
  String instanceName = manager.getInstanceName();
  for (int i = 0; i < nMsgs; i++) {
    Message msg =
        new Message(Message.MessageType.STATE_TRANSITION.name(), UUID.randomUUID().toString());
    msg.setTgtSessionId(manager.getSessionId());
    msg.setCreateTimeStamp((long) i);
    msg.setTgtName("Localhost_1123");
    msg.setSrcName("127.101.1.23_2234");
    msg.setPartitionName("Partition");
    msg.setResourceName("Resource");
    msg.setStateModelDef("DummyMasterSlave");
    msg.setFromState("SLAVE");
    msg.setToState("MASTER");
    dataAccessor.setProperty(msg.getKey(keyBuilder, instanceName), msg);
    msgList.add(msg);
  }

  AssertJUnit.assertEquals(
      dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(), nMsgs);

  changeContext.setChangeType(HelixConstants.ChangeType.MESSAGE);
  executor.onMessage(instanceName, msgList, changeContext);

  Thread.sleep(200);

  // only 1 message is left over - state transition takes 1sec
  Assert.assertEquals(
      dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(), 1);

  // While a state transition message is going on, another state transition message for same
  // resource / partition comes in, it should be discarded by message handler

  // Mock accessor is modifying message state in memory so we set it back to NEW
  msgList.get(2).setMsgState(MessageState.NEW);
  dataAccessor.setProperty(msgList.get(2).getKey(keyBuilder, instanceName), msgList.get(2));

  executor.onMessage(instanceName, Arrays.asList(msgList.get(2)), changeContext);
  Thread.sleep(200);
  Assert.assertEquals(
      dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(), 1);

  Thread.sleep(1000);
  Assert.assertEquals(
      dataAccessor.getChildValues(keyBuilder.messages(instanceName), true).size(), 0);
  System.out.println("END TestHelixTaskExecutor.testDuplicatedMessage()");
}
Example 16
Source File: TestStateTransitionThrottle.java From helix with Apache License 2.0 | 4 votes |
@Test
public void testTransitionThrottleOnErrorPartition() throws Exception {
  String clusterName = getShortClassName() + "testMaxErrorPartition";
  MockParticipantManager[] participants = new MockParticipantManager[participantCount];

  System.out.println("START " + clusterName + " at " + new Date(System.currentTimeMillis()));
  final ZKHelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_gZkClient));
  setupCluster(clusterName, accessor);

  // Set throttle config to enable throttling
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  ClusterConfig clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());
  clusterConfig.setResourcePriorityField("Name");
  List<StateTransitionThrottleConfig> throttleConfigs = new ArrayList<>();
  throttleConfigs.add(
      new StateTransitionThrottleConfig(StateTransitionThrottleConfig.RebalanceType.LOAD_BALANCE,
          StateTransitionThrottleConfig.ThrottleScope.CLUSTER, 100));
  throttleConfigs.add(new StateTransitionThrottleConfig(
      StateTransitionThrottleConfig.RebalanceType.RECOVERY_BALANCE,
      StateTransitionThrottleConfig.ThrottleScope.CLUSTER, 100));
  clusterConfig.setStateTransitionThrottleConfigs(throttleConfigs);
  accessor.setProperty(keyBuilder.clusterConfig(), clusterConfig);

  // set one partition to be always Error, so load balance won't be triggered
  Map<String, Set<String>> errPartitions = new HashMap<>();
  errPartitions.put("OFFLINE-SLAVE", TestHelper.setOf(resourceName + "_0"));

  // start part of participants
  for (int i = 0; i < participantCount - 1; i++) {
    participants[i] =
        new MockParticipantManager(ZK_ADDR, clusterName, "localhost_" + (12918 + i));
    if (i == 0) {
      participants[i].setTransition(new ErrTransition(errPartitions));
    }
    participants[i].syncStart();
  }

  ClusterControllerManager controller =
      new ClusterControllerManager(ZK_ADDR, clusterName, "controller_0");
  controller.syncStart();

  BestPossibleExternalViewVerifier verifier =
      new BestPossibleExternalViewVerifier.Builder(clusterName).setZkClient(_gZkClient).build();
  Assert.assertTrue(verifier.verify(3000));

  // Adding one more participant.
  participants[participantCount - 1] = new MockParticipantManager(ZK_ADDR, clusterName,
      "localhost_" + (12918 + participantCount - 1));
  participants[participantCount - 1].syncStart();

  // Even though there is an error partition, downward load balance will take place
  Assert.assertTrue(pollForPartitionAssignment(accessor, participants[participantCount - 1],
      resourceName, 5000));

  // Update cluster config to tolerate error partition, so load balance transition will be done
  clusterConfig = accessor.getProperty(accessor.keyBuilder().clusterConfig());
  clusterConfig.setErrorPartitionThresholdForLoadBalance(1);
  accessor.setProperty(keyBuilder.clusterConfig(), clusterConfig);
  _gSetupTool.rebalanceResource(clusterName, resourceName, 3);

  Assert.assertTrue(pollForPartitionAssignment(accessor, participants[participantCount - 1],
      resourceName, 3000));

  // clean up
  controller.syncStop();
  for (int i = 0; i < participantCount; i++) {
    participants[i].syncStop();
  }
  deleteCluster(clusterName);
  System.out.println("END " + clusterName + " at " + new Date(System.currentTimeMillis()));
}
Example 17
Source File: TaskDriver.java From helix with Apache License 2.0 | 4 votes |
/**
 * Public synchronized method to wait for a delete operation to fully complete with timeout.
 * When this method returns, it means that a queue (workflow) has been completely deleted, meaning
 * its IdealState, WorkflowConfig, and WorkflowContext have all been deleted.
 * @param workflow workflow/jobqueue name
 * @param timeout duration to give to delete operation to completion
 */
public void deleteAndWaitForCompletion(String workflow, long timeout) throws InterruptedException {
  delete(workflow);
  long endTime = System.currentTimeMillis() + timeout;

  // For checking whether delete completed
  BaseDataAccessor baseDataAccessor = _accessor.getBaseDataAccessor();
  PropertyKey.Builder keyBuilder = _accessor.keyBuilder();
  String idealStatePath = keyBuilder.idealStates(workflow).getPath();
  String workflowConfigPath = keyBuilder.resourceConfig(workflow).getPath();
  String workflowContextPath = keyBuilder.workflowContext(workflow).getPath();

  while (System.currentTimeMillis() <= endTime) {
    if (baseDataAccessor.exists(idealStatePath, AccessOption.PERSISTENT)
        || baseDataAccessor.exists(workflowConfigPath, AccessOption.PERSISTENT)
        || baseDataAccessor.exists(workflowContextPath, AccessOption.PERSISTENT)) {
      Thread.sleep(1000);
    } else {
      return;
    }
  }

  // Deletion failed: check which step of deletion failed to complete and create an error message
  StringBuilder failed = new StringBuilder();
  if (baseDataAccessor.exists(idealStatePath, AccessOption.PERSISTENT)) {
    failed.append("IdealState ");
  }
  if (baseDataAccessor.exists(workflowConfigPath, AccessOption.PERSISTENT)) {
    failed.append("WorkflowConfig ");
  }
  if (baseDataAccessor.exists(workflowContextPath, AccessOption.PERSISTENT)) {
    failed.append("WorkflowContext ");
  }
  throw new HelixException(String.format(
      "Failed to delete the workflow/queue %s within %d milliseconds. "
          + "The following components still remain: %s",
      workflow, timeout, failed.toString()));
}
Example 18
Source File: ZKHelixDataAccessor.java From helix with Apache License 2.0 | 4 votes |
public ZKHelixDataAccessor(ZKHelixDataAccessor dataAccessor) {
  _clusterName = dataAccessor._clusterName;
  _instanceType = dataAccessor._instanceType;
  _baseDataAccessor = dataAccessor._baseDataAccessor;
  _propertyKeyBuilder = new PropertyKey.Builder(_clusterName);
}
Example 19
Source File: MockHelixTaskExecutor.java From helix with Apache License 2.0 | 4 votes |
void checkDuplicatedMessages(List<Message> messages) {
  HelixDataAccessor accessor = manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  PropertyKey path = keyBuilder.currentStates(manager.getInstanceName(), manager.getSessionId());
  Map<String, CurrentState> currentStateMap = accessor.getChildValuesMap(path, true);

  Set<String> seenPartitions = new HashSet<>();
  for (Message message : messages) {
    if (message.getMsgType().equals(Message.MessageType.STATE_TRANSITION.name())) {
      String resource = message.getResourceName();
      String partition = message.getPartitionName();

      // System.err.println(message.getMsgId());
      String key = resource + "-" + partition;
      if (seenPartitions.contains(key)) {
        // System.err.println("Duplicated message received for " + resource + ":" + partition);
        duplicatedMessages++;
      }
      seenPartitions.add(key);

      String toState = message.getToState();
      String state = null;
      if (currentStateMap.containsKey(resource)) {
        CurrentState currentState = currentStateMap.get(resource);
        state = currentState.getState(partition);
      }

      if (toState.equals(state) && message.getMsgState() == Message.MessageState.NEW) {
        // logger.error(
        //     "Extra message: " + message.getMsgId() + ", Partition is already in target state "
        //         + toState + " for " + resource + ":" + partition);
        extraStateTransition++;
      }

      String messageTarget =
          getMessageTarget(message.getResourceName(), message.getPartitionName());

      if (message.getMsgState() == Message.MessageState.NEW
          && _messageTaskMap.containsKey(messageTarget)) {
        String taskId = _messageTaskMap.get(messageTarget);
        MessageTaskInfo messageTaskInfo = _taskMap.get(taskId);
        Message existingMsg = messageTaskInfo.getTask().getMessage();
        if (existingMsg.getMsgId() != message.getMsgId())
          // logger.error("Duplicated message In Progress: " + message.getMsgId()
          //     + ", state transition in progress with message " + existingMsg.getMsgId()
          //     + " to " + toState + " for " + resource + ":" + partition);
          duplicatedMessagesInProgress++;
      }
    }
  }
}
Example 20
Source File: TestTaskRebalancer.java From helix with Apache License 2.0 | 4 votes |
@Test
public void testNamedQueue() throws Exception {
  String queueName = TestHelper.getTestMethodName();

  // Create a queue
  JobQueue queue = new JobQueue.Builder(queueName).build();
  _driver.createQueue(queue);

  // Enqueue jobs
  Set<String> master = Sets.newHashSet("MASTER");
  Set<String> slave = Sets.newHashSet("SLAVE");
  JobConfig.Builder job1 = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
      .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(master);
  JobConfig.Builder job2 = new JobConfig.Builder().setCommand(MockTask.TASK_COMMAND)
      .setTargetResource(WorkflowGenerator.DEFAULT_TGT_DB).setTargetPartitionStates(slave);
  _driver.enqueueJob(queueName, "masterJob", job1);
  _driver.enqueueJob(queueName, "slaveJob", job2);

  // Ensure successful completion
  String namespacedJob1 = queueName + "_masterJob";
  String namespacedJob2 = queueName + "_slaveJob";
  _driver.pollForJobState(queueName, namespacedJob1, TaskState.COMPLETED);
  _driver.pollForJobState(queueName, namespacedJob2, TaskState.COMPLETED);
  JobContext masterJobContext = _driver.getJobContext(namespacedJob1);
  JobContext slaveJobContext = _driver.getJobContext(namespacedJob2);

  // Ensure correct ordering
  long job1Finish = masterJobContext.getFinishTime();
  long job2Start = slaveJobContext.getStartTime();
  Assert.assertTrue(job2Start >= job1Finish);

  // Flush queue and check cleanup
  _driver.flushQueue(queueName);

  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  Assert.assertNull(accessor.getProperty(keyBuilder.idealStates(namespacedJob1)));
  Assert.assertNull(accessor.getProperty(keyBuilder.resourceConfig(namespacedJob1)));
  Assert.assertNull(accessor.getProperty(keyBuilder.idealStates(namespacedJob2)));
  Assert.assertNull(accessor.getProperty(keyBuilder.resourceConfig(namespacedJob2)));

  WorkflowConfig workflowCfg = _driver.getWorkflowConfig(queueName);
  JobDag dag = workflowCfg.getJobDag();
  Assert.assertFalse(dag.getAllNodes().contains(namespacedJob1));
  Assert.assertFalse(dag.getAllNodes().contains(namespacedJob2));
  Assert.assertFalse(dag.getChildrenToParents().containsKey(namespacedJob1));
  Assert.assertFalse(dag.getChildrenToParents().containsKey(namespacedJob2));
  Assert.assertFalse(dag.getParentsToChildren().containsKey(namespacedJob1));
  Assert.assertFalse(dag.getParentsToChildren().containsKey(namespacedJob2));
}