Java Code Examples for org.apache.helix.model.InstanceConfig#setPort()
The following examples show how to use org.apache.helix.model.InstanceConfig#setPort().
They are drawn from open-source projects; the source file, project, and license are noted above each example.
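Across the examples the pattern is consistent: create an InstanceConfig keyed by the instance name, set the host name, pass the port to setPort() as a String, and register the config with the cluster through a HelixAdmin. The minimal sketch below distills that pattern; the ZooKeeper address, cluster name, host, and port are illustrative placeholders, not values taken from any particular example.

// A minimal, self-contained sketch of the common pattern in the examples below.
// The ZooKeeper address, cluster name, and port are placeholders.
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.InstanceConfig;

public class RegisterInstanceSketch {
  public static void main(String[] args) {
    ZKHelixAdmin admin = new ZKHelixAdmin("localhost:2181"); // placeholder ZK address
    InstanceConfig instanceConfig = new InstanceConfig("localhost_12000");
    instanceConfig.setHostName("localhost");
    instanceConfig.setPort("12000"); // setPort() takes the port as a String
    instanceConfig.setInstanceEnabled(true);
    admin.addInstance("MyCluster", instanceConfig); // placeholder cluster name
  }
}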
Example 1
Source File: Quickstart.java, from helix (Apache License 2.0)
private static void addNode() throws Exception {
  NUM_NODES = NUM_NODES + 1;
  int port = 12000 + NUM_NODES - 1;

  InstanceConfig instanceConfig = new InstanceConfig("localhost_" + port);
  instanceConfig.setHostName("localhost");
  instanceConfig.setPort("" + port);
  instanceConfig.setInstanceEnabled(true);
  echo("ADDING NEW NODE :" + instanceConfig.getInstanceName()
      + ". Partitions will move from old nodes to the new node.");
  admin.addInstance(CLUSTER_NAME, instanceConfig);
  INSTANCE_CONFIG_LIST.add(instanceConfig);
  MyProcess process = new MyProcess(instanceConfig.getInstanceName());
  PROCESS_LIST.add(process);
  admin.rebalance(CLUSTER_NAME, RESOURCE_NAME, 3);
  process.start();
}
Example 2
Source File: ManagerControllerHelix.java, from uReplicator (Apache License 2.0)
public synchronized void start() throws Exception {
  LOGGER.info("Trying to start ManagerControllerHelix!");
  _helixZkManager = HelixManagerFactory.getZKHelixManager(_helixClusterName, _instanceId,
      InstanceType.PARTICIPANT, _helixZkURL);
  _helixZkManager.getStateMachineEngine().registerStateModelFactory("OnlineOffline",
      new ControllerStateModelFactory(this));
  try {
    _helixZkManager.connect();
    InstanceConfig instanceConfig = new InstanceConfig(_instanceId);
    instanceConfig.setHostName(_hostname);
    instanceConfig.setPort(_port);
    instanceConfig.setInstanceEnabled(true);
    _helixZkManager.getConfigAccessor().setInstanceConfig(_helixClusterName, _instanceId, instanceConfig);
  } catch (Exception e) {
    LOGGER.error("Failed to start ManagerControllerHelix " + _helixClusterName, e);
    throw e;
  }
}
Example 3
Source File: StorageManagerTest.java, from ambry (Apache License 2.0)
/**
 * Test success case when updating InstanceConfig in Helix after new replica is added in storage manager.
 */
@Test
public void updateInstanceConfigSuccessTest() throws Exception {
  generateConfigs(true, true);
  MockDataNodeId localNode = clusterMap.getDataNodes().get(0);
  List<ReplicaId> localReplicas = clusterMap.getReplicaIds(localNode);
  MockClusterParticipant mockHelixParticipant = new MockClusterParticipant();
  StorageManager storageManager =
      createStorageManager(localNode, metricRegistry, Collections.singletonList(mockHelixParticipant));
  storageManager.start();
  // create a new partition and get its replica on local node
  PartitionId newPartition = clusterMap.createNewPartition(Collections.singletonList(localNode));
  ReplicaId newReplica = newPartition.getReplicaIds().get(0);
  // for updating instanceConfig test, we first add an empty InstanceConfig of current node
  String instanceName =
      ClusterMapUtils.getInstanceName(clusterMapConfig.clusterMapHostName, clusterMapConfig.clusterMapPort);
  InstanceConfig instanceConfig = new InstanceConfig(instanceName);
  instanceConfig.setHostName(localNode.getHostname());
  instanceConfig.setPort(Integer.toString(localNode.getPort()));
  // for current test, we initialize InstanceConfig as empty; the non-empty case is tested in HelixParticipantTest
  Map<String, Map<String, String>> diskInfos = new HashMap<>();
  instanceConfig.getRecord().setMapFields(diskInfos);
  HelixAdmin helixAdmin = mockHelixParticipant.getHelixAdmin();
  helixAdmin.addCluster(CLUSTER_NAME);
  helixAdmin.addInstance(CLUSTER_NAME, instanceConfig);
  // test success case
  mockHelixParticipant.onPartitionBecomeBootstrapFromOffline(newPartition.toPathString());
  instanceConfig = helixAdmin.getInstanceConfig(CLUSTER_NAME, instanceName);
  // verify that new replica info is present in InstanceConfig
  Map<String, Map<String, String>> mountPathToDiskInfos = instanceConfig.getRecord().getMapFields();
  Map<String, String> diskInfo = mountPathToDiskInfos.get(newReplica.getMountPath());
  String replicasStr = diskInfo.get("Replicas");
  Set<String> partitionStrs = new HashSet<>();
  for (String replicaInfo : replicasStr.split(",")) {
    String[] infos = replicaInfo.split(":");
    partitionStrs.add(infos[0]);
  }
  assertTrue("New replica info is not found in InstanceConfig",
      partitionStrs.contains(newPartition.toPathString()));
  shutdownAndAssertStoresInaccessible(storageManager, localReplicas);
}
Example 4
Source File: HelixParticipantTest.java, from ambry (Apache License 2.0)
/**
 * Generate {@link InstanceConfig} for given data node.
 * @param clusterMap {@link MockClusterMap} to use
 * @param dataNode the data node associated with InstanceConfig.
 * @param sealedReplicas the sealed replicas that should be placed into sealed list. This can be null.
 * @return {@link InstanceConfig} of given data node.
 */
private InstanceConfig generateInstanceConfig(MockClusterMap clusterMap, MockDataNodeId dataNode,
    List<String> sealedReplicas) {
  String instanceName = ClusterMapUtils.getInstanceName(dataNode.getHostname(), dataNode.getPort());
  InstanceConfig instanceConfig = new InstanceConfig(instanceName);
  instanceConfig.setHostName(dataNode.getHostname());
  instanceConfig.setPort(Integer.toString(dataNode.getPort()));
  instanceConfig.getRecord().setSimpleField(DATACENTER_STR, dataNode.getDatacenterName());
  instanceConfig.getRecord().setSimpleField(RACKID_STR, dataNode.getRackId());
  instanceConfig.getRecord().setSimpleField(SCHEMA_VERSION_STR, Integer.toString(CURRENT_SCHEMA_VERSION));
  Map<String, SortedSet<ReplicaId>> mountPathToReplicas = new HashMap<>();
  for (ReplicaId replicaId : clusterMap.getReplicaIds(dataNode)) {
    mountPathToReplicas.computeIfAbsent(replicaId.getMountPath(),
        k -> new TreeSet<>(Comparator.comparing(ReplicaId::getPartitionId))).add(replicaId);
  }
  Map<String, Map<String, String>> mountPathToDiskInfos = new HashMap<>();
  for (Map.Entry<String, SortedSet<ReplicaId>> entry : mountPathToReplicas.entrySet()) {
    String mountPath = entry.getKey();
    StringBuilder replicaStrBuilder = new StringBuilder();
    DiskId diskId = null;
    for (ReplicaId replica : entry.getValue()) {
      replicaStrBuilder.append(replica.getPartitionId().toPathString())
          .append(REPLICAS_STR_SEPARATOR)
          .append(replica.getCapacityInBytes())
          .append(REPLICAS_STR_SEPARATOR)
          .append(replica.getPartitionId().getPartitionClass())
          .append(REPLICAS_DELIM_STR);
      diskId = replica.getDiskId();
    }
    Map<String, String> diskInfo = new HashMap<>();
    diskInfo.put(REPLICAS_STR, replicaStrBuilder.toString());
    diskInfo.put(DISK_CAPACITY_STR, String.valueOf(diskId.getRawCapacityInBytes()));
    diskInfo.put(DISK_STATE, AVAILABLE_STR);
    mountPathToDiskInfos.put(mountPath, diskInfo);
  }
  instanceConfig.getRecord().setMapFields(mountPathToDiskInfos);
  instanceConfig.getRecord()
      .setListField(ClusterMapUtils.SEALED_STR, sealedReplicas == null ? new ArrayList<>() : sealedReplicas);
  return instanceConfig;
}
Example 5
Source File: ValidationManagerTest.java, from incubator-pinot (Apache License 2.0)
@Test
public void testRebuildBrokerResourceWhenBrokerAdded() throws Exception {
  // Check that the first table we added doesn't need to be rebuilt (case where ideal state brokers and
  // brokers in broker resource are the same).
  String partitionName = _offlineTableConfig.getTableName();
  HelixAdmin helixAdmin = _helixManager.getClusterManagmentTool();
  IdealState idealState = HelixHelper.getBrokerIdealStates(helixAdmin, getHelixClusterName());
  // Ensure that the broker resource is not rebuilt.
  Assert.assertTrue(idealState.getInstanceSet(partitionName)
      .equals(_helixResourceManager.getAllInstancesForBrokerTenant(TagNameUtils.DEFAULT_TENANT_NAME)));
  _helixResourceManager.rebuildBrokerResourceFromHelixTags(partitionName);

  // Add another table that needs to be rebuilt
  TableConfig offlineTableConfigTwo =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TEST_TABLE_TWO).build();
  _helixResourceManager.addTable(offlineTableConfigTwo);
  String partitionNameTwo = offlineTableConfigTwo.getTableName();

  // Add a new broker manually such that the ideal state is not updated and ensure that rebuild broker
  // resource is called
  final String brokerId = "Broker_localhost_2";
  InstanceConfig instanceConfig = new InstanceConfig(brokerId);
  instanceConfig.setInstanceEnabled(true);
  instanceConfig.setHostName("Broker_localhost");
  instanceConfig.setPort("2");
  helixAdmin.addInstance(getHelixClusterName(), instanceConfig);
  helixAdmin.addInstanceTag(getHelixClusterName(), instanceConfig.getInstanceName(),
      TagNameUtils.getBrokerTagForTenant(TagNameUtils.DEFAULT_TENANT_NAME));
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, getHelixClusterName());
  // Assert that the two don't equal before the call to rebuild the broker resource.
  Assert.assertTrue(!idealState.getInstanceSet(partitionNameTwo)
      .equals(_helixResourceManager.getAllInstancesForBrokerTenant(TagNameUtils.DEFAULT_TENANT_NAME)));
  _helixResourceManager.rebuildBrokerResourceFromHelixTags(partitionNameTwo);
  idealState = HelixHelper.getBrokerIdealStates(helixAdmin, getHelixClusterName());
  // Assert that the two do equal after being rebuilt.
  Assert.assertTrue(idealState.getInstanceSet(partitionNameTwo)
      .equals(_helixResourceManager.getAllInstancesForBrokerTenant(TagNameUtils.DEFAULT_TENANT_NAME)));
}
Example 6
Source File: LockProcess.java, from helix (Apache License 2.0)
/**
 * Configure the instance; the configuration of each node is available to other nodes.
 * @param instanceName
 */
private void configureInstance(String instanceName) {
  ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress);
  List<String> instancesInCluster = helixAdmin.getInstancesInCluster(clusterName);
  if (instancesInCluster == null || !instancesInCluster.contains(instanceName)) {
    InstanceConfig config = new InstanceConfig(instanceName);
    config.setHostName("localhost");
    config.setPort("12000");
    helixAdmin.addInstance(clusterName, config);
  }
}
Example 7
Source File: ZkTestBase.java, from helix (Apache License 2.0)
protected void setupInstances(String clusterName, int[] instances) {
  HelixAdmin admin = new ZKHelixAdmin(_gZkClient);
  for (int i = 0; i < instances.length; i++) {
    String instance = "localhost_" + instances[i];
    InstanceConfig instanceConfig = new InstanceConfig(instance);
    instanceConfig.setHostName("localhost");
    instanceConfig.setPort("" + instances[i]);
    instanceConfig.setInstanceEnabled(true);
    admin.addInstance(clusterName, instanceConfig);
  }
}
Example 8
Source File: BaseStageTest.java, from helix (Apache License 2.0)
protected void setupInstances(int numInstances) {
  // setup liveInstances
  for (int i = 0; i < numInstances; i++) {
    String instance = HOSTNAME_PREFIX + i;
    InstanceConfig config = new InstanceConfig(instance);
    config.setHostName(instance);
    config.setPort("12134");
    admin.addInstance(manager.getClusterName(), config);
  }
}
Example 9
Source File: HelixUtil.java, from helix (Apache License 2.0)
/**
 * Compose the config for an instance
 * @param instanceName
 * @return InstanceConfig
 */
public static InstanceConfig composeInstanceConfig(String instanceName) {
  InstanceConfig instanceConfig = new InstanceConfig(instanceName);
  String hostName = instanceName;
  String port = "";
  int lastPos = instanceName.lastIndexOf("_");
  if (lastPos > 0) {
    hostName = instanceName.substring(0, lastPos);
    port = instanceName.substring(lastPos + 1);
  }
  instanceConfig.setHostName(hostName);
  instanceConfig.setPort(port);
  instanceConfig.setInstanceEnabled(true);
  return instanceConfig;
}
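As a usage note for the helper above: composeInstanceConfig() splits an instance name of the form host_port at the last underscore and feeds the two halves to setHostName() and setPort(). A brief hypothetical caller follows; the instance name is illustrative, and the import assumes HelixUtil's usual package org.apache.helix.util.

// Hypothetical caller of the helper shown in Example 9.
import org.apache.helix.model.InstanceConfig;
import org.apache.helix.util.HelixUtil;

public class ComposeInstanceConfigSketch {
  public static void main(String[] args) {
    // "localhost_12918" is split at the last underscore, as the helper above does.
    InstanceConfig config = HelixUtil.composeInstanceConfig("localhost_12918");
    System.out.println(config.getHostName()); // localhost
    System.out.println(config.getPort());     // 12918
  }
}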
Example 10
Source File: YAMLClusterSetup.java, from helix (Apache License 2.0)
private static InstanceConfig getInstanceCfg(ParticipantConfig participant) {
  if (participant == null || participant.name == null || participant.host == null || participant.port == null) {
    throw new HelixException("Participant must have a specified name, host, and port!");
  }
  InstanceConfig instanceCfg = new InstanceConfig(participant.name);
  instanceCfg.setHostName(participant.host);
  instanceCfg.setPort(participant.port.toString());
  return instanceCfg;
}
Example 11
Source File: TestTopology.java, from helix (Apache License 2.0)
@Test
public void testCreateClusterTopologyWithDefaultTopology() {
  ClusterConfig clusterConfig = new ClusterConfig("Test_Cluster");
  clusterConfig.setTopologyAwareEnabled(true);

  List<String> allNodes = new ArrayList<String>();
  List<String> liveNodes = new ArrayList<String>();
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<String, InstanceConfig>();
  Map<String, Integer> nodeToWeightMap = new HashMap<String, Integer>();

  for (int i = 0; i < 100; i++) {
    String instance = "localhost_" + i;
    InstanceConfig config = new InstanceConfig(instance);
    String zoneId = "rack_" + i / 10;
    config.setZoneId(zoneId);
    config.setHostName(instance);
    config.setPort("9000");
    allNodes.add(instance);

    int weight = 0;
    if (i % 10 != 0) {
      liveNodes.add(instance);
      weight = 1000;
      if (i % 3 == 0) {
        // set random instance weight.
        weight = (i + 1) * 100;
        config.setWeight(weight);
      }
    }
    instanceConfigMap.put(instance, config);

    if (!nodeToWeightMap.containsKey(zoneId)) {
      nodeToWeightMap.put(zoneId, 0);
    }
    nodeToWeightMap.put(zoneId, nodeToWeightMap.get(zoneId) + weight);
  }

  Topology topo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterConfig);
  Assert.assertTrue(topo.getEndNodeType().equals(Topology.Types.INSTANCE.name()));
  Assert.assertTrue(topo.getFaultZoneType().equals(Topology.Types.ZONE.name()));

  List<Node> faultZones = topo.getFaultZones();
  Assert.assertEquals(faultZones.size(), 10);

  Node root = topo.getRootNode();
  Assert.assertEquals(root.getChildrenCount(Topology.Types.ZONE.name()), 10);
  Assert.assertEquals(root.getChildrenCount(topo.getEndNodeType()), 100);

  // validate weights.
  for (Node rack : root.getChildren()) {
    Assert.assertEquals(rack.getWeight(), (long) nodeToWeightMap.get(rack.getName()));
  }
}
Example 12
Source File: TestTopology.java, from helix (Apache License 2.0)
@Test
public void testCreateClusterTopology() {
  ClusterConfig clusterConfig = new ClusterConfig("Test_Cluster");
  String topology = "/Rack/Sub-Rack/Host/Instance";
  clusterConfig.setTopology(topology);
  clusterConfig.setFaultZoneType("Sub-Rack");
  clusterConfig.setTopologyAwareEnabled(true);

  List<String> allNodes = new ArrayList<String>();
  List<String> liveNodes = new ArrayList<String>();
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<String, InstanceConfig>();
  Map<String, Integer> nodeToWeightMap = new HashMap<String, Integer>();

  for (int i = 0; i < 100; i++) {
    String instance = "localhost_" + i;
    InstanceConfig config = new InstanceConfig(instance);
    String rack_id = "rack_" + i / 25;
    String sub_rack_id = "subrack-" + i / 5;
    String domain = String.format("Rack=%s, Sub-Rack=%s, Host=%s", rack_id, sub_rack_id, instance);
    config.setDomain(domain);
    config.setHostName(instance);
    config.setPort("9000");
    allNodes.add(instance);

    int weight = 0;
    if (i % 10 != 0) {
      liveNodes.add(instance);
      weight = 1000;
      if (i % 3 == 0) {
        // set random instance weight.
        weight = (i + 1) * 100;
        config.setWeight(weight);
      }
    }
    instanceConfigMap.put(instance, config);

    if (!nodeToWeightMap.containsKey(rack_id)) {
      nodeToWeightMap.put(rack_id, 0);
    }
    nodeToWeightMap.put(rack_id, nodeToWeightMap.get(rack_id) + weight);
    if (!nodeToWeightMap.containsKey(sub_rack_id)) {
      nodeToWeightMap.put(sub_rack_id, 0);
    }
    nodeToWeightMap.put(sub_rack_id, nodeToWeightMap.get(sub_rack_id) + weight);
  }

  Topology topo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterConfig);
  Assert.assertTrue(topo.getEndNodeType().equals("Instance"));
  Assert.assertTrue(topo.getFaultZoneType().equals("Sub-Rack"));

  List<Node> faultZones = topo.getFaultZones();
  Assert.assertEquals(faultZones.size(), 20);

  Node root = topo.getRootNode();
  Assert.assertEquals(root.getChildrenCount("Rack"), 4);
  Assert.assertEquals(root.getChildrenCount("Sub-Rack"), 20);
  Assert.assertEquals(root.getChildrenCount("Host"), 100);
  Assert.assertEquals(root.getChildrenCount("Instance"), 100);

  // validate weights.
  for (Node rack : root.getChildren()) {
    Assert.assertEquals(rack.getWeight(), (long) nodeToWeightMap.get(rack.getName()));
    for (Node subRack : rack.getChildren()) {
      Assert.assertEquals(subRack.getWeight(), (long) nodeToWeightMap.get(subRack.getName()));
    }
  }
}
Example 13
Source File: SetupCluster.java, from helix (Apache License 2.0)
public static void main(String[] args) {
  if (args.length < 2) {
    System.err.println("USAGE: java SetupCluster zookeeperAddress(e.g. localhost:2181) numberOfNodes");
    System.exit(1);
  }

  final String zkAddr = args[0];
  final int numNodes = Integer.parseInt(args[1]);
  final String clusterName = DEFAULT_CLUSTER_NAME;

  ZkClient zkclient = null;
  try {
    zkclient = new ZkClient(zkAddr, ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT,
        new ZNRecordSerializer());
    ZKHelixAdmin admin = new ZKHelixAdmin(zkclient);

    // add cluster
    admin.addCluster(clusterName, true);

    // add state model definition
    StateModelConfigGenerator generator = new StateModelConfigGenerator();
    admin.addStateModelDef(clusterName, DEFAULT_STATE_MODEL,
        new StateModelDefinition(generator.generateConfigForOnlineOffline()));

    // addNodes
    for (int i = 0; i < numNodes; i++) {
      String port = "" + (12001 + i);
      String serverId = "localhost_" + port;
      InstanceConfig config = new InstanceConfig(serverId);
      config.setHostName("localhost");
      config.setPort(port);
      config.setInstanceEnabled(true);
      admin.addInstance(clusterName, config);
    }

    // add resource "repository" which has 1 partition
    String resourceName = DEFAULT_RESOURCE_NAME;
    admin.addResource(clusterName, resourceName, DEFAULT_PARTITION_NUMBER, DEFAULT_STATE_MODEL,
        RebalanceMode.SEMI_AUTO.toString());
    admin.rebalance(clusterName, resourceName, 1);
  } finally {
    if (zkclient != null) {
      zkclient.close();
    }
  }
}
Example 14
Source File: TestZkHelixAdmin.java, from helix (Apache License 2.0)
@Test
public void testGetResourcesWithTag() {
  String TEST_TAG = "TestTAG";
  String className = TestHelper.getTestClassName();
  String methodName = TestHelper.getTestMethodName();
  String clusterName = className + "_" + methodName;

  HelixAdmin tool = new ZKHelixAdmin(_gZkClient);
  tool.addCluster(clusterName, true);
  Assert.assertTrue(ZKUtil.isClusterSetup(clusterName, _gZkClient));
  tool.addStateModelDef(clusterName, "OnlineOffline",
      new StateModelDefinition(StateModelConfigGenerator.generateConfigForOnlineOffline()));

  for (int i = 0; i < 4; i++) {
    String instanceName = "host" + i + "_9999";
    InstanceConfig config = new InstanceConfig(instanceName);
    config.setHostName("host" + i);
    config.setPort("9999");
    // set tag to two instances
    if (i < 2) {
      config.addTag(TEST_TAG);
    }
    tool.addInstance(clusterName, config);
    tool.enableInstance(clusterName, instanceName, true);
    String path = PropertyPathBuilder.instance(clusterName, instanceName);
    AssertJUnit.assertTrue(_gZkClient.exists(path));
  }

  for (int i = 0; i < 4; i++) {
    String resourceName = "database_" + i;
    IdealState is = new IdealState(resourceName);
    is.setStateModelDefRef("OnlineOffline");
    is.setNumPartitions(2);
    is.setRebalanceMode(IdealState.RebalanceMode.FULL_AUTO);
    is.setReplicas("1");
    is.enable(true);
    if (i < 2) {
      is.setInstanceGroupTag(TEST_TAG);
    }
    tool.addResource(clusterName, resourceName, is);
  }

  List<String> allResources = tool.getResourcesInCluster(clusterName);
  List<String> resourcesWithTag = tool.getResourcesInClusterWithTag(clusterName, TEST_TAG);
  AssertJUnit.assertEquals(allResources.size(), 4);
  AssertJUnit.assertEquals(resourcesWithTag.size(), 2);

  tool.dropCluster(clusterName);
}
Example 15
Source File: HelixServerStarter.java, from incubator-pinot (Apache License 2.0)
private void updateInstanceConfigIfNeeded(String host, int port) {
  InstanceConfig instanceConfig = _helixAdmin.getInstanceConfig(_helixClusterName, _instanceId);
  boolean needToUpdateInstanceConfig = false;

  // Add default instance tags if not exist
  List<String> instanceTags = instanceConfig.getTags();
  if (instanceTags == null || instanceTags.size() == 0) {
    if (ZKMetadataProvider.getClusterTenantIsolationEnabled(_helixManager.getHelixPropertyStore())) {
      instanceConfig.addTag(TagNameUtils.getOfflineTagForTenant(null));
      instanceConfig.addTag(TagNameUtils.getRealtimeTagForTenant(null));
    } else {
      instanceConfig.addTag(UNTAGGED_SERVER_INSTANCE);
    }
    needToUpdateInstanceConfig = true;
  }

  // Update host and port if needed
  if (!host.equals(instanceConfig.getHostName())) {
    instanceConfig.setHostName(host);
    needToUpdateInstanceConfig = true;
  }
  String portStr = Integer.toString(port);
  if (!portStr.equals(instanceConfig.getPort())) {
    instanceConfig.setPort(portStr);
    needToUpdateInstanceConfig = true;
  }

  if (needToUpdateInstanceConfig) {
    LOGGER.info("Updating instance config for instance: {} with instance tags: {}, host: {}, port: {}",
        _instanceId, instanceTags, host, port);
  } else {
    LOGGER.info("Instance config for instance: {} has instance tags: {}, host: {}, port: {}, no need to update",
        _instanceId, instanceTags, host, port);
    return;
  }

  // NOTE: Use HelixDataAccessor.setProperty() instead of HelixAdmin.setInstanceConfig() because the latter
  // explicitly forbids instance host/port modification
  HelixDataAccessor helixDataAccessor = _helixManager.getHelixDataAccessor();
  Preconditions.checkState(
      helixDataAccessor.setProperty(helixDataAccessor.keyBuilder().instanceConfig(_instanceId), instanceConfig),
      "Failed to update instance config");
}
Example 16
Source File: HelixBootstrapUpgradeUtil.java, from ambry (Apache License 2.0)
/**
 * Create an {@link InstanceConfig} for the given node from the static cluster information.
 * @param node the {@link DataNodeId}
 * @param partitionToInstances the map of partitions to instances that will be populated for this instance.
 * @param instanceToDiskReplicasMap the map of instances to the map of disk to set of replicas.
 * @param referenceInstanceConfig the InstanceConfig used to set the fields that are not derived from the json files.
 *                                These are the SEALED state and STOPPED_REPLICAS configurations. If this field is
 *                                null, then these fields are derived from the json files. This can happen if this
 *                                is a newly added node.
 * @return the constructed {@link InstanceConfig}
 */
static InstanceConfig createInstanceConfigFromStaticInfo(DataNodeId node,
    Map<String, Set<String>> partitionToInstances,
    ConcurrentHashMap<String, Map<DiskId, SortedSet<Replica>>> instanceToDiskReplicasMap,
    InstanceConfig referenceInstanceConfig) {
  String instanceName = getInstanceName(node);
  InstanceConfig instanceConfig = new InstanceConfig(instanceName);
  instanceConfig.setHostName(node.getHostname());
  instanceConfig.setPort(Integer.toString(node.getPort()));
  if (node.hasSSLPort()) {
    instanceConfig.getRecord().setSimpleField(SSL_PORT_STR, Integer.toString(node.getSSLPort()));
  }
  if (node.hasHttp2Port()) {
    instanceConfig.getRecord().setSimpleField(HTTP2_PORT_STR, Integer.toString(node.getHttp2Port()));
  }
  instanceConfig.getRecord().setSimpleField(DATACENTER_STR, node.getDatacenterName());
  instanceConfig.getRecord().setSimpleField(RACKID_STR, node.getRackId());
  long xid = node.getXid();
  if (xid != DEFAULT_XID) {
    // Set the XID only if it is not the default, in order to avoid unnecessary updates.
    instanceConfig.getRecord().setSimpleField(XID_STR, Long.toString(node.getXid()));
  }
  instanceConfig.getRecord().setSimpleField(SCHEMA_VERSION_STR, Integer.toString(CURRENT_SCHEMA_VERSION));

  List<String> sealedPartitionsList = new ArrayList<>();
  List<String> stoppedReplicasList = new ArrayList<>();
  if (instanceToDiskReplicasMap.containsKey(instanceName)) {
    Map<String, Map<String, String>> diskInfos = new HashMap<>();
    for (HashMap.Entry<DiskId, SortedSet<Replica>> diskToReplicas : instanceToDiskReplicasMap.get(instanceName)
        .entrySet()) {
      DiskId disk = diskToReplicas.getKey();
      SortedSet<Replica> replicasInDisk = diskToReplicas.getValue();
      // Note: An instance config has to contain the information for each disk about the replicas it hosts.
      // This information will be initialized to the empty string - but will be updated whenever the partition
      // is added to the cluster.
      StringBuilder replicasStrBuilder = new StringBuilder();
      for (ReplicaId replicaId : replicasInDisk) {
        Replica replica = (Replica) replicaId;
        replicasStrBuilder.append(replica.getPartition().getId())
            .append(REPLICAS_STR_SEPARATOR)
            .append(replica.getCapacityInBytes())
            .append(REPLICAS_STR_SEPARATOR)
            .append(replica.getPartition().getPartitionClass())
            .append(REPLICAS_DELIM_STR);
        if (referenceInstanceConfig == null && replica.isSealed()) {
          sealedPartitionsList.add(Long.toString(replica.getPartition().getId()));
        }
        partitionToInstances.computeIfAbsent(Long.toString(replica.getPartition().getId()), k -> new HashSet<>())
            .add(instanceName);
      }
      Map<String, String> diskInfo = new HashMap<>();
      diskInfo.put(DISK_CAPACITY_STR, Long.toString(disk.getRawCapacityInBytes()));
      diskInfo.put(DISK_STATE, AVAILABLE_STR);
      diskInfo.put(REPLICAS_STR, replicasStrBuilder.toString());
      diskInfos.put(disk.getMountPath(), diskInfo);
    }
    instanceConfig.getRecord().setMapFields(diskInfos);
  }

  // Set the fields that need to be preserved from the referenceInstanceConfig.
  if (referenceInstanceConfig != null) {
    sealedPartitionsList = ClusterMapUtils.getSealedReplicas(referenceInstanceConfig);
    stoppedReplicasList = ClusterMapUtils.getStoppedReplicas(referenceInstanceConfig);
  }
  instanceConfig.getRecord().setListField(SEALED_STR, sealedPartitionsList);
  instanceConfig.getRecord().setListField(STOPPED_REPLICAS_STR, stoppedReplicasList);
  return instanceConfig;
}
Example 17
Source File: ClusterChangeHandlerTest.java, from ambry (Apache License 2.0)
/**
 * Test that {@link DynamicClusterChangeHandler} is able to handle invalid info entry in the InstanceConfig at
 * runtime or during initialization.
 */
@Test
public void instanceConfigInvalidInfoEntryTest() {
  Properties properties = new Properties();
  properties.putAll(props);
  properties.setProperty("clustermap.cluster.change.handler.type", "DynamicClusterChangeHandler");
  ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(properties));
  HelixClusterManager.HelixClusterManagerCallback mockManagerCallback =
      Mockito.mock(HelixClusterManager.HelixClusterManagerCallback.class);
  HelixClusterManager.ClusterChangeHandlerCallback mockHandlerCallback =
      Mockito.mock(HelixClusterManager.ClusterChangeHandlerCallback.class);
  doAnswer(returnsFirstArg()).when(mockHandlerCallback).addPartitionIfAbsent(any(), anyLong());
  Counter initFailureCount = new Counter();
  DynamicClusterChangeHandler dynamicChangeHandler =
      new DynamicClusterChangeHandler(clusterMapConfig, localDc, selfInstanceName, Collections.emptyMap(),
          mockManagerCallback, mockHandlerCallback,
          new HelixClusterManagerMetrics(new MetricRegistry(), mockManagerCallback), e -> initFailureCount.inc(),
          new AtomicLong());
  // create an InstanceConfig with invalid entry that mocks error info added by Helix controller
  PartitionId selectedPartition = testPartitionLayout.getPartitionLayout().getPartitions(null).get(0);
  Replica testReplica = (Replica) selectedPartition.getReplicaIds().get(0);
  DataNode testNode = (DataNode) testReplica.getDataNodeId();
  InstanceConfig instanceConfig = new InstanceConfig(getInstanceName(testNode.getHostname(), testNode.getPort()));
  instanceConfig.setHostName(testNode.getHostname());
  instanceConfig.setPort(Integer.toString(testNode.getPort()));
  instanceConfig.getRecord().setSimpleField(ClusterMapUtils.DATACENTER_STR, testNode.getDatacenterName());
  instanceConfig.getRecord().setSimpleField(ClusterMapUtils.RACKID_STR, testNode.getRackId());
  instanceConfig.getRecord()
      .setSimpleField(ClusterMapUtils.SCHEMA_VERSION_STR, Integer.toString(ClusterMapUtils.CURRENT_SCHEMA_VERSION));
  instanceConfig.getRecord().setListField(ClusterMapUtils.SEALED_STR, Collections.emptyList());
  instanceConfig.getRecord().setListField(ClusterMapUtils.STOPPED_REPLICAS_STR, Collections.emptyList());
  Map<String, Map<String, String>> diskInfos = new HashMap<>();
  assertNotNull("testReplica should not be null", testReplica);
  Map<String, String> diskInfo = new HashMap<>();
  diskInfo.put(ClusterMapUtils.DISK_CAPACITY_STR, Long.toString(testReplica.getDiskId().getRawCapacityInBytes()));
  diskInfo.put(ClusterMapUtils.DISK_STATE, ClusterMapUtils.AVAILABLE_STR);
  String replicasStrBuilder =
      testReplica.getPartition().getId() + ClusterMapUtils.REPLICAS_STR_SEPARATOR + testReplica.getCapacityInBytes()
          + ClusterMapUtils.REPLICAS_STR_SEPARATOR + testReplica.getPartition().getPartitionClass()
          + ClusterMapUtils.REPLICAS_DELIM_STR;
  diskInfo.put(ClusterMapUtils.REPLICAS_STR, replicasStrBuilder);
  diskInfos.put(testReplica.getDiskId().getMountPath(), diskInfo);
  // add an invalid entry at the end of diskInfos
  Map<String, String> invalidEntry = new HashMap<>();
  invalidEntry.put("INVALID_KEY", "INVALID_VALUE");
  diskInfos.put("INVALID_MOUNT_PATH", invalidEntry);
  instanceConfig.getRecord().setMapFields(diskInfos);
  // we call onInstanceConfigChange() twice
  // 1st call, to verify initialization code path
  dynamicChangeHandler.onDataNodeConfigChange(
      Collections.singleton(InstanceConfigToDataNodeConfigAdapter.convert(instanceConfig, clusterMapConfig)));
  // 2nd call, to verify dynamic update code path
  dynamicChangeHandler.onDataNodeConfigChange(
      Collections.singletonList(InstanceConfigToDataNodeConfigAdapter.convert(instanceConfig, clusterMapConfig)));
  assertEquals("There shouldn't be initialization errors", 0, initFailureCount.getCount());
}