Java Code Examples for org.apache.helix.HelixDataAccessor#getChildNames()

The following examples show how to use org.apache.helix.HelixDataAccessor#getChildNames(). All of them are taken from open-source projects; the source file, project, and license are listed above each example.
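Before the examples, here is a minimal sketch of the typical calling pattern, assuming a connected HelixManager. The method name listClusterChildren and the variable names are illustrative only; the accessor, key-builder, and getChildNames() calls mirror the examples below.

public static void listClusterChildren(HelixManager helixManager) {
  HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();

  // getChildNames() returns the names of the child nodes under the ZooKeeper path
  // described by the given PropertyKey -- here, the cluster's live instances.
  List<String> liveInstances = accessor.getChildNames(keyBuilder.liveInstances());

  // As several examples below do, guard against a null or empty result before iterating.
  if (liveInstances == null || liveInstances.isEmpty()) {
    return;
  }
  for (String instanceName : liveInstances) {
    // The same call works one level deeper, e.g. the sessions recorded for an instance.
    List<String> sessions = accessor.getChildNames(keyBuilder.sessions(instanceName));
    System.out.println(instanceName + " sessions: " + sessions);
  }
}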
Example 1
Source File: PerInstanceAccessor.java    From helix with Apache License 2.0
@GET
@Path("healthreports")
public Response getHealthReportsOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor accessor = getDataAccssor(clusterId);

  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ArrayNode healthReportsNode = root.putArray(PerInstanceProperties.healthreports.name());

  List<String> healthReports =
      accessor.getChildNames(accessor.keyBuilder().healthReports(instanceName));

  if (healthReports != null && healthReports.size() > 0) {
    healthReportsNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(healthReports));
  }

  return JSONRepresentation(root);
}
 
Example 2
Source File: PerInstanceAccessor.java    From helix with Apache License 2.0
@GET @Path("resources/{resourceName}")
public Response getResourceOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName,
    @PathParam("resourceName") String resourceName) throws IOException {
  HelixDataAccessor accessor = getDataAccssor(clusterId);
  List<String> sessionIds = accessor.getChildNames(accessor.keyBuilder().sessions(instanceName));
  if (sessionIds == null || sessionIds.size() == 0) {
    return notFound();
  }

  // Only get resource list from current session id
  String currentSessionId = sessionIds.get(0);
  CurrentState resourceCurrentState = accessor.getProperty(
      accessor.keyBuilder().currentState(instanceName, currentSessionId, resourceName));
  if (resourceCurrentState != null) {
    return JSONRepresentation(resourceCurrentState.getRecord());
  }

  return notFound();
}
 
Example 3
Source File: PropertyCache.java    From helix with Apache License 2.0
@VisibleForTesting
SelectivePropertyRefreshInputs<T> genSelectiveUpdateInput(HelixDataAccessor accessor,
    Map<String, T> oldCache, PropertyCache.PropertyCacheKeyFuncs<T> propertyKeyFuncs) {
  // Generate keys for all current live instances
  Set<PropertyKey> latestKeys = Sets.newHashSet();
  for (String liveInstanceName : accessor.getChildNames(propertyKeyFuncs.getRootKey(accessor))) {
    latestKeys.add(propertyKeyFuncs.getObjPropertyKey(accessor, liveInstanceName));
  }

  Set<PropertyKey> oldCachedKeys = Sets.newHashSet();
  Map<PropertyKey, T> cachedObjs = new HashMap<>();
  for (String objName : oldCache.keySet()) {
    PropertyKey objKey = propertyKeyFuncs.getObjPropertyKey(accessor, objName);
    oldCachedKeys.add(objKey);
    cachedObjs.put(objKey, oldCache.get(objName));
  }
  Set<PropertyKey> cachedKeys = Sets.intersection(oldCachedKeys, latestKeys);
  Set<PropertyKey> reloadKeys = Sets.difference(latestKeys, cachedKeys);

  return new SelectivePropertyRefreshInputs<>(new ArrayList<>(reloadKeys),
      new ArrayList<>(cachedKeys), cachedObjs);
}
 
Example 4
Source File: CurrentStateCache.java    From helix with Apache License 2.0
@Override
protected Set<PropertyKey> PopulateParticipantKeys(HelixDataAccessor accessor,
    Map<String, LiveInstance> liveInstanceMap) {
  Set<PropertyKey> participantStateKeys = new HashSet<>();
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  for (String instanceName : liveInstanceMap.keySet()) {
    LiveInstance liveInstance = liveInstanceMap.get(instanceName);
    String sessionId = liveInstance.getEphemeralOwner();
    List<String> currentStateNames =
        accessor.getChildNames(keyBuilder.currentStates(instanceName, sessionId));
    for (String currentStateName : currentStateNames) {
      participantStateKeys
          .add(keyBuilder.currentState(instanceName, sessionId, currentStateName));
    }
  }
  return participantStateKeys;
}
 
Example 5
Source File: ZKPathDataDumpTask.java    From helix with Apache License 2.0
@Override
public void run() {
  // For each record in the status update and error nodes
  // TODO: for now the status updates are dumped to the cluster manager's log4j log.
  // We need to decide whether to create per-instance log files that contain
  // per-instance statusUpdates and errors.
  LOG.info("Scan statusUpdates and errors for cluster: " + _manager.getClusterName()
      + ", by controller: " + _manager);
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  Builder keyBuilder = accessor.keyBuilder();
  BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();

  List<String> instances = accessor.getChildNames(keyBuilder.instanceConfigs());
  for (String instance : instances) {
    // dump participant status updates
    String statusUpdatePath = PropertyPathBuilder.instanceStatusUpdate(_manager.getClusterName(), instance);
    dump(baseAccessor, statusUpdatePath, _thresholdNoChangeMsForStatusUpdates, _maxLeafCount);

    // dump participant errors
    String errorPath = PropertyPathBuilder.instanceError(_manager.getClusterName(), instance);
    dump(baseAccessor, errorPath, _thresholdNoChangeMsForErrors, _maxLeafCount);
  }
  // dump controller status updates
  String controllerStatusUpdatePath = PropertyPathBuilder.controllerStatusUpdate(_manager.getClusterName());
  dump(baseAccessor, controllerStatusUpdatePath, _thresholdNoChangeMsForStatusUpdates, _maxLeafCount);

  // dump controller errors
  String controllerErrorPath = PropertyPathBuilder.controllerError(_manager.getClusterName());
  dump(baseAccessor, controllerErrorPath, _thresholdNoChangeMsForErrors, _maxLeafCount);
}
 
Example 6
Source File: PerInstanceAccessor.java    From helix with Apache License 2.0
@GET
@Path("errors")
public Response getErrorsOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName) throws IOException {
  HelixDataAccessor accessor = getDataAccssor(clusterId);

  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ObjectNode errorsNode = JsonNodeFactory.instance.objectNode();

  List<String> sessionIds = accessor.getChildNames(accessor.keyBuilder().errors(instanceName));

  if (sessionIds == null || sessionIds.size() == 0) {
    return notFound();
  }

  for (String sessionId : sessionIds) {
    List<String> resources =
        accessor.getChildNames(accessor.keyBuilder().errors(instanceName, sessionId));
    if (resources != null) {
      ObjectNode resourcesNode = JsonNodeFactory.instance.objectNode();
      for (String resourceName : resources) {
        List<String> partitions = accessor
            .getChildNames(accessor.keyBuilder().errors(instanceName, sessionId, resourceName));
        if (partitions != null) {
          ArrayNode partitionsNode = resourcesNode.putArray(resourceName);
          partitionsNode.addAll((ArrayNode) OBJECT_MAPPER.valueToTree(partitions));
        }
      }
      errorsNode.put(sessionId, resourcesNode);
    }
  }
  root.put(PerInstanceProperties.errors.name(), errorsNode);

  return JSONRepresentation(root);
}
 
Example 7
Source File: ClusterAccessor.java    From helix with Apache License 2.0
@GET
@Path("{clusterId}/statemodeldefs")
public Response getClusterStateModelDefinitions(@PathParam("clusterId") String clusterId) {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  List<String> stateModelDefs =
      dataAccessor.getChildNames(dataAccessor.keyBuilder().stateModelDefs());

  Map<String, Object> clusterStateModelDefs = new HashMap<>();
  clusterStateModelDefs.put(Properties.id.name(), clusterId);
  clusterStateModelDefs.put(ClusterProperties.stateModelDefinitions.name(), stateModelDefs);

  return JSONRepresentation(clusterStateModelDefs);
}
 
Example 8
Source File: ClusterAccessor.java    From helix with Apache License 2.0
@GET
@Path("{clusterId}/controller/messages")
public Response getClusterControllerMessages(@PathParam("clusterId") String clusterId) {
  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);

  Map<String, Object> controllerMessages = new HashMap<>();
  controllerMessages.put(Properties.id.name(), clusterId);

  List<String> messages =
      dataAccessor.getChildNames(dataAccessor.keyBuilder().controllerMessages());
  controllerMessages.put(ClusterProperties.messages.name(), messages);
  controllerMessages.put(Properties.count.name(), messages.size());

  return JSONRepresentation(controllerMessages);
}
 
Example 9
Source File: ClusterAccessor.java    From helix with Apache License 2.0
@GET
@Path("{clusterId}")
public Response getClusterInfo(@PathParam("clusterId") String clusterId) {
  if (!doesClusterExist(clusterId)) {
    return notFound();
  }

  HelixDataAccessor dataAccessor = getDataAccssor(clusterId);
  PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();

  Map<String, Object> clusterInfo = new HashMap<>();
  clusterInfo.put(Properties.id.name(), clusterId);

  LiveInstance controller = dataAccessor.getProperty(keyBuilder.controllerLeader());
  if (controller != null) {
    clusterInfo.put(ClusterProperties.controller.name(), controller.getInstanceName());
  } else {
    clusterInfo.put(ClusterProperties.controller.name(), "No Lead Controller!");
  }

  boolean paused = dataAccessor.getBaseDataAccessor()
      .exists(keyBuilder.pause().getPath(), AccessOption.PERSISTENT);
  clusterInfo.put(ClusterProperties.paused.name(), paused);
  boolean maintenance = getHelixAdmin().isInMaintenanceMode(clusterId);
  clusterInfo.put(ClusterProperties.maintenance.name(), maintenance);

  List<String> idealStates = dataAccessor.getChildNames(keyBuilder.idealStates());
  clusterInfo.put(ClusterProperties.resources.name(), idealStates);
  List<String> instances = dataAccessor.getChildNames(keyBuilder.instanceConfigs());
  clusterInfo.put(ClusterProperties.instances.name(), instances);
  List<String> liveInstances = dataAccessor.getChildNames(keyBuilder.liveInstances());
  clusterInfo.put(ClusterProperties.liveInstances.name(), liveInstances);

  return JSONRepresentation(clusterInfo);
}
 
Example 10
Source File: ZkTestBase.java    From helix with Apache License 2.0
@Override
public boolean verify() {
  BaseDataAccessor<ZNRecord> baseAccessor = new ZkBaseDataAccessor<ZNRecord>(_zkClient);
  HelixDataAccessor accessor = new ZKHelixDataAccessor(_clusterName, baseAccessor);
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  ExternalView externalView = accessor.getProperty(keyBuilder.externalView(_resourceName));

  // verify external view empty
  if (externalView != null) {
    for (String partition : externalView.getPartitionSet()) {
      Map<String, String> stateMap = externalView.getStateMap(partition);
      if (stateMap != null && !stateMap.isEmpty()) {
        LOG.error("External view not empty for " + partition);
        return false;
      }
    }
  }

  // verify current state empty
  List<String> liveParticipants = accessor.getChildNames(keyBuilder.liveInstances());
  for (String participant : liveParticipants) {
    List<String> sessionIds = accessor.getChildNames(keyBuilder.sessions(participant));
    for (String sessionId : sessionIds) {
      CurrentState currentState =
          accessor.getProperty(keyBuilder.currentState(participant, sessionId, _resourceName));
      if (currentState == null) {
        // no current state recorded for this resource in this session
        continue;
      }
      Map<String, String> partitionStateMap = currentState.getPartitionStateMap();
      if (partitionStateMap != null && !partitionStateMap.isEmpty()) {
        LOG.error("Current state not empty for " + participant);
        return false;
      }
    }
  }
  return true;
}
 
Example 11
Source File: ZKHelixAdmin.java    From helix with Apache License 2.0
@Override
public Map<String, Boolean> validateResourcesForWagedRebalance(String clusterName,
    List<String> resourceNames) {
  // Null checks
  if (clusterName == null || clusterName.isEmpty()) {
    throw new HelixException("Cluster name is invalid!");
  }
  if (resourceNames == null || resourceNames.isEmpty()) {
    throw new HelixException("Resource name list is invalid!");
  }

  // Ensure that all instances are valid
  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  List<String> instances = accessor.getChildNames(keyBuilder.instanceConfigs());
  if (validateInstancesForWagedRebalance(clusterName, instances).containsValue(false)) {
    throw new HelixException(String
        .format("Instance capacities haven't been configured properly for cluster %s",
            clusterName));
  }

  Map<String, Boolean> result = new HashMap<>();
  ClusterConfig clusterConfig = _configAccessor.getClusterConfig(clusterName);
  for (String resourceName : resourceNames) {
    IdealState idealState = getResourceIdealState(clusterName, resourceName);
    if (idealState == null || !idealState.isValid()) {
      result.put(resourceName, false);
      continue;
    }
    ResourceConfig resourceConfig = _configAccessor.getResourceConfig(clusterName, resourceName);
    result.put(resourceName,
        validateWeightForResourceConfig(clusterConfig, resourceConfig, idealState));
  }
  return result;
}
 
Example 12
Source File: TestWagedRebalance.java    From helix with Apache License 2.0
/**
 * Use HelixUtil.getIdealAssignmentForWagedFullAuto() to compute the cluster-wide assignment and
 * verify that it matches the result produced by the original WAGED rebalancer's algorithm.
 */
@Test(dependsOnMethods = "test")
public void testRebalanceTool() throws InterruptedException {
  // Create resources for testing
  int i = 0;
  for (String stateModel : _testModels) {
    String db = "Test-DB-" + TestHelper.getTestMethodName() + i++;
    createResourceWithWagedRebalance(CLUSTER_NAME, db, stateModel, PARTITIONS, _replica,
        _replica);
    _gSetupTool.rebalanceStorageCluster(CLUSTER_NAME, db, _replica);
    _allDBs.add(db);
  }
  Thread.sleep(300);

  validate(_replica);

  // Read cluster parameters from ZK
  HelixDataAccessor dataAccessor = new ZKHelixDataAccessor(CLUSTER_NAME, _baseAccessor);
  ClusterConfig clusterConfig =
      dataAccessor.getProperty(dataAccessor.keyBuilder().clusterConfig());
  List<InstanceConfig> instanceConfigs =
      dataAccessor.getChildValues(dataAccessor.keyBuilder().instanceConfigs(), true);
  List<String> liveInstances =
      dataAccessor.getChildNames(dataAccessor.keyBuilder().liveInstances());
  List<IdealState> idealStates =
      dataAccessor.getChildValues(dataAccessor.keyBuilder().idealStates(), true);
  List<ResourceConfig> resourceConfigs =
      dataAccessor.getChildValues(dataAccessor.keyBuilder().resourceConfigs(), true);

  // Verify that utilResult contains the assignment for the resources added
  Map<String, ResourceAssignment> utilResult = HelixUtil
      .getIdealAssignmentForWagedFullAuto(ZK_ADDR, clusterConfig, instanceConfigs, liveInstances,
          idealStates, resourceConfigs);
  Assert.assertNotNull(utilResult);
  Assert.assertEquals(utilResult.size(), _allDBs.size());
  for (IdealState idealState : idealStates) {
    Assert.assertTrue(utilResult.containsKey(idealState.getResourceName()));
    Assert.assertEquals(utilResult.get(idealState.getResourceName()).getRecord().getMapFields(),
        idealState.getRecord().getMapFields());
  }

  // Try to add a few extra instances
  String instance_0 = "instance_0";
  String instance_1 = "instance_1";
  Set<String> newInstances = new HashSet<>();
  newInstances.add(instance_0);
  newInstances.add(instance_1);
  liveInstances.addAll(newInstances);
  for (String instance : newInstances) {
    InstanceConfig instanceConfig = new InstanceConfig(instance);
    instanceConfigs.add(instanceConfig);
  }

  utilResult = HelixUtil
      .getIdealAssignmentForWagedFullAuto(ZK_ADDR, clusterConfig, instanceConfigs, liveInstances,
          idealStates, resourceConfigs);

  Set<String> instancesWithAssignments = new HashSet<>();
  utilResult.values().forEach(
      resourceAssignment -> resourceAssignment.getRecord().getMapFields().values()
          .forEach(entry -> instancesWithAssignments.addAll(entry.keySet())));
  // The newly added instances should contain some partitions
  Assert.assertTrue(instancesWithAssignments.contains(instance_0));
  Assert.assertTrue(instancesWithAssignments.contains(instance_1));
}
 
Example 13
Source File: TaskUtil.java    From helix with Apache License 2.0
/**
 * Clean up all jobs that are COMPLETED and have passed their expiry time.
 * @param workflowConfig
 * @param workflowContext
 */
public static void purgeExpiredJobs(String workflow, WorkflowConfig workflowConfig,
    WorkflowContext workflowContext, HelixManager manager,
    RebalanceScheduler rebalanceScheduler) {
  if (workflowContext == null) {
    LOG.warn(String.format("Workflow %s context does not exist!", workflow));
    return;
  }
  long purgeInterval = workflowConfig.getJobPurgeInterval();
  long currentTime = System.currentTimeMillis();
  final Set<String> expiredJobs = Sets.newHashSet();
  if (purgeInterval > 0 && workflowContext.getLastJobPurgeTime() + purgeInterval <= currentTime) {
    expiredJobs.addAll(TaskUtil.getExpiredJobs(manager.getHelixDataAccessor(),
        manager.getHelixPropertyStore(), workflowConfig, workflowContext));
    if (expiredJobs.isEmpty()) {
      LOG.info("No job to purge for the queue " + workflow);
    } else {
      LOG.info("Purge jobs " + expiredJobs + " from queue " + workflow);
      Set<String> failedJobRemovals = new HashSet<>();
      for (String job : expiredJobs) {
        if (!TaskUtil.removeJob(manager.getHelixDataAccessor(), manager.getHelixPropertyStore(),
            job)) {
          failedJobRemovals.add(job);
          LOG.warn("Failed to clean up expired and completed jobs from workflow " + workflow);
        }
        rebalanceScheduler.removeScheduledRebalance(job);
      }

      // If the job removal failed, make sure we do NOT prematurely delete it from DAG so that the
      // removal will be tried again at next purge
      expiredJobs.removeAll(failedJobRemovals);

      if (!TaskUtil.removeJobsFromDag(manager.getHelixDataAccessor(), workflow, expiredJobs,
          true)) {
        LOG.warn("Error occurred while trying to remove jobs + " + expiredJobs
            + " from the workflow " + workflow);
      }

      if (expiredJobs.size() > 0) {
        // Updating the workflow context happens in the main pipeline, not here; otherwise it
        // would cause a concurrent-write issue. It is possible that jobs were purged but no
        // event triggers the pipeline to clean up their contexts.
        HelixDataAccessor accessor = manager.getHelixDataAccessor();
        List<String> resourceConfigs =
            accessor.getChildNames(accessor.keyBuilder().resourceConfigs());
        if (resourceConfigs.size() > 0) {
          RebalanceUtil.scheduleOnDemandPipeline(manager.getClusterName(), 0L);
        } else {
          LOG.warn(
              "No resource config to trigger a rebalance for cleaning up contexts for " + expiredJobs);
        }
      }
    }
  }
  setNextJobPurgeTime(workflow, currentTime, purgeInterval, rebalanceScheduler, manager);
}
 
Example 14
Source File: StatusUpdateUtil.java    From helix with Apache License 2.0
public static StatusUpdateContents getStatusUpdateContents(HelixDataAccessor accessor,
    String instance, String resourceGroup, String sessionID, String partition) {
  Builder keyBuilder = accessor.keyBuilder();

  List<ZNRecord> instances =
      HelixProperty.convertToList(accessor.getChildValues(keyBuilder.instanceConfigs(), true));
  List<ZNRecord> partitionRecords = new ArrayList<ZNRecord>();
  for (ZNRecord znRecord : instances) {
    String instanceName = znRecord.getId();
    if (!instanceName.equals(instance)) {
      continue;
    }

    List<String> sessions = accessor.getChildNames(keyBuilder.sessions(instanceName));
    for (String session : sessions) {
      if (sessionID != null && !session.equals(sessionID)) {
        continue;
      }

      List<String> resourceGroups =
          accessor.getChildNames(keyBuilder.stateTransitionStatus(instanceName, session));
      for (String resourceGroupName : resourceGroups) {
        if (!resourceGroupName.equals(resourceGroup)) {
          continue;
        }

        List<String> partitionStrings =
            accessor.getChildNames(keyBuilder.stateTransitionStatus(instanceName, session,
                resourceGroupName));

        for (String partitionString : partitionStrings) {
          if (!partitionString.equals(partition)) {
            continue;
          }
          // Skip records removed between listing the child names and reading them back
          HelixProperty partitionStatus =
              accessor.getProperty(keyBuilder.stateTransitionStatus(instanceName, session,
                  resourceGroupName, partitionString));
          if (partitionStatus != null) {
            partitionRecords.add(partitionStatus.getRecord());
          }
        }
      }
    }
  }

  return new StatusUpdateContents(getSortedTransitions(partitionRecords),
      getTaskMessages(partitionRecords));
}
 
Example 15
Source File: InstanceValidationUtil.java    From helix with Apache License 2.0
/**
 * Check whether the sibling nodes of the instance meet the min active replicas constraint.
 * Two instances are siblings of each other if they host the same partition.
 * WARNING: The check uses the ExternalView to reduce network traffic, but it can lose accuracy
 * due to external view propagation latency.
 *
 * TODO: Use an in-memory cache and query the instance's currentStates
 *
 * @param dataAccessor
 * @param instanceName
 * @return
 */
public static boolean siblingNodesActiveReplicaCheck(HelixDataAccessor dataAccessor, String instanceName) {
  PropertyKey.Builder propertyKeyBuilder = dataAccessor.keyBuilder();
  List<String> resources = dataAccessor.getChildNames(propertyKeyBuilder.idealStates());

  for (String resourceName : resources) {
    IdealState idealState = dataAccessor.getProperty(propertyKeyBuilder.idealStates(resourceName));
    if (idealState == null || !idealState.isEnabled() || !idealState.isValid()
        || TaskConstants.STATE_MODEL_NAME.equals(idealState.getStateModelDefRef())) {
      continue;
    }
    ExternalView externalView =
        dataAccessor.getProperty(propertyKeyBuilder.externalView(resourceName));
    if (externalView == null) {
      throw new HelixException(
          String.format("Resource %s does not have external view!", resourceName));
    }
    // Get the minActiveReplicas constraint for the resource
    int minActiveReplicas = externalView.getMinActiveReplicas();
    if (minActiveReplicas == -1) {
      _logger.warn("Resource " + resourceName
          + " is missing minActiveReplica field. Skip the sibling check");
      continue;
    }
    String stateModeDef = externalView.getStateModelDefRef();
    StateModelDefinition stateModelDefinition =
        dataAccessor.getProperty(propertyKeyBuilder.stateModelDef(stateModeDef));
    Set<String> unhealthyStates = new HashSet<>(UNHEALTHY_STATES);
    if (stateModelDefinition != null) {
      unhealthyStates.add(stateModelDefinition.getInitialState());
    }
    for (String partition : externalView.getPartitionSet()) {
      Map<String, String> stateByInstanceMap = externalView.getStateMap(partition);
      // found the resource hosted on the instance
      if (stateByInstanceMap.containsKey(instanceName)) {
        int numHealthySiblings = 0;
        for (Map.Entry<String, String> entry : stateByInstanceMap.entrySet()) {
          if (!entry.getKey().equals(instanceName)
              && !unhealthyStates.contains(entry.getValue())) {
            numHealthySiblings++;
          }
        }
        if (numHealthySiblings < minActiveReplicas) {
          _logger.info(
              "Partition {} doesn't have enough active replicas in sibling nodes. NumHealthySiblings: {}, minActiveReplicas: {}",
              partition, numHealthySiblings, minActiveReplicas);
          return false;
        }
      }
    }
  }

  return true;
}
 
Example 16
Source File: InstanceValidationUtil.java    From helix with Apache License 2.0 4 votes vote down vote up
/**
 * Check whether the instance is already in a stable state. Here, stable means that every ideal
 * state mapping matches the external view (the view of the current state).
 * It requires PERSIST_INTERMEDIATE_ASSIGNMENT to be turned on!
 * @param dataAccessor
 * @param instanceName
 * @return
 */
public static boolean isInstanceStable(HelixDataAccessor dataAccessor, String instanceName) {
  PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
  ClusterConfig clusterConfig = dataAccessor.getProperty(keyBuilder.clusterConfig());
  if (clusterConfig == null) {
    throw new HelixException("Missing cluster config!");
  }
  if (!clusterConfig.isPersistIntermediateAssignment()) {
    throw new HelixException("isInstanceStable needs persist assignment on!");
  }

  List<String> idealStateNames = dataAccessor.getChildNames(keyBuilder.idealStates());
  for (String idealStateName : idealStateNames) {
    IdealState idealState = dataAccessor.getProperty(keyBuilder.idealStates(idealStateName));
    if (idealState == null || !idealState.isEnabled() || !idealState.isValid()
        || TaskConstants.STATE_MODEL_NAME.equals(idealState.getStateModelDefRef())) {
      continue;
    }

    ExternalView externalView = dataAccessor.getProperty(keyBuilder.externalView(idealStateName));
    if (externalView == null) {
      throw new HelixException(
          String.format("Resource %s does not have external view!", idealStateName));
    }
    for (String partition : idealState.getPartitionSet()) {
      Map<String, String> isPartitionMap = idealState.getInstanceStateMap(partition);
      if (isPartitionMap == null) {
        throw new HelixException(String
            .format("Partition %s of resource %s does not have an ideal state partition map",
                partition, idealStateName));
      }
      if (isPartitionMap.containsKey(instanceName)) {
        Map<String, String> evPartitionMap = externalView.getStateMap(partition);
        if (evPartitionMap == null) {
          throw new HelixException(String
              .format("Partition %s of resource %s does not have an external view partition map",
                  partition, idealStateName));
        }
        if (!evPartitionMap.containsKey(instanceName)
            || !evPartitionMap.get(instanceName).equals(isPartitionMap.get(instanceName))) {
          // Only check that the state from the IdealState matches the ExternalView. Return false when
          // 1. this partition has no current state on this instance, or
          // 2. the state does not match the state in the ideal state.
          return false;
        }
      }
    }
  }
  return true;
}
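For context, a hedged usage sketch of the check above: the cluster name, instance name, and the surrounding error handling are placeholders, the accessor construction follows Examples 10 and 11, and zkClient is assumed to be an already-created client.

HelixDataAccessor dataAccessor =
    new ZKHelixDataAccessor("MyCluster", new ZkBaseDataAccessor<>(zkClient));
try {
  boolean stable = InstanceValidationUtil.isInstanceStable(dataAccessor, "localhost_12918");
  // Proceed with maintenance only when the instance is reported as stable.
  System.out.println("Instance stable: " + stable);
} catch (HelixException e) {
  // Thrown when the cluster config is missing, PERSIST_INTERMEDIATE_ASSIGNMENT is off,
  // or a resource has no external view (see the checks above).
}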
 
Example 17
Source File: HelixCustomCodeRunner.java    From helix with Apache License 2.0
/**
 * This method will be invoked when there is a change in any subscribed
 * notificationTypes
 * @throws Exception
 */
public void start() throws Exception {
  if (_callback == null || _notificationTypes == null || _notificationTypes.size() == 0
      || _resourceName == null) {
    throw new IllegalArgumentException("Require callback | notificationTypes | resourceName");
  }

  LOG.info("Register participantLeader on " + _notificationTypes + " using " + _resourceName);

  _stateModelFty = new GenericLeaderStandbyStateModelFactory(_callback, _notificationTypes);

  StateMachineEngine stateMach = _manager.getStateMachineEngine();
  stateMach.registerStateModelFactory(LEADER_STANDBY, _stateModelFty, _resourceName);
  HelixZkClient zkClient = null;
  try {
    // manually add ideal state for participant leader using LeaderStandby
    // model
    HelixZkClient.ZkClientConfig clientConfig = new HelixZkClient.ZkClientConfig();
    clientConfig.setZkSerializer(new ZNRecordSerializer());
    zkClient = SharedZkClientFactory.getInstance()
        .buildZkClient(new HelixZkClient.ZkConnectionConfig(_zkAddr), clientConfig);

    HelixDataAccessor accessor =
        new ZKHelixDataAccessor(_manager.getClusterName(), new ZkBaseDataAccessor<>(zkClient));
    Builder keyBuilder = accessor.keyBuilder();

    IdealState idealState = new IdealState(_resourceName);
    idealState.setRebalanceMode(RebalanceMode.SEMI_AUTO);
    idealState.setReplicas(IdealState.IdealStateConstants.ANY_LIVEINSTANCE.toString());
    idealState.setNumPartitions(1);
    idealState.setStateModelDefRef(LEADER_STANDBY);
    idealState.setStateModelFactoryName(_resourceName);
    List<String> prefList = new ArrayList<String>(
        Arrays.asList(IdealState.IdealStateConstants.ANY_LIVEINSTANCE.toString()));
    idealState.getRecord().setListField(_resourceName + "_0", prefList);

    List<String> idealStates = accessor.getChildNames(keyBuilder.idealStates());
    while (idealStates == null || !idealStates.contains(_resourceName)) {
      accessor.setProperty(keyBuilder.idealStates(_resourceName), idealState);
      idealStates = accessor.getChildNames(keyBuilder.idealStates());
    }

    LOG.info(
        "Set idealState for participantLeader:" + _resourceName + ", idealState:" + idealState);
  } finally {
    if (zkClient != null && !zkClient.isClosed()) {
      zkClient.close();
    }
  }
}
 
Example 18
Source File: PerInstanceAccessor.java    From helix with Apache License 2.0
@GET
@Path("messages")
public Response getMessagesOnInstance(@PathParam("clusterId") String clusterId,
    @PathParam("instanceName") String instanceName,
    @QueryParam("stateModelDef") String stateModelDef) {
  HelixDataAccessor accessor = getDataAccssor(clusterId);

  ObjectNode root = JsonNodeFactory.instance.objectNode();
  root.put(Properties.id.name(), instanceName);
  ArrayNode newMessages = root.putArray(PerInstanceProperties.new_messages.name());
  ArrayNode readMessages = root.putArray(PerInstanceProperties.read_messages.name());

  List<String> messageNames =
      accessor.getChildNames(accessor.keyBuilder().messages(instanceName));
  if (messageNames == null || messageNames.size() == 0) {
    LOG.warn("Unable to get any messages on instance: " + instanceName);
    return notFound();
  }

  for (String messageName : messageNames) {
    Message message = accessor.getProperty(accessor.keyBuilder().message(instanceName, messageName));
    if (message == null) {
      LOG.warn("Message is deleted given message name: ", messageName);
      continue;
    }
    // if stateModelDef is specified, keep only the messages whose StateModelDef equals the parameter
    if (StringUtil.isNotBlank(stateModelDef) && !stateModelDef.equals(message.getStateModelDef())) {
      continue;
    }

    if (Message.MessageState.NEW.equals(message.getMsgState())) {
      newMessages.add(messageName);
    } else if (Message.MessageState.READ.equals(message.getMsgState())) {
      readMessages.add(messageName);
    }
  }

  root.put(PerInstanceProperties.total_message_count.name(),
      newMessages.size() + readMessages.size());
  root.put(PerInstanceProperties.read_message_count.name(), readMessages.size());

  return JSONRepresentation(root);
}
 
Example 19
Source File: HelixUtils.java    From incubator-gobblin with Apache License 2.0 2 votes vote down vote up
/**
 * A utility method that returns all current live instances in a given Helix cluster. This method assumes that
 * the passed {@link HelixManager} instance is already connected.
 * @param helixManager
 * @return all live instances in the Helix cluster.
 */
public static List<String> getLiveInstances(HelixManager helixManager) {
  HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
  PropertyKey liveInstancesKey = accessor.keyBuilder().liveInstances();
  return accessor.getChildNames(liveInstancesKey);
}
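A short usage note: because the helper above only reads child names, calling it is a one-liner once the manager is connected. In the sketch below, helixManager stands for an already-connected HelixManager and is not constructed here.

// helixManager: an already-connected HelixManager for the target cluster (assumed).
List<String> liveInstances = HelixUtils.getLiveInstances(helixManager);
System.out.println("Live instances: " + liveInstances);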