Java Code Examples for org.apache.commons.collections.CollectionUtils#isEmpty()
The following examples show how to use org.apache.commons.collections.CollectionUtils#isEmpty(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
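CollectionUtils.isEmpty() is a null-safe emptiness check: it returns true when the collection is either null or contains no elements, so callers can drop the usual coll == null || coll.isEmpty() guard. Before the project examples, here is a minimal, self-contained sketch of that behavior (the class name IsEmptyDemo is illustrative; it assumes commons-collections 3.2 or later on the classpath):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;

public class IsEmptyDemo {
    public static void main(String[] args) {
        List<String> nullList = null;
        List<String> emptyList = Collections.emptyList();
        List<String> oneItem = new ArrayList<>();
        oneItem.add("a");

        // null-safe: no NullPointerException on a null reference
        System.out.println(CollectionUtils.isEmpty(nullList));   // true
        System.out.println(CollectionUtils.isEmpty(emptyList));  // true
        System.out.println(CollectionUtils.isEmpty(oneItem));    // false

        // isNotEmpty() is the complement (used alongside isEmpty() in Example 7)
        System.out.println(CollectionUtils.isNotEmpty(oneItem)); // true
    }
}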
Example 1
Source File: VertexLabelAPI.java From hugegraph with Apache License 2.0
@GET
@Timed
@Produces(APPLICATION_JSON_WITH_CHARSET)
@RolesAllowed({"admin", "$owner=$graph $action=schema_read"})
public String list(@Context GraphManager manager,
                   @PathParam("graph") String graph,
                   @QueryParam("names") List<String> names) {
    boolean listAll = CollectionUtils.isEmpty(names);
    if (listAll) {
        LOG.debug("Graph [{}] list vertex labels", graph);
    } else {
        LOG.debug("Graph [{}] get vertex labels by names {}", graph, names);
    }

    HugeGraph g = graph(manager, graph);
    List<VertexLabel> labels;
    if (listAll) {
        labels = g.schema().getVertexLabels();
    } else {
        labels = new ArrayList<>(names.size());
        for (String name : names) {
            labels.add(g.schema().getVertexLabel(name));
        }
    }
    return manager.serializer(g).writeVertexLabels(labels);
}
Example 2
Source File: AgentStatHbaseOperationFactory.java From pinpoint with Apache License 2.0
public <T extends AgentStatDataPoint> List<Put> createPuts(String agentId, AgentStatType agentStatType,
                                                            List<T> agentStatDataPoints,
                                                            HbaseSerializer<List<T>, Put> agentStatSerializer) {
    if (CollectionUtils.isEmpty(agentStatDataPoints)) {
        return Collections.emptyList();
    }
    Map<Long, List<T>> timeslots = slotAgentStatDataPoints(agentStatDataPoints);
    List<Put> puts = new ArrayList<Put>();
    for (Map.Entry<Long, List<T>> timeslot : timeslots.entrySet()) {
        long baseTimestamp = timeslot.getKey();
        List<T> slottedAgentStatDataPoints = timeslot.getValue();
        final AgentStatRowKeyComponent rowKeyComponent =
                new AgentStatRowKeyComponent(agentId, agentStatType, baseTimestamp);
        byte[] rowKey = this.rowKeyEncoder.encodeRowKey(rowKeyComponent);
        byte[] distributedRowKey = this.rowKeyDistributor.getDistributedKey(rowKey);
        Put put = new Put(distributedRowKey);
        agentStatSerializer.serialize(slottedAgentStatDataPoints, put, null);
        puts.add(put);
    }
    return puts;
}
Example 3
Source File: MetaDataClientImpl.java From Qualitis with Apache License 2.0
@Override
public DataInfo<ClusterInfoDetail> getClusterByUser(GetClusterByUserRequest request) {
    Long total = clusterInfoDao.countAll();
    List<ClusterInfo> allCluster = clusterInfoDao.findAllClusterInfo(request.getStartIndex(), request.getPageSize());
    DataInfo<ClusterInfoDetail> dataInfo = new DataInfo<>(total.intValue());
    if (CollectionUtils.isEmpty(allCluster)) {
        return dataInfo;
    }
    List<ClusterInfoDetail> details = new ArrayList<>();
    for (ClusterInfo clusterInfo : allCluster) {
        ClusterInfoDetail detail = new ClusterInfoDetail(clusterInfo.getClusterName());
        details.add(detail);
    }
    dataInfo.setContent(details);
    return dataInfo;
}
Example 4
Source File: FetchAssignedPartitionRequestHandler.java From joyqueue with Apache License 2.0
protected FetchAssignedPartitionAckData assignPartition(FetchAssignedPartitionData fetchAssignedPartitionData,
                                                        String app, String region,
                                                        String connectionId, String connectionHost) {
    TopicName topicName = TopicName.parse(fetchAssignedPartitionData.getTopic());
    TopicConfig topicConfig = nameService.getTopicConfig(topicName);
    if (topicConfig == null) {
        return null;
    }
    List<PartitionGroup> topicPartitionGroups = null;
    if (fetchAssignedPartitionData.isNearby()) {
        topicPartitionGroups = getTopicRegionPartitionGroup(topicConfig, region);
    } else {
        topicPartitionGroups = Lists.newArrayList(topicConfig.getPartitionGroups().values());
    }
    if (CollectionUtils.isEmpty(topicPartitionGroups)) {
        return new FetchAssignedPartitionAckData(JoyQueueCode.FW_COORDINATOR_PARTITION_ASSIGNOR_NO_PARTITIONS);
    }
    PartitionAssignment partitionAssignment = partitionAssignmentHandler.assign(
            fetchAssignedPartitionData.getTopic(), app, connectionId, connectionHost,
            fetchAssignedPartitionData.getSessionTimeout(), topicPartitionGroups);
    return new FetchAssignedPartitionAckData(partitionAssignment.getPartitions(), JoyQueueCode.SUCCESS);
}
Example 5
Source File: ZKV4ConfigServiceImpl.java From DDMQ with Apache License 2.0
@Override
public void onlyUpdateGroupConfig(Long groupId) throws Exception {
    ConsumeGroup group = consumeGroupService.findById(groupId);
    if (group == null) {
        LOGGER.warn("[ZK_V4_Group] group not found, groupId={}", groupId);
        throw new ZkConfigException(String.format("[Group] group not found, groupId=%s", groupId));
    }
    if (group.getIsDelete() == IsDelete.YES.getIndex()) {
        LOGGER.warn("[ZK_V4_Group] group is deleted, skip update zk, groupId={}, group={}", groupId, group.getGroupName());
        return;
    }
    List<ConsumeSubscription> subList = consumeSubscriptionService.findByGroupId(groupId);
    if (CollectionUtils.isEmpty(subList)) {
        LOGGER.warn("[ZK_V4_Group] sub is empty, skip update zk, groupId={}, group={}", groupId, group.getGroupName());
        return;
    }
    GroupConfig groupConfig = buildGroupConfig(group, subList);
    zkService.createOrUpdateGroup(groupConfig);
    LOGGER.debug("[ZK_V4_Group] update group success, groupId={}, groupConfig={}", groupId, groupConfig);
    LOGGER.info("[ZK_V4_Group] update group success, groupId={}, group={}, sub size={}",
            groupId, group.getGroupName(), subList.size());
}
Example 6
Source File: AtlasStructDef.java From atlas with Apache License 2.0
public void setConstraints(List<AtlasConstraintDef> constraints) {
    if (this.constraints != null && this.constraints == constraints) {
        return;
    }
    if (CollectionUtils.isEmpty(constraints)) {
        this.constraints = null;
    } else {
        this.constraints = new ArrayList<>(constraints);
    }
}
Example 7
Source File: ManagementServiceImpl.java From yes-cart with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public List<Long> getAssignedManagerCatalogHierarchy(final String userId)
        throws UnmappedInterfaceException, UnableToCreateInstanceException {
    final Manager manager = managerService.findSingleByCriteria(" where e.email = ?1", userId);
    if (manager == null) {
        return new ArrayList<>();
    }
    if (CollectionUtils.isEmpty(manager.getCategoryCatalogs())) {
        final Set<Long> allFromShops = new TreeSet<>();
        if (CollectionUtils.isNotEmpty(manager.getShops())) {
            for (final ManagerShop ms : manager.getShops()) {
                final Shop shop = shopService.getById(ms.getShop().getShopId());
                if (CollectionUtils.isNotEmpty(shop.getShopCategory())) {
                    for (final ShopCategory sc : shop.getShopCategory()) {
                        allFromShops.add(sc.getCategory().getCategoryId());
                    }
                }
            }
        }
        return new ArrayList<>(allFromShops);
    }
    final Set<Long> allFromCatalog = new TreeSet<>();
    for (final String catGuid : manager.getCategoryCatalogs()) {
        allFromCatalog.add(categoryService.findCategoryIdByGUID(catGuid));
    }
    return new ArrayList<>(allFromCatalog);
}
Example 8
Source File: AtlasEntityChangeNotifier.java From atlas with Apache License 2.0
private void updateFullTextMapping(String entityId, List<AtlasClassification> classifications) {
    if (AtlasRepositoryConfiguration.isFreeTextSearchEnabled() || !AtlasRepositoryConfiguration.isFullTextSearchEnabled()) {
        return;
    }
    if (StringUtils.isEmpty(entityId) || CollectionUtils.isEmpty(classifications)) {
        return;
    }
    AtlasVertex atlasVertex = AtlasGraphUtilsV2.findByGuid(entityId);
    if (atlasVertex == null || GraphHelper.isInternalType(atlasVertex)) {
        return;
    }
    MetricRecorder metric = RequestContext.get().startMetricRecord("fullTextMapping");
    try {
        String classificationFullText = fullTextMapperV2.getIndexTextForClassifications(entityId, classifications);
        String existingFullText = AtlasGraphUtilsV2.getEncodedProperty(atlasVertex, ENTITY_TEXT_PROPERTY_KEY, String.class);
        String newFullText = existingFullText + " " + classificationFullText;
        AtlasGraphUtilsV2.setEncodedProperty(atlasVertex, ENTITY_TEXT_PROPERTY_KEY, newFullText);
    } catch (AtlasBaseException e) {
        LOG.error("FullText mapping failed for Vertex[ guid = {} ]", entityId, e);
    }
    RequestContext.get().endMetricRecord(metric);
}
Example 9
Source File: JsonOutputFilter.java From emissary with Apache License 2.0
public IbdoParameterFilter() {
    // if all collections are empty, then output everything
    this.whitelistStar = (whitelistFields.contains("*") || whitelistFields.contains("ALL"));
    this.blacklistStar = (blacklistFields.contains("*") || blacklistFields.contains("ALL"));
    this.emptyBlacklist = CollectionUtils.isEmpty(blacklistFields) && CollectionUtils.isEmpty(blacklistPrefixes);
    this.emptyWhitelist = CollectionUtils.isEmpty(whitelistFields) && CollectionUtils.isEmpty(whitelistPrefixes);
    this.outputAll = emptyBlacklist && (whitelistStar || emptyWhitelist);
}
Example 10
Source File: AbstractRechnungsPositionDO.java From projectforge-webapp with GNU General Public License v3.0
/**
 * @return The total net sum of all assigned cost entries.
 */
@Transient
public BigDecimal getKostZuweisungNetSum() {
    if (CollectionUtils.isEmpty(this.kostZuweisungen)) {
        return BigDecimal.ZERO;
    }
    BigDecimal result = BigDecimal.ZERO;
    for (final KostZuweisungDO zuweisung : this.kostZuweisungen) {
        if (zuweisung.getNetto() != null) {
            // BigDecimal is immutable, so the value returned by add() must be reassigned
            result = result.add(zuweisung.getNetto());
        }
    }
    return result;
}
Example 11
Source File: FileRecoverTransactionServiceImpl.java From Raincat with GNU Lesser General Public License v3.0
@Override
public Boolean batchRemove(final List<String> ids, final String applicationName) {
    if (CollectionUtils.isEmpty(ids) || StringUtils.isBlank(applicationName)) {
        return Boolean.FALSE;
    }
    final String filePath = RepositoryPathUtils.buildFilePath(applicationName);
    ids.stream().map(id -> new File(getFullFileName(filePath, id))).forEach(File::delete);
    return Boolean.TRUE;
}
Example 12
Source File: RangerDefaultRequestProcessor.java From ranger with Apache License 2.0
@Override
public void enrich(RangerAccessRequest request) {
    List<RangerContextEnricher> enrichers = policyEngine.getAllContextEnrichers();
    if (!CollectionUtils.isEmpty(enrichers)) {
        for (RangerContextEnricher enricher : enrichers) {
            enricher.enrich(request);
        }
    }
}
Example 13
Source File: ConsumeSubscriptionServiceImpl.java From DDMQ with Apache License 2.0
private ConsoleBaseResponse<PageModel<SubscriptionOrderListVo>> findAllByCondition(String user, String text,
        Long clusterId, Long groupId, Integer consumeType, Integer state, Integer curPage, Integer pageSize) {
    if (StringUtils.isNotEmpty(user)) {
        user = DaoUtil.getLikeField(user);
    }
    if (StringUtils.isNotEmpty(text)) {
        text = DaoUtil.getLikeField(text);
    }
    Integer totalCount = consumeSubscriptionCustomMapper.selectCountByCondition(user, groupId, clusterId, consumeType, state, text);
    PageModel<SubscriptionOrderListVo> pageModel = new PageModel<>(curPage, pageSize, totalCount);
    if (totalCount == 0) {
        pageModel.setList(Collections.emptyList());
        return ConsoleBaseResponse.success(pageModel);
    }
    List<ConsumeSubscription> list = consumeSubscriptionCustomMapper.selectByCondition(user, groupId, clusterId,
            consumeType, state, text, pageModel.getPageIndex(), pageModel.getPageSize());
    if (CollectionUtils.isEmpty(list)) {
        pageModel.setList(Collections.emptyList());
        return ConsoleBaseResponse.success(pageModel);
    }
    Map<Long, Cluster> clusterMap = getClusterMap();
    List<SubscriptionOrderListVo> retList = Lists.newArrayList();
    list.forEach(sub -> retList.add(SubscriptionOrderListVo.buildSubscriptionListVo(sub,
            clusterMap.get(sub.getClusterId()).getIdcId(),
            clusterMap.get(sub.getClusterId()).getIdc(),
            clusterMap.get(sub.getClusterId()).getDescription())));
    pageModel.setList(retList);
    return ConsoleBaseResponse.success(pageModel);
}
Example 14
Source File: ConsumeSubscriptionServiceImpl.java From DDMQ with Apache License 2.0
@Transactional(isolation = Isolation.SERIALIZABLE, propagation = Propagation.REQUIRED, rollbackFor = Exception.class)
public ConsoleBaseResponse<?> changeState(Long groupId, Integer state) throws Exception {
    List<ConsumeSubscription> list = findByGroupId(groupId);
    if (CollectionUtils.isEmpty(list)) {
        return ConsoleBaseResponse.error(ConsoleBaseResponse.Status.INVALID_PARAM, "no subscription info found");
    }
    for (ConsumeSubscription sub : list) {
        updateSubStateById(sub, state);
    }
    return ConsoleBaseResponse.success();
}
Example 15
Source File: JdbcRecoverTransactionServiceImpl.java From Raincat with GNU Lesser General Public License v3.0
@Override
public Boolean batchRemove(final List<String> ids, final String applicationName) {
    if (CollectionUtils.isEmpty(ids) || StringUtils.isBlank(applicationName)) {
        return Boolean.FALSE;
    }
    final String tableName = RepositoryPathUtils.buildDbTableName(applicationName);
    ids.stream()
            .map(id -> buildDelSql(tableName, id))
            .forEach(sql -> jdbcTemplate.execute(sql));
    return Boolean.TRUE;
}
Example 16
Source File: RangerServicePoliciesCache.java From ranger with Apache License 2.0
private boolean checkCacheSanity(String serviceName, ServiceStore serviceStore,
                                 List<RangerPolicy> policiesFromIncrementalComputation) {
    return serviceStore.getPoliciesCount(serviceName) ==
            (CollectionUtils.isEmpty(policiesFromIncrementalComputation) ? 0 : policiesFromIncrementalComputation.size());
}
Example 17
Source File: ServiceREST.java From ranger with Apache License 2.0
private void validateUsersGroupsAndRoles(Set<String> users, Set<String> groups, Set<String> roles) {
    if (CollectionUtils.isEmpty(users) && CollectionUtils.isEmpty(groups) && CollectionUtils.isEmpty(roles)) {
        throw restErrorUtil.createGrantRevokeRESTException("Grantee users/groups/roles list is empty");
    }
}
Example 18
Source File: FetchRequestHandler.java From joyqueue with Apache License 2.0
private FetchResponse.PartitionResponse fetchMessage(Transport transport, Consumer consumer,
        org.joyqueue.domain.Consumer.ConsumerPolicy consumerPolicy, TopicName topic, int partition,
        String clientId, long offset, int maxBytes) {
    long minIndex = consume.getMinIndex(consumer, (short) partition);
    long maxIndex = consume.getMaxIndex(consumer, (short) partition);
    if (offset < minIndex || offset > maxIndex) {
        logger.warn("fetch message exception, index out of range, transport: {}, consumer: {}, partition: {}, offset: {}, minOffset: {}, maxOffset: {}",
                transport, consumer, partition, offset, minIndex, maxIndex);
        return new FetchResponse.PartitionResponse(partition, KafkaErrorCode.OFFSET_OUT_OF_RANGE.getCode());
    }

    List<KafkaBrokerMessage> kafkaBrokerMessages = Lists.newLinkedList();
    int batchSize = consumerPolicy.getBatchSize();
    int currentBytes = 0;

    // check the accumulated byte size against the limit
    while (currentBytes < maxBytes && offset < maxIndex) {
        List<BrokerMessage> messages = null;
        try {
            messages = doFetchMessage(consumer, partition, offset, batchSize);
            if (CollectionUtils.isEmpty(messages)) {
                break;
            }

            short skipOffset = 0;
            int currentBatchSize = 0;

            // convert messages to the Kafka format
            for (BrokerMessage message : messages) {
                currentBytes += message.getSize();
                KafkaBrokerMessage kafkaBrokerMessage = KafkaMessageConverter.toKafkaBrokerMessage(topic.getFullName(), partition, message);
                kafkaBrokerMessages.add(kafkaBrokerMessage);

                // for batch messages, advance by the number of entries in the batch
                if (kafkaBrokerMessage.isBatch()) {
                    skipOffset += kafkaBrokerMessage.getFlag();
                    currentBatchSize += kafkaBrokerMessage.getFlag();
                } else {
                    skipOffset += 1;
                    currentBatchSize += 1;
                }
            }

            // less than a full batch; stop fetching
            if (currentBatchSize < batchSize) {
                break;
            }

            offset += skipOffset;
        } catch (Exception e) {
            logger.error("fetch message exception, consumer: {}, partition: {}, offset: {}, batchSize: {}",
                    consumer, partition, offset, batchSize, e);
            break;
        }
    }

    FetchResponse.PartitionResponse fetchResponsePartitionData =
            new FetchResponse.PartitionResponse(partition, KafkaErrorCode.NONE.getCode(), kafkaBrokerMessages);
    fetchResponsePartitionData.setBytes(currentBytes);
    fetchResponsePartitionData.setLogStartOffset(minIndex);
    fetchResponsePartitionData.setLastStableOffset(maxIndex);
    fetchResponsePartitionData.setHighWater(maxIndex);
    return fetchResponsePartitionData;
}
Example 19
Source File: WkfNodeService.java From axelor-open-suite with GNU Affero General Public License v3.0
/**
 * Add or update items in MetaSelect according to WkfNodes.
 *
 * @param metaSelect MetaSelect to update.
 * @return Return first item as default value for wkfStatus field.
 * @throws AxelorException
 */
private String processNodes(MetaSelect metaSelect) throws AxelorException {
    String[] wkfFieldInfo = wkfService.getWkfFieldInfo(wkfService.workflow);
    String wkfFieldName = wkfFieldInfo[0];
    String wkfFieldType = wkfFieldInfo[1];

    List<WkfNode> nodeList = wkfService.workflow.getNodes();
    String defaultValue = null;
    removeOldOptions(metaSelect, nodeList);
    Collections.sort(
        nodeList,
        (WkfNode node1, WkfNode node2) -> node1.getSequence().compareTo(node2.getSequence()));

    List<Option> oldSeqenceOptions =
        Beans.get(WkfController.class).getSelect(wkfService.workflow.getStatusMetaField());
    int oldSequenceCounter = 0;
    if (!CollectionUtils.isEmpty(oldSeqenceOptions) && oldSeqenceOptions.size() != nodeList.size()) {
        throw new AxelorException(
            TraceBackRepository.CATEGORY_CONFIGURATION_ERROR, IExceptionMessage.CANNOT_ALTER_NODES);
    }

    for (WkfNode node : nodeList) {
        if (!CollectionUtils.isEmpty(oldSeqenceOptions)
            && !oldSeqenceOptions.get(oldSequenceCounter).getTitle().equals(node.getTitle())) {
            throw new AxelorException(
                TraceBackRepository.CATEGORY_CONFIGURATION_ERROR, IExceptionMessage.CANNOT_ALTER_NODES);
        }
        log.debug("Processing node: {}", node.getName());
        String option = node.getSequence().toString();
        MetaSelectItem metaSelectItem = getMetaSelectItem(metaSelect, option);
        if (metaSelectItem == null) {
            metaSelectItem = new MetaSelectItem();
            metaSelectItem.setValue(
                !CollectionUtils.isEmpty(oldSeqenceOptions)
                    ? oldSeqenceOptions.get(oldSequenceCounter).getValue()
                    : option);
            metaSelect.addItem(metaSelectItem);
        }
        metaSelectItem.setTitle(node.getTitle());
        metaSelectItem.setOrder(node.getSequence());
        if (defaultValue == null) {
            defaultValue = metaSelectItem.getValue();
            log.debug("Default value set: {}", defaultValue);
        }

        List<String[]> actions = new ArrayList<String[]>();
        if (node.getMetaActionSet() != null) {
            Stream<MetaAction> actionStream =
                node.getMetaActionSet().stream().sorted(Comparator.comparing(MetaAction::getSequence));
            actionStream.forEach(metaAction -> actions.add(new String[] {metaAction.getName()}));
        }
        if (!actions.isEmpty()) {
            String name = getActionName(node.getName());
            String value = node.getSequence().toString();
            if (wkfFieldType.equals("string") || wkfFieldType.equals("String")) {
                value = "'" + value + "'";
            }
            String condition = wkfFieldName + " == " + value;
            nodeActions.add(new String[] {name, condition});
            this.wkfService.updateActionGroup(name, actions);
        }
        oldSequenceCounter++;
    }
    return defaultValue;
}
Example 20
Source File: AtlasClassificationType.java From atlas with Apache License 2.0
/**
 * This method processes the entityTypes to ensure they are valid, using the following principles:
 * - entityTypes are supplied on the classificationDef to restrict the types of entities that this classification can be applied to
 * - Any subtypes of the specified entity type can also have this classification applied
 * - Any subtypes of the classificationDef inherit the parent's entityTypes restrictions
 * - Any subtypes of the classificationDef can further restrict the parent's entityTypes restrictions
 * - An empty entityTypes list when there are no parent restrictions means there are no restrictions
 * - An empty entityTypes list when there are parent restrictions means that the subtype picks up the parent's restrictions
 *
 * This method validates that these principles are adhered to.
 *
 * Note that if duplicate Strings in the entityTypes are specified on an add / update, the duplicates are ignored - as Java Sets cannot have duplicates.
 * Note if an entityType is supplied in the list that is a subtype of one of the other supplied entityTypes, we are not policing this case as invalid.
 *
 * @param typeRegistry
 * @throws AtlasBaseException
 */
@Override
void resolveReferencesPhase3(AtlasTypeRegistry typeRegistry) throws AtlasBaseException {
    subTypes                 = Collections.unmodifiableSet(subTypes);
    allSubTypes              = Collections.unmodifiableSet(allSubTypes);
    typeAndAllSubTypes       = Collections.unmodifiableSet(typeAndAllSubTypes);
    typeAndAllSubTypesQryStr = ""; // will be computed on next access

    /* Add any entityTypes defined in our parents as restrictions. */
    Set<String> superTypeEntityTypes = null;

    final Set<String> classificationDefEntityTypes = classificationDef.getEntityTypes();

    // Here we find the intersection of the entityTypes specified in all our supertypes; in this way we will honour our parents' restrictions.
    // The following logic assumes typeAndAllSubTypes is populated, so it needs to run after resolveReferencesPhase2().
    for (String superType : this.allSuperTypes) {
        AtlasClassificationDef superTypeDef = typeRegistry.getClassificationDefByName(superType);
        Set<String> entityTypeNames = superTypeDef.getEntityTypes();

        if (CollectionUtils.isEmpty(entityTypeNames)) { // no restrictions specified
            continue;
        }

        // classification is applicable for specified entityTypes and their sub-entityTypes
        Set<String> typesAndSubEntityTypes = AtlasEntityType.getEntityTypesAndAllSubTypes(entityTypeNames, typeRegistry);

        if (superTypeEntityTypes == null) {
            superTypeEntityTypes = new HashSet<>(typesAndSubEntityTypes);
        } else { // retain only the intersections.
            superTypeEntityTypes.retainAll(typesAndSubEntityTypes);
        }
        if (superTypeEntityTypes.isEmpty()) { // if we have no intersections then we are disjoint - so no need to check other supertypes
            break;
        }
    }

    if (superTypeEntityTypes == null) { // no supertype restrictions; use current classification restrictions
        this.entityTypes = AtlasEntityType.getEntityTypesAndAllSubTypes(classificationDefEntityTypes, typeRegistry);
    } else { // restrictions are specified in super-types
        if (CollectionUtils.isEmpty(superTypeEntityTypes)) {
            /*
             Restrictions in superTypes are disjoint! This means that the child cannot exist, as it cannot be a restriction of its parents.

             For example:
              parent1 specifies entityTypes ["EntityA"]
              parent2 specifies entityTypes ["EntityB"]

             In order to be a valid child of Parent1 the child could only be applied to EntityAs.
             In order to be a valid child of Parent2 the child could only be applied to EntityBs.

             Reject the creation of the classificationDef - as it would compromise Atlas's integrity.
             */
            throw new AtlasBaseException(AtlasErrorCode.CLASSIFICATIONDEF_PARENTS_ENTITYTYPES_DISJOINT, this.classificationDef.getName());
        }

        if (CollectionUtils.isEmpty(classificationDefEntityTypes)) { // no restriction specified; use the restrictions from super-types
            this.entityTypes = superTypeEntityTypes;
        } else {
            this.entityTypes = AtlasEntityType.getEntityTypesAndAllSubTypes(classificationDefEntityTypes, typeRegistry);

            // Compatible parents and entityTypes; now check whether the specified entityTypes are the same as the
            // effective entityTypes due to our parents, or a subset. Only allowed to restrict our parents.
            if (!superTypeEntityTypes.containsAll(this.entityTypes)) {
                throw new AtlasBaseException(AtlasErrorCode.CLASSIFICATIONDEF_ENTITYTYPES_NOT_PARENTS_SUBSET,
                        classificationDef.getName(), classificationDefEntityTypes.toString());
            }
        }
    }

    classificationDef.setSubTypes(subTypes);
}
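Note: all of the examples above import the legacy org.apache.commons.collections package. Projects that have moved to Commons Collections 4 get the same null-safe check unchanged under the newer org.apache.commons.collections4 package; the sketch below shows the equivalent usage (the class name IsEmptyCc4Demo is illustrative, and it assumes commons-collections4 4.x on the classpath):

import java.util.Collections;

import org.apache.commons.collections4.CollectionUtils;

public class IsEmptyCc4Demo {
    public static void main(String[] args) {
        // same null-safe semantics as the 3.x method used in the examples above
        System.out.println(CollectionUtils.isEmpty(null));                    // true
        System.out.println(CollectionUtils.isEmpty(Collections.emptyList())); // true
    }
}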