com.jeesuite.common.util.NodeNameHolder Java Examples
The following examples show how to use
com.jeesuite.common.util.NodeNameHolder.
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
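In all of the examples, NodeNameHolder is used through its static getNodeId() accessor, which returns an identifier for the current process. As a minimal sketch (it assumes nothing beyond that one static method; the path and group names are placeholders), the id is typically appended to names that must be unique per running instance, such as ZooKeeper ephemeral paths or Kafka client ids:

// Minimal sketch, not from the project: the only API relied on below is the
// static NodeNameHolder.getNodeId() accessor; the literals are placeholders.
import com.jeesuite.common.util.NodeNameHolder;

public class NodeIdSketch {
    public static void main(String[] args) {
        String nodeId = NodeNameHolder.getNodeId();
        // patterns seen in the examples below: a per-node ZooKeeper path and a Kafka client id
        String statPath = "/mq/producer-stats/some-topic/" + nodeId;
        String clientId = "demoGroup_" + nodeId;
        System.out.println(statPath + " | " + clientId);
    }
}

The examples below use exactly this pattern: ephemeral ZooKeeper nodes (Examples #1 and #3), Eureka instance metadata (#2), a system property (#4), and Kafka client/consumer ids (#5 and #6).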
Example #1
Source File: SendCounterHandler.java From jeesuite-libs with Apache License 2.0
private void updateProducerStat(String topic, boolean error) {
    if (!producerStats.containsKey(topic)) {
        synchronized (producerStats) {
            String path = groupPath + "/" + topic;
            if (!zkClient.exists(path)) {
                zkClient.createPersistent(path, true);
            }
            // ephemeral child node named after the current node id
            path = path + "/" + NodeNameHolder.getNodeId();
            zkClient.createEphemeral(path);
            statPaths.put(topic, path);
            // indexes 0 and 2 count successful sends, 1 and 3 count failed sends
            producerStats.put(topic, new AtomicLong[] { new AtomicLong(0), new AtomicLong(0), new AtomicLong(0), new AtomicLong(0) });
        }
    }
    if (!error) {
        producerStats.get(topic)[0].incrementAndGet();
        producerStats.get(topic)[2].incrementAndGet();
    } else {
        producerStats.get(topic)[1].incrementAndGet();
        producerStats.get(topic)[3].incrementAndGet();
    }
    commited.set(false);
}
Example #2
Source File: EurekaRegistry.java From oneplatform with Apache License 2.0
private void initEurekaClient(Properties properties) throws Exception {
    // publish the node id as Eureka instance metadata
    properties.setProperty("eureka.metadataMap.nodeId", NodeNameHolder.getNodeId());
    ConfigurationManager.loadProperties(properties);
    //ConfigurationManager.loadPropertiesFromResources("eureka.properties");
    //DynamicPropertyFactory configInstance = com.netflix.config.DynamicPropertyFactory.getInstance();
    MyDataCenterInstanceConfig instanceConfig = new MyDataCenterInstanceConfig() {
        @Override
        public String getHostName(boolean refresh) {
            String hostName = super.getHostName(refresh);
            if (ResourceUtils.getBoolean("eureka.preferIpAddress")) {
                hostName = IpUtils.getLocalIpAddr();
            }
            return hostName;
        }

        @Override
        public String getIpAddress() {
            return IpUtils.getLocalIpAddr();
        }
    };
    InstanceInfo instanceInfo = new EurekaConfigBasedInstanceInfoProvider(instanceConfig).get();
    applicationInfoManager = new ApplicationInfoManager(instanceConfig, instanceInfo);
    DefaultEurekaClientConfig clientConfig = new DefaultEurekaClientConfig();
    eurekaClient = new DiscoveryClient(applicationInfoManager, clientConfig);
    instanceId = instanceInfo.getInstanceId();
}
Example #3
Source File: SnowflakeGenerator.java From jeesuite-libs with Apache License 2.0
/**
 * Requires ZooKeeper to store the node information.
 */
public SnowflakeGenerator() {
    try {
        String appName = ResourceUtils.getProperty("spring.application.name",
                ResourceUtils.getProperty("jeesuite.configcenter.appName"));
        Validate.notBlank(appName, "config[spring.application.name] not found");
        String zkServer = ResourceUtils.getAndValidateProperty("zookeeper.servers");
        zk = new ZooKeeper(zkServer, 10000, this);
        String path = String.format(ROOT_PATH, appName);
        String[] parts = StringUtils.split(path, "/");
        String tmpParent = "";
        Stat stat;
        for (int i = 0; i < parts.length; i++) {
            tmpParent = tmpParent + "/" + parts[i];
            stat = zk.exists(tmpParent, false);
            if (stat == null) {
                zk.create(tmpParent, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        }
        // register an ephemeral child named after the current node id,
        // then use the child count as this node's worker id
        String nodePath = path + "/" + NodeNameHolder.getNodeId();
        zk.create(nodePath, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        int workerId = zk.getChildren(path, false).size();
        if (workerId > maxWorkerId || workerId < 0) {
            throw new IllegalArgumentException(
                    String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
        }
        this.workerId = workerId;
    } catch (Exception e) {
        // fall back to a random worker id if ZooKeeper is unavailable
        this.workerId = RandomUtils.nextInt(1, 31);
    }
    this.datacenterId = 1;
}
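The constructor caps the ZooKeeper-derived worker id at maxWorkerId and, on failure, falls back to RandomUtils.nextInt(1, 31). That bound matches the conventional Snowflake layout in which the worker id occupies 5 bits; the bit constants are not shown in this snippet, so the field width here is an assumption. A small standalone sketch of why a 5-bit field caps out at 31:

// Standalone sketch (not project code): a 5-bit worker id field yields a maximum value of 31.
public class SnowflakeBitsSketch {
    public static void main(String[] args) {
        long workerIdBits = 5L;                    // assumed field width, as in the common Snowflake layout
        long maxWorkerId = ~(-1L << workerIdBits); // 0b11111 = 31
        System.out.println("maxWorkerId = " + maxWorkerId); // prints: maxWorkerId = 31
    }
}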
Example #4
Source File: BaseApplicationStarter.java From oneplatform with Apache License 2.0
protected static long before() {
    // expose the node id to downstream components via a system property
    System.setProperty("client.nodeId", NodeNameHolder.getNodeId());
    return System.currentTimeMillis();
}
Example #5
Source File: TopicProducerSpringProvider.java From jeesuite-libs with Apache License 2.0
@Override
public void afterPropertiesSet() throws Exception {
    Validate.notEmpty(this.configs, "configs is required");

    routeEnv = StringUtils.trimToNull(ResourceUtils.getProperty(KafkaConst.PROP_ENV_ROUTE));
    if (routeEnv != null) log.info("current route Env value is:{}", routeEnv);

    // remove entries whose values are blank or still contain unresolved "${...}" placeholders
    Set<String> propertyNames = configs.stringPropertyNames();
    for (String propertyName : propertyNames) {
        String value = configs.getProperty(propertyName);
        if (StringUtils.isBlank(value) || value.trim().startsWith("$")) {
            configs.remove(propertyName);
            log.warn("remove prop[{}],value is:{}", propertyName, value);
        }
    }

    if (!configs.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); // key serializer
    }
    if (!configs.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KyroMessageSerializer.class.getName());
    }
    if (!configs.containsKey(ProducerConfig.PARTITIONER_CLASS_CONFIG)) {
        configs.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, DefaultPartitioner.class.getName());
    }
    // retry once by default
    if (!configs.containsKey(ProducerConfig.RETRIES_CONFIG)) {
        configs.put(ProducerConfig.RETRIES_CONFIG, "1");
    }
    if (!configs.containsKey(ProducerConfig.COMPRESSION_TYPE_CONFIG)) {
        configs.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
    }
    if (!configs.containsKey("client.id")) {
        configs.put("client.id", (producerGroup == null ? "" : "_" + producerGroup) + NodeNameHolder.getNodeId());
    }

    KafkaProducer<String, Object> kafkaProducer = new KafkaProducer<String, Object>(configs);

    String monitorZkServers = ResourceUtils.getProperty("kafka.zkServers");
    if (StringUtils.isNotBlank(monitorZkServers)) {
        zkClient = new ZkClient(monitorZkServers, 10000, 5000, new ZKStringSerializer());
    }

    this.producer = new DefaultTopicProducer(kafkaProducer, zkClient, consumerAckEnabled);

    // event handlers
    if (monitorEnabled) {
        Validate.notBlank(producerGroup, "enable producer monitor property[producerGroup] is required");
        Validate.notNull(zkClient, "enable producer monitor property[kafka.zkServers] is required");
        this.producer.addEventHandler(new SendCounterHandler(producerGroup, zkClient));
    }
    if (delayRetries > 0) {
        this.producer.addEventHandler(new SendErrorDelayRetryHandler(producerGroup, kafkaProducer, delayRetries));
    }
}
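The chain of containsKey/put checks above only fills a default when the caller has not configured the key. Because java.util.Properties extends Hashtable, the same behaviour can be expressed with putIfAbsent; the snippet below is only an illustrative sketch of that alternative, not code from the project:

// Sketch only: filling producer defaults with putIfAbsent instead of containsKey/put pairs.
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;

public class ProducerDefaultsSketch {
    static Properties withDefaults(Properties configs) {
        configs.putIfAbsent(ProducerConfig.RETRIES_CONFIG, "1");               // retry once by default
        configs.putIfAbsent(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy"); // compress batches
        return configs;
    }

    public static void main(String[] args) {
        Properties configs = new Properties();
        configs.put(ProducerConfig.RETRIES_CONFIG, "3"); // user-supplied value wins
        System.out.println(withDefaults(configs));       // retries stays "3", compression becomes "snappy"
    }
}

Either form leaves user-supplied values untouched.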
Example #6
Source File: TopicConsumerSpringProvider.java From jeesuite-libs with Apache License 2.0
@Override
public void afterPropertiesSet() throws Exception {
    if (StringUtils.isNotBlank(scanPackages)) {
        String[] packages = org.springframework.util.StringUtils.tokenizeToStringArray(this.scanPackages,
                ConfigurableApplicationContext.CONFIG_LOCATION_DELIMITERS);
        scanAndRegisterAnnotationTopics(packages);
    }
    Validate.isTrue(topicHandlers != null && topicHandlers.size() > 0, "at least one topic is required");
    // current status: skip if already started
    if (status.get() > 0) return;

    routeEnv = StringUtils.trimToNull(ResourceUtils.getProperty(KafkaConst.PROP_ENV_ROUTE));
    if (routeEnv != null) {
        logger.info("current route Env value is:{}", routeEnv);
        Map<String, MessageHandler> newTopicHandlers = new HashMap<>();
        for (String origTopicName : topicHandlers.keySet()) {
            newTopicHandlers.put(routeEnv + "." + origTopicName, topicHandlers.get(origTopicName));
        }
        topicHandlers = newTopicHandlers;
    }

    // make sure that rebalance.max.retries * rebalance.backoff.ms > zookeeper.session.timeout.ms
    configs.put("rebalance.max.retries", "5");
    configs.put("rebalance.backoff.ms", "1205");
    configs.put("zookeeper.session.timeout.ms", "6000");

    configs.put("key.deserializer", StringDeserializer.class.getName());
    if (!configs.containsKey("value.deserializer")) {
        configs.put("value.deserializer", KyroMessageDeserializer.class.getName());
    }

    if (useNewAPI) {
        if ("smallest".equals(configs.getProperty("auto.offset.reset"))) {
            configs.put("auto.offset.reset", "earliest");
        } else if ("largest".equals(configs.getProperty("auto.offset.reset"))) {
            configs.put("auto.offset.reset", "latest");
        }
    } else {
        // force auto commit with the old API
        configs.put("enable.auto.commit", "true");
    }

    // sync node information
    groupId = configs.get(org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG).toString();
    logger.info("\n===============KAFKA Consumer group[{}] begin start=================\n", groupId);

    consumerId = NodeNameHolder.getNodeId();
    // configs.put("consumer.id", consumerId); // kafka builds this internally as consumerId = groupId + "_" + consumerId
    consumerId = groupId + "_" + consumerId;
    if (!configs.containsKey("client.id")) {
        configs.put("client.id", consumerId);
    }
    start();

    logger.info("\n===============KAFKA Consumer group[{}],consumerId[{}] start finished!!=================\n", groupId, consumerId);
}
Example #7
Source File: JobContext.java From jeesuite-libs with Apache License 2.0
public String getNodeId() {
    return NodeNameHolder.getNodeId();
}