org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient Java Examples
The following examples show how to use
org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.
You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: KarelDbCoordinator.java From kareldb with Apache License 2.0 | 5 votes |
/**
 * Initialize the coordination manager.
 *
 * <p>Forwards the group-membership settings to the base coordinator via a
 * {@link GroupRebalanceConfig} and records the local member identity and
 * rebalance listener.
 */
public KarelDbCoordinator(
    LogContext logContext,
    ConsumerNetworkClient client,
    String groupId,
    int rebalanceTimeoutMs,
    int sessionTimeoutMs,
    int heartbeatIntervalMs,
    Metrics metrics,
    String metricGrpPrefix,
    Time time,
    long retryBackoffMs,
    KarelDbIdentity identity,
    KarelDbRebalanceListener listener) {
    super(
        new GroupRebalanceConfig(
            sessionTimeoutMs,
            rebalanceTimeoutMs,
            heartbeatIntervalMs,
            groupId,
            Optional.empty(), // presumably group.instance.id (none => dynamic membership) — confirm against GroupRebalanceConfig
            retryBackoffMs,
            true // presumably leaveGroupOnClose — confirm against GroupRebalanceConfig
        ),
        logContext,
        client,
        metrics,
        metricGrpPrefix,
        time
    );
    this.identity = identity;
    // No assignment exists until the first rebalance completes.
    this.assignmentSnapshot = null;
    this.listener = listener;
}
Example #2
Source File: WorkerCoordinator.java From DataLink with Apache License 2.0 | 5 votes |
/**
 * Initialize the coordination manager.
 *
 * <p>Delegates the core group-membership parameters to the base coordinator,
 * then wires up the worker-specific collaborators: the REST endpoint used to
 * identify this worker, the task-config manager consulted during assignment,
 * the rebalance listener, and the coordinator metrics sensors.
 */
public WorkerCoordinator(ConsumerNetworkClient client,
                         String groupId,
                         int rebalanceTimeoutMs,
                         int sessionTimeoutMs,
                         int heartbeatIntervalMs,
                         Metrics metrics,
                         String metricGrpPrefix,
                         Time time,
                         long retryBackoffMs,
                         String restUrl,
                         TaskConfigManager taskConfigManager,
                         WorkerRebalanceListener listener) {
    super(client, groupId, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs,
        metrics, metricGrpPrefix, time, retryBackoffMs);
    // Worker-specific wiring.
    this.listener = listener;
    this.restUrl = restUrl;
    this.taskConfigManager = taskConfigManager;
    this.sensors = new WorkerCoordinatorMetrics(metrics, metricGrpPrefix);
    // No assignment has been received yet, and no rejoin has been requested.
    this.assignmentSnapshot = null;
    this.rejoinRequested = false;
}
Example #3
Source File: KarelDbLeaderElector.java From kareldb with Apache License 2.0 | 4 votes |
/**
 * Construct the leader elector: builds the Kafka network stack (metrics,
 * metadata, selector, network client) from the "kafkacache."-prefixed config
 * and joins the KarelDB cluster group via a {@link KarelDbCoordinator}.
 *
 * <p>Construction order matters: metrics/metadata must exist before the
 * network client, which must exist before the consumer client and coordinator.
 *
 * @throws KarelDbElectionException if any part of the client stack fails to
 *     construct; partially-built resources are released via {@code stop(true)}
 */
public KarelDbLeaderElector(KarelDbConfig config, KarelDbEngine engine) throws KarelDbElectionException {
    try {
        this.engine = engine;
        // Unique client id per elector instance within this JVM.
        this.clientId = "kdb-" + KDB_CLIENT_ID_SEQUENCE.getAndIncrement();

        // Determine this node's advertised identity (host/port/eligibility).
        this.myIdentity = findIdentity(
            config.getList(KarelDbConfig.LISTENERS_CONFIG),
            config.getBoolean(KarelDbConfig.LEADER_ELIGIBILITY_CONFIG));

        // Tag all metrics with the client id and expose them over JMX.
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().tags(metricsTags);
        List<MetricsReporter> reporters = Collections.singletonList(new JmxReporter(JMX_PREFIX));
        Time time = Time.SYSTEM;

        // Kafka client settings come from the "kafkacache."-prefixed keys.
        ClientConfig clientConfig = new ClientConfig(config.originalsWithPrefix("kafkacache."), false);

        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = clientConfig.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        String groupId = config.getString(KarelDbConfig.CLUSTER_GROUP_ID_CONFIG);
        LogContext logContext = new LogContext("[KarelDB clientId=" + clientId + ", groupId="
            + groupId + "] ");
        this.metadata = new Metadata(
            retryBackoffMs,
            clientConfig.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG),
            logContext,
            new ClusterResourceListeners()
        );
        // Seed cluster metadata from the configured bootstrap servers.
        List<String> bootstrapServers = config.getList(KarelDbConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(bootstrapServers,
            clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG));
        this.metadata.bootstrap(addresses);
        String metricGrpPrefix = "kareldb";

        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(clientConfig, time);
        long maxIdleMs = clientConfig.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG);

        // Low-level network client; buffer sizes and timeouts come from config.
        NetworkClient netClient = new NetworkClient(
            new Selector(maxIdleMs, metrics, time, metricGrpPrefix, channelBuilder, logContext),
            this.metadata,
            clientId,
            100, // a fixed large enough value will suffice (max in-flight requests per connection)
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            clientConfig.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            ClientDnsLookup.forConfig(clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG)),
            time,
            true, // discoverBrokerVersions — TODO confirm against NetworkClient ctor
            new ApiVersions(),
            logContext);

        // Higher-level client used by the coordinator for group protocol RPCs.
        this.client = new ConsumerNetworkClient(
            logContext,
            netClient,
            metadata,
            time,
            retryBackoffMs,
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            Integer.MAX_VALUE // no upper bound on poll timeout
        );
        this.coordinator = new KarelDbCoordinator(
            logContext,
            this.client,
            groupId,
            300000, // Default MAX_POLL_INTERVAL_MS_CONFIG
            10000, // Default SESSION_TIMEOUT_MS_CONFIG
            3000, // Default HEARTBEAT_INTERVAL_MS_CONFIG
            metrics,
            metricGrpPrefix,
            time,
            retryBackoffMs,
            myIdentity,
            this // this elector receives rebalance callbacks
        );

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());

        initTimeout = config.getInt(KarelDbConfig.KAFKACACHE_INIT_TIMEOUT_CONFIG);

        LOG.debug("Group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KarelDbElectionException("Failed to construct kafka consumer", t);
    }
}
Example #4
Source File: KarelDbCoordinatorTest.java From kareldb with Apache License 2.0 | 4 votes |
@Before
public void setup() {
    // Deterministic clock and empty/never-expiring metadata for the test.
    this.time = new MockTime();
    this.metrics = new Metrics(time);
    this.metadata = new Metadata(0, Long.MAX_VALUE, new LogContext(), new ClusterResourceListeners());

    // Metadata updater that serves the static test cluster and rejects
    // real metadata updates — the coordinator must never trigger one here.
    MockClient.MockMetadataUpdater updater = new MockClient.MockMetadataUpdater() {
        @Override
        public List<Node> fetchNodes() {
            return cluster.nodes();
        }

        @Override
        public boolean isUpdateNeeded() {
            return false;
        }

        @Override
        public void update(Time time, MockClient.MetadataUpdate update) {
            throw new UnsupportedOperationException();
        }
    };
    this.client = new MockClient(time, updater);

    LogContext logContext = new LogContext();
    this.consumerClient =
        new ConsumerNetworkClient(logContext, client, metadata, time, 100, 1000, Integer.MAX_VALUE);
    this.rebalanceListener = new MockRebalanceListener();

    // Coordinator under test, wired to the mocks above.
    this.coordinator = new KarelDbCoordinator(
        logContext,
        consumerClient,
        groupId,
        rebalanceTimeoutMs,
        sessionTimeoutMs,
        heartbeatIntervalMs,
        metrics,
        "kdb-" + groupId,
        time,
        retryBackoffMs,
        LEADER_INFO,
        rebalanceListener
    );
}
Example #5
Source File: WorkerGroupMember.java From DataLink with Apache License 2.0 | 4 votes |
/**
 * Construct a DataLink worker group member: builds the Kafka network stack
 * (metrics, metadata, network client) from the worker config and joins the
 * worker group via a {@link WorkerCoordinator}.
 *
 * <p>Construction order matters: metadata and metrics must exist before the
 * network client, which must exist before the consumer client and coordinator.
 *
 * @throws DatalinkException if any part of the stack fails to construct;
 *     partially-built resources are released via {@code stop(true)}
 */
public WorkerGroupMember(WorkerConfig config, String restUrl, TaskConfigManager jobTaskConfigManager, WorkerRebalanceListener listener, Time time) {
    try {
        this.time = time;

        // Use the configured client id, or generate a unique one per instance.
        String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG);
        clientId = clientIdConfig.length() <= 0 ? "datalink-worker-" + DATALINK_CLIENT_ID_SEQUENCE.getAndIncrement() : clientIdConfig;

        // Metrics tagged with the client id, exported to JMX plus any
        // configured reporters.
        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().samples(config.getInt(CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG))
            .timeWindow(config.getLong(CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
            .tags(metricsTags);
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class);
        reporters.add(new JmxReporter(JMX_PREFIX));
        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);

        // Seed cluster metadata from the configured bootstrap servers.
        this.metadata = new Metadata(retryBackoffMs, config.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG));
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(config.getList(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
        this.metadata.update(Cluster.bootstrap(addresses), 0);
        String metricGrpPrefix = "datalink.worker";
        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config.values());

        // Low-level network client; buffer sizes and timeouts come from config.
        NetworkClient netClient = new NetworkClient(
            new Selector(config.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder),
            this.metadata,
            clientId,
            100, // a fixed large enough value will suffice (max in-flight requests per connection)
            config.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
            config.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
            config.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
            config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            time);

        // Anonymous subclass: on a metadata-update wait, re-seed metadata from
        // the bootstrap addresses before delegating — presumably to recover
        // when the cluster metadata has been lost; confirm intent with authors.
        this.client = new ConsumerNetworkClient(netClient, metadata, time, retryBackoffMs,
            config.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG)) {
            @Override
            public boolean awaitMetadataUpdate(long timeout) {
                metadata.update(Cluster.bootstrap(addresses), time.milliseconds());
                return super.awaitMetadataUpdate(timeout);
            }
        };
        this.coordinator = new WorkerCoordinator(this.client,
            config.getString(WorkerConfig.GROUP_ID_CONFIG),
            config.getInt(WorkerConfig.REBALANCE_TIMEOUT_MS_CONFIG),
            config.getInt(WorkerConfig.SESSION_TIMEOUT_MS_CONFIG),
            config.getInt(WorkerConfig.HEARTBEAT_INTERVAL_MS_CONFIG),
            metrics,
            metricGrpPrefix,
            this.time,
            retryBackoffMs,
            restUrl,
            jobTaskConfigManager,
            listener);

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId);
        log.debug("datalink worker group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak.
        stop(true);
        // now propagate the errors
        throw new DatalinkException("Failed to construct datalink worker", t);
    }
}
Example #6
Source File: KafkaApiRequest.java From kafka-utilities with Apache License 2.0 | 4 votes |
/**
 * Create a request helper backed by the given network client.
 *
 * <p>Package-private: intended for construction within this package only.
 */
KafkaApiRequest(final ConsumerNetworkClient networkClient) {
    this.networkClient = networkClient;
}