com.couchbase.client.dcp.Client Java Examples
The following examples show how to use com.couchbase.client.dcp.Client. Each example is drawn from an open-source project; the source file, project, and license are noted above each listing.
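The examples cluster around a common lifecycle: build a Client, register control and data event handlers, connect, initialize stream state, and start streaming. As an orientation aid, here is a minimal low-level sketch assembled from the calls used in the examples below; the seed node, credentials, and bucket name are placeholders, not values from any one project.

// A minimal low-level sketch, not taken from any single project below.
// "127.0.0.1", "Administrator"/"password", and "travel-sample" are
// placeholder values.
Client client = Client.builder()
    .seedNodes("127.0.0.1")
    .credentials("Administrator", "password")
    .bucket("travel-sample")
    .build();

// Low-level handlers receive reference-counted buffers: acknowledge
// them for flow control, then release them (see Examples #2 and #4).
client.controlEventHandler((flowController, event) -> {
  flowController.ack(event);
  event.release();
});
client.dataEventHandler((flowController, event) -> {
  // Process the mutation or deletion here, then:
  flowController.ack(event);
  event.release();
});

client.connect().await();
client.initializeState(StreamFrom.BEGINNING, StreamTo.INFINITY).await();
client.startStreaming().await();
// ... later, when done:
client.disconnect().await();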
Example #1
Source File: StreamerServiceImpl.java (from java-dcp-client, Apache License 2.0)
@Override
public String start(String bucket, @Default("[]") List<Short> vbuckets,
                    StreamFrom from, StreamTo to,
                    boolean mitigateRollbacks, boolean collectionAware) {
  String streamerId = "dcp-test-streamer-" + nextStreamerId.getAndIncrement();

  Client client = Client.builder()
      .bucket(bucket)
      .credentials(username, password)
      .mitigateRollbacks(mitigateRollbacks ? 100 : 0, TimeUnit.MILLISECONDS)
      .flowControl(1024 * 128)
      .seedNodes(nodes.split(","))
      .userAgent("dcp-integration-test", "0", streamerId)
      .collectionsAware(collectionAware)
      .build();

  idToStreamer.put(streamerId, new DcpStreamer(client, vbuckets, from, to));
  return streamerId;
}
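The DcpStreamer constructed on the final line is the class shown in Example #19; it registers a DatabaseChangeListener on this client, connects, and starts streaming the requested vbuckets.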
Example #2
Source File: CouchbaseStreamingConnection.java (from components, Apache License 2.0)
public CouchbaseStreamingConnection(String bootstrapNodes, String bucket, String password) {
  connected = false;
  streaming = false;
  client = Client.configure()
      .connectTimeout(20000L)
      .hostnames(bootstrapNodes)
      .bucket(bucket)
      .password(password == null ? "" : password)
      .controlParam(DcpControl.Names.CONNECTION_BUFFER_SIZE, 20480)
      .bufferAckWatermark(60)
      .build();
  client.controlEventHandler(new ControlEventHandler() {
    @Override
    public void onEvent(ChannelFlowController controller, ByteBuf event) {
      controller.ack(event);
      event.release();
    }
  });
  dataEventHandler = new EventHandler();
  client.dataEventHandler(dataEventHandler);
}
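Two low-level conventions are on display in the control handler: every event is acknowledged through the ChannelFlowController so that the 20480-byte CONNECTION_BUFFER_SIZE window negotiated above is replenished, and every event buffer is released afterwards, since the low-level API delivers events as reference-counted Netty ByteBufs. Examples #4 and #11 show the other side of this flow control contract.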
Example #3
Source File: PerformanceTestDriver.java (from java-dcp-client, Apache License 2.0)
private static void runTest(final Client client, Args args) throws InterruptedException {
  final CountDownLatch latch = new CountDownLatch(args.dcpMessageCount);
  final boolean highLevelApi = Boolean.parseBoolean(args.settings.getProperty("highLevelApi", "false"));
  if (highLevelApi) {
    System.out.println("Using high-level API. Won't be collecting compression metrics.");
    registerHighLevelListeners(latch, client);
  } else {
    System.out.println("Using low-level API.");
    registerLowLevelListeners(latch, client);
  }

  long startNanos = System.nanoTime();
  client.connect().await();
  client.initializeState(StreamFrom.BEGINNING, StreamTo.INFINITY).await();
  client.startStreaming().await();

  latch.await();
  System.out.println("Received at least " + args.dcpMessageCount + " messages. Done!");
  long elapsedNanos = System.nanoTime() - startNanos;
  client.disconnect().await();
  System.out.println("Shutdown complete. Receiving " + args.dcpMessageCount + " DCP events took "
      + TimeUnit.NANOSECONDS.toMillis(elapsedNanos) + " ms");
}
Example #4
Source File: PerformanceTestDriver.java (from java-dcp-client, Apache License 2.0)
private static void registerLowLevelListeners(CountDownLatch latch, Client client) {
  // Don't do anything with control events in this example
  client.controlEventHandler((flowController, event) -> {
    if (DcpSnapshotMarkerRequest.is(event)) {
      flowController.ack(event);
    }
    event.release();
  });

  client.dataEventHandler((flowController, event) -> {
    totalMessageCount.increment();

    if (MessageUtil.isSnappyCompressed(event)) {
      compressedMessageCount.increment();
      totalCompressedBytes.add(MessageUtil.getRawContent(event).readableBytes());

      // getContent() triggers decompression, so it's important for perf test to call it.
      totalDecompressedBytes.add(MessageUtil.getContent(event).readableBytes());
    }

    latch.countDown();
    flowController.ack(event);
    event.release();
  });
}
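Example #3's runTest also branches to registerHighLevelListeners, which this page does not include. Judging from the high-level listener API used in Examples #19 and #22, it plausibly looks something like the sketch below; this body is a guess for illustration, not the project's actual code.

// Hypothetical counterpart to registerLowLevelListeners, modeled on the
// DatabaseChangeListener usage in Examples #19 and #22. Not the actual
// source of PerformanceTestDriver.
private static void registerHighLevelListeners(CountDownLatch latch, Client client) {
  client.listener(new DatabaseChangeListener() {
    @Override
    public void onFailure(StreamFailure streamFailure) {
      streamFailure.getCause().printStackTrace();
    }

    @Override
    public void onMutation(Mutation mutation) {
      latch.countDown();
    }

    @Override
    public void onDeletion(Deletion deletion) {
      latch.countDown();
    }
  }, FlowControlMode.AUTOMATIC); // the library acks events automatically
}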
Example #5
Source File: Conductor.java (from java-dcp-client, Apache License 2.0)
public Conductor(final Client.Environment env, DcpClientMetrics metrics) {
  this.metrics = requireNonNull(metrics);
  this.env = env;
  this.bucketConfigArbiter = new BucketConfigArbiter(env);

  bucketConfigArbiter.configs()
      .observeOn(Schedulers.from(configUpdateExecutor))
      .forEach(config -> {
        // Couchbase sometimes says a newly created bucket has no partitions.
        // This doesn't affect cluster topology, but it's a problem for code
        // that needs to know the real partition count during startup.
        if (config.numberOfPartitions() == 0 && currentConfig.get() == null) {
          // Skip this config. The server will send another when the bucket is really ready.
          LOGGER.debug("Skipping initial config (rev {}) because it has invalid partition count.", config.rev());
          return;
        }

        LOGGER.trace("Applying new configuration, new rev is {}.", config.rev());
        currentConfig.set(config);
        reconfigure(config);
        configurationApplied.countDown();
      });
}
Example #6
Source File: CouchbaseStreamingConnectionTest.java (from components, Apache License 2.0)
@Before
public void setup() {
  PowerMockito.mockStatic(Client.class);
  Builder builder = Mockito.mock(Builder.class);
  Mockito.when(builder.connectTimeout(Mockito.anyLong())).thenReturn(builder);
  Mockito.when(builder.hostnames(Mockito.anyString())).thenReturn(builder);
  Mockito.when(builder.bucket(Mockito.anyString())).thenReturn(builder);
  Mockito.when(builder.password(Mockito.anyString())).thenReturn(builder);
  Mockito.when(builder.controlParam(Mockito.any(Names.class), Mockito.any())).thenReturn(builder);
  Mockito.when(builder.bufferAckWatermark(Mockito.anyInt())).thenReturn(builder);
  client = Mockito.mock(Client.class);
  PowerMockito.when(Client.configure()).thenReturn(builder);
  Mockito.when(builder.build()).thenReturn(client);
  streamingConnection = new CouchbaseStreamingConnection("localhost", "", "testPassword");
}
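Each builder method stubbed here corresponds to a call in the fluent chain of Example #2's constructor. The pairing matters: an unstubbed method on a Mockito mock returns null, so a missing when(...).thenReturn(builder) would break the chain with a NullPointerException as soon as CouchbaseStreamingConnection is instantiated.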
Example #7
Source File: EventHandlerAdapter.java (from java-dcp-client, Apache License 2.0)
private EventHandlerAdapter(Client dcpClient, EventDispatcher dispatcher) {
  this.dcpClient = requireNonNull(dcpClient);
  this.dispatcher = requireNonNull(dispatcher);
  dcpClient.controlEventHandler(this);
  dcpClient.systemEventHandler(this);
  dcpClient.dataEventHandler(dataEventHandler);
}
Example #8
Source File: PersistencePollingHandler.java (from java-dcp-client, Apache License 2.0)
public PersistencePollingHandler(final Client.Environment env,
                                 final BucketConfigSource bucketConfigSource,
                                 final DcpRequestDispatcher dispatcher) {
  this.env = requireNonNull(env);
  this.bucketConfigSource = requireNonNull(bucketConfigSource);
  this.persistedSeqnos = requireNonNull(env.persistedSeqnos());
  this.dcpOps = new DcpOpsImpl(dispatcher);
}
Example #9
Source File: DcpPipeline.java (from java-dcp-client, Apache License 2.0)
/**
 * Creates the pipeline.
 *
 * @param environment the stateful environment.
 * @param controlHandler the control event handler.
 */
public DcpPipeline(final Client.Environment environment, final DcpChannelControlHandler controlHandler,
                   BucketConfigArbiter bucketConfigArbiter, DcpChannelMetrics metrics) {
  this.bucketConfigArbiter = requireNonNull(bucketConfigArbiter);
  this.environment = requireNonNull(environment);
  this.controlHandler = requireNonNull(controlHandler);
  this.metrics = requireNonNull(metrics);
  if (environment.sslEnabled()) {
    this.sslEngineFactory = new SSLEngineFactory(environment);
  } else {
    this.sslEngineFactory = null;
  }
}
Example #10
Source File: DcpMessageHandler.java (from java-dcp-client, Apache License 2.0)
/**
 * Create a new message handler.
 *
 * @param environment data event callback handler.
 * @param controlHandler control event handler.
 */
DcpMessageHandler(final Channel channel, final Client.Environment environment,
                  final DcpChannelControlHandler controlHandler, final DcpChannelMetrics metrics) {
  this.dataEventHandler = environment.dataEventHandler();
  this.controlHandler = controlHandler;
  this.flowController = new ChannelFlowControllerImpl(channel, environment);
  this.metrics = requireNonNull(metrics);
}
Example #11
Source File: ChannelFlowControllerImpl.java (from java-dcp-client, Apache License 2.0)
public ChannelFlowControllerImpl(Channel channel, Client.Environment environment) {
  this.channel = requireNonNull(channel);
  this.needsBufferAck = environment.dcpControl().bufferAckEnabled();
  if (needsBufferAck) {
    int bufferAckPercent = environment.bufferAckWatermark();
    int bufferSize = Integer.parseInt(environment.dcpControl().get(DcpControl.Names.CONNECTION_BUFFER_SIZE));
    this.bufferAckWatermark = (int) Math.round(bufferSize / 100.0 * bufferAckPercent);
    LOGGER.debug("BufferAckWatermark absolute is {}", bufferAckWatermark);
  } else {
    this.bufferAckWatermark = 0;
  }
  this.bufferAckCounter = 0;
}
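With the settings from Example #2 (CONNECTION_BUFFER_SIZE of 20480 bytes, bufferAckWatermark of 60), the computed watermark is Math.round(20480 / 100.0 * 60) = 12288 bytes; that is, a buffer acknowledgement is sent once 60% of the connection buffer is unacknowledged.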
Example #12
Source File: DcpChannel.java (from java-dcp-client, Apache License 2.0)
public DcpChannel(HostAndPort address, final Client.Environment env, final Conductor conductor) {
  super(LifecycleState.DISCONNECTED);
  this.address = address;
  this.env = env;
  this.conductor = conductor;
  this.controlHandler = new DcpChannelControlHandler(this);
  this.isShutdown = false;
  this.metrics = new DcpChannelMetrics(new MetricsContext("dcp", Tags.of("remote", address.format())));
}
Example #13
Source File: StreamerServiceImpl.java (from java-dcp-client, Apache License 2.0)
@Override
public int getNumberOfPartitions(String bucket) {
  final Client client = Client.builder()
      .bucket(bucket)
      .credentials(username, password)
      .seedNodes(nodes.split(","))
      .build();

  client.connect().await();
  try {
    return client.numPartitions();
  } finally {
    client.disconnect().await();
  }
}
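This is the minimal bracketing pattern for one-off metadata queries: numPartitions() is only meaningful after connect() completes (Example #22 makes the same point), and the try/finally guarantees the client disconnects even if the query throws.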
Example #14
Source File: DcpConnectHandler.java (from java-dcp-client, Apache License 2.0)
DcpConnectHandler(final Client.Environment env) {
  this.env = requireNonNull(env);
  this.connectionNameGenerator = env.connectionNameGenerator();
  this.bucket = env.bucket();
  this.dcpControl = env.dcpControl();
}
Example #15
Source File: Rollback.java (from java-dcp-client, Apache License 2.0)
public Rollback(Client client, int vbucket, long seqno, Consumer<Throwable> errorHandler) {
  this.client = requireNonNull(client);
  this.vbucket = vbucket;
  this.seqno = seqno;
  this.errorHandler = requireNonNull(errorHandler);
}
Example #16
Source File: BucketConfigArbiter.java (from java-dcp-client, Apache License 2.0)
public BucketConfigArbiter(Client.Environment environment) {
  this.environment = requireNonNull(environment);
}
Example #17
Source File: EventHandlerAdapter.java (from java-dcp-client, Apache License 2.0)
public static EventHandlerAdapter register(Client dcpClient, EventDispatcher dispatcher) {
  return new EventHandlerAdapter(dcpClient, dispatcher);
}
Example #18
Source File: PerformanceTestDriver.java (from java-dcp-client, Apache License 2.0)
public static void main(String[] commandLineArguments) throws Exception {
  Args args = parseArgs(commandLineArguments);
  Client client = buildClient(args);
  runTest(client, args);
  generateReport();
}
Example #19
Source File: DcpStreamer.java (from java-dcp-client, Apache License 2.0)
public DcpStreamer(final Client client, final List<Short> vbuckets,
                   final StreamFrom from, final StreamTo to) {
  this.client = requireNonNull(client);
  this.streamTo = requireNonNull(to);

  client.listener(new DatabaseChangeListener() {
    @Override
    public void onFailure(StreamFailure streamFailure) {
      LOGGER.error("stream failure", streamFailure.getCause());
    }

    @Override
    public void onMutation(Mutation mutation) {
      mutations.getAndIncrement();
      mutation.flowControlAck();
    }

    @Override
    public void onDeletion(Deletion deletion) {
      (deletion.isDueToExpiration() ? expirations : deletions).getAndIncrement();
      deletion.flowControlAck();
    }

    @Override
    public void onScopeCreated(ScopeCreated scopeCreated) {
      scopeCreations.getAndIncrement();
    }

    @Override
    public void onScopeDropped(ScopeDropped scopeDropped) {
      scopeDrops.getAndIncrement();
    }

    @Override
    public void onCollectionCreated(CollectionCreated collectionCreated) {
      collectionCreations.getAndIncrement();
    }

    @Override
    public void onCollectionDropped(CollectionDropped collectionDropped) {
      collectionDrops.getAndIncrement();
    }

    @Override
    public void onCollectionFlushed(CollectionFlushed collectionFlushed) {
      collectionFlushes.getAndIncrement();
    }
  }, FlowControlMode.AUTOMATIC);

  client.connect().await(30, TimeUnit.SECONDS);
  try {
    client.initializeState(from, to).await();
    client.startStreaming(vbuckets.toArray(new Short[0])).await();
  } catch (Throwable t) {
    stop();
    throw t;
  }
}
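Because toArray on an empty list yields an empty varargs array, the @Default("[]") case from Example #1 makes this the same invocation as the no-argument client.startStreaming() in Example #3. Note also that the listener acks mutations and deletions explicitly even though FlowControlMode.AUTOMATIC is in effect; Example #22 omits those calls, so the explicit acks here appear to be optional in automatic mode.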
Example #20
Source File: PerformanceTestDriver.java (from java-dcp-client, Apache License 2.0)
private static Client buildClient(Args args) throws IOException {
  CompressionMode compressionMode = CompressionMode.valueOf(
      args.settings.getProperty("compression", CompressionMode.DISABLED.name()));

  final boolean mitigateRollbacks = Boolean.parseBoolean(args.settings.getProperty("mitigateRollbacks"));

  PerformanceTestConnectionString connectionString = new PerformanceTestConnectionString(args.connectionString);

  List<String> hostnames = new ArrayList<>();
  for (InetSocketAddress host : connectionString.hosts()) {
    final String hostAndMaybePort = host.getPort() == 0
        ? host.getHostString()
        : host.getHostString() + ":" + host.getPort();
    hostnames.add(hostAndMaybePort);
  }

  final String username = requireNonNull(connectionString.username(), "Connection string is missing username");
  final String password = requireNonNull(connectionString.password(), "Connection string is missing password");

  final Client.Builder builder = Client.builder()
      .credentials(username, password)
      .seedNodes(hostnames)
      .bucket(requireNonNull(connectionString.bucket(), "Connection string is missing bucket name"))
      .compression(compressionMode);

  if (args.collectionString != null) {
    String[] colls = args.collectionString.replace(":", ".").split(",");
    builder.collectionsAware(true).collectionNames(colls);
  }

  if (mitigateRollbacks) {
    final int KB = 1024;
    final int MB = 1024 * KB;
    final int bufferSize = 24 * MB;

    final int pollingInterval = 100;
    final TimeUnit intervalUnit = TimeUnit.MILLISECONDS;

    System.out.println("Mitigating rollbacks with flow control buffer of " + bufferSize
        + " bytes and polling interval of " + pollingInterval + " " + intervalUnit);

    builder.flowControl(bufferSize)
        .mitigateRollbacks(pollingInterval, intervalUnit);
  } else {
    System.out.println("Rollback mitigation disabled.");
  }

  return builder.build();
}
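Note how rollback mitigation is paired with a generous 24 MB flow control buffer: while mitigation is active the client defers events until it observes them persisted (polling every 100 ms here), so the buffer needs headroom for the events that accumulate between polls. The same pairing of flowControl and mitigateRollbacks appears in Examples #1 and #21.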
Example #21
Source File: CouchbaseReader.java (from kafka-connect-couchbase, Apache License 2.0)
public CouchbaseReader(CouchbaseSourceTaskConfig config, final String connectorName,
                       final BlockingQueue<DocumentChange> queue, final BlockingQueue<Throwable> errorQueue,
                       final Short[] partitions, final Map<Short, SeqnoAndVbucketUuid> partitionToSavedSeqno) {
  this.partitions = partitions;
  this.partitionToSavedSeqno = partitionToSavedSeqno;
  this.streamFrom = config.streamFrom();
  this.errorQueue = errorQueue;

  client = Client.builder()
      .userAgent("kafka-connector", Version.getVersion(), connectorName)
      .connectTimeout(config.bootstrapTimeout().toMillis())
      .seedNodes(config.dcpSeedNodes())
      .networkResolution(NetworkResolution.valueOf(config.network()))
      .bucket(config.bucket())
      .credentials(config.username(), config.password().value())
      .collectionsAware(true)
      .scopeName(config.scope())
      .collectionNames(config.collections())
      .compression(config.compression())
      .mitigateRollbacks(config.persistencePollingInterval().toMillis(), TimeUnit.MILLISECONDS)
      .flowControl(config.flowControlBuffer().getByteCountAsSaturatedInt())
      .bufferAckWatermark(60)
      .sslEnabled(config.enableTls())
      .sslKeystoreFile(config.trustStorePath())
      .sslKeystorePassword(config.trustStorePassword().value())
      .build();

  client.nonBlockingListener(new DatabaseChangeListener() {
    @Override
    public void onMutation(Mutation mutation) {
      onChange(mutation);
    }

    @Override
    public void onDeletion(Deletion deletion) {
      onChange(deletion);
    }

    private void onChange(DocumentChange change) {
      try {
        queue.put(change);
      } catch (Throwable t) {
        change.flowControlAck();
        LOGGER.error("Unable to put DCP request into the queue", t);
        errorQueue.offer(t);
      }
    }

    @Override
    public void onFailure(StreamFailure streamFailure) {
      errorQueue.offer(streamFailure.getCause());
    }
  });
}
Example #22
Source File: HighLevelApi.java (from java-dcp-client, Apache License 2.0)
public static void main(String[] args) throws Exception {
  final String bucketName = "travel-sample";

  try (Client client = Client.builder()
      .credentials("Administrator", "password")
      .seedNodes("localhost")
      .bucket(bucketName)
      .flowControl(64 * BYTES_PER_MEGABYTE)
      .userAgent("HighLevelApiExample", "0.1", "bucket:" + bucketName)
      .build()) {

    // If the listener detects a fatal error, it will tell the main thread
    // by submitting the error to this queue.
    final LinkedBlockingQueue<Throwable> fatalErrorQueue = new LinkedBlockingQueue<>();

    // Preserves stream state between runs of this program.
    final StreamOffsetTracker offsetTracker = new StreamOffsetTracker(
        Paths.get("stream-offsets-" + bucketName + ".json"));

    /*
     * Register a listener to receive database change events, with automatic
     * flow control. In automatic flow control mode, you can and should do
     * your work (blocking I/O, expensive computations, etc.) in the callback
     * handler thread.
     *
     * Processing events in the callback thread implicitly generates
     * backpressure which the DCP client uses to regulate its flow control
     * buffer and prevent OutOfMemory errors.
     *
     * This example just prints the document key and content length.
     */
    client.listener(new DatabaseChangeListener() {
      @Override
      public void onFailure(StreamFailure streamFailure) {
        fatalErrorQueue.offer(streamFailure.getCause());
      }

      @Override
      public void onMutation(Mutation mutation) {
        System.out.println("MUT: " + mutation.getKey() + " (" + mutation.getContent().length + " bytes)");
        offsetTracker.accept(mutation); // remember we processed this stream element
      }

      @Override
      public void onDeletion(Deletion deletion) {
        System.out.println("DEL: " + deletion.getKey());
        offsetTracker.accept(deletion); // remember we processed this stream element
      }
    }, FlowControlMode.AUTOMATIC); // let the library handle flow control

    // Connect the client. Need to connect in order to discover number of partitions.
    client.connect().await();

    // Restore saved stream state (or initialize state for streaming from beginning).
    offsetTracker.load(client.numPartitions());

    // Save stream state when program exits. A real program might save the offsets
    // periodically as well.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      try {
        offsetTracker.save();
      } catch (Exception e) {
        e.printStackTrace();
      }
    }));

    // Start the streams! The offset map's keyset determines which partitions are streamed,
    // and the values determine the starting point for each partition.
    client.resumeStreaming(offsetTracker.offsets()).await();

    // Wait until a fatal error occurs or the program is killed.
    fatalErrorQueue.take().printStackTrace();
  }
}
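Run against a local cluster, this program assumes the travel-sample sample bucket is loaded and that the Administrator/password credentials are valid. Because offsets are saved on shutdown and reloaded on startup, killing and restarting the program resumes the stream where it left off rather than replaying from the beginning.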