Java Code Examples for org.apache.distributedlog.api.namespace.Namespace#openLog()
The following examples show how to use org.apache.distributedlog.api.namespace.Namespace#openLog().
Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
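Before diving into the examples, here is a minimal sketch of the pattern they all share: build a Namespace from a DistributedLog URI with NamespaceBuilder, call openLog() to obtain a DistributedLogManager for a named stream, and close the manager and the namespace when finished. The URI and stream name below are illustrative placeholders, not values taken from any of the projects listed.

import java.net.URI;

import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;

public class OpenLogSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative placeholders; point these at your own ZooKeeper ensemble and stream.
        URI uri = URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace");
        String streamName = "my-stream";

        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        Namespace namespace = NamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(uri)
                .build();

        // openLog() returns a DistributedLogManager bound to the named stream.
        DistributedLogManager dlm = namespace.openLog(streamName);
        try {
            System.out.println("Opened log stream " + streamName);
            // ... read or write via dlm here ...
        } finally {
            // Close the manager before closing the namespace.
            dlm.close();
            namespace.close();
        }
    }
}

Note that whether a missing stream is usable after openLog() depends on the createStreamIfNotExists setting: as Examples 4 and 6 demonstrate, with that setting disabled a write to a non-existent stream fails until namespace.createLog() is called.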
Example 1
Source File: DistributedLogTool.java From distributedlog with Apache License 2.0
private void truncateStreams(Namespace namespace, List<String> streams,
                             int tid, int numStreamsPerThreads) throws IOException {
    int startIdx = tid * numStreamsPerThreads;
    int endIdx = Math.min(streams.size(), (tid + 1) * numStreamsPerThreads);
    for (int i = startIdx; i < endIdx; i++) {
        String s = streams.get(i);
        DistributedLogManager dlm = namespace.openLog(s);
        try {
            if (deleteStream) {
                dlm.delete();
            } else {
                dlm.purgeLogsOlderThan(Long.MAX_VALUE);
            }
        } finally {
            dlm.close();
        }
    }
}
Example 2
Source File: TestBKDistributedLogNamespace.java From distributedlog with Apache License 2.0
private void initDlogMeta(String dlNamespace, String un, String streamName) throws Exception {
    URI uri = createDLMURI(dlNamespace);
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(conf);
    newConf.setCreateStreamIfNotExists(true);
    newConf.setZkAclId(un);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(newConf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(streamName);
    LogWriter writer = dlm.startLogSegmentNonPartitioned();
    for (int i = 0; i < 10; i++) {
        writer.write(DLMTestUtil.getLogRecordInstance(1L));
    }
    writer.close();
    dlm.close();
    namespace.close();
}
Example 3
Source File: DistributedLogTool.java From distributedlog with Apache License 2.0
private void deleteSubscriber(Namespace namespace, List<String> streams,
                              int tid, int numStreamsPerThreads) throws Exception {
    int startIdx = tid * numStreamsPerThreads;
    int endIdx = Math.min(streams.size(), (tid + 1) * numStreamsPerThreads);
    for (int i = startIdx; i < endIdx; i++) {
        final String s = streams.get(i);
        DistributedLogManager dlm = namespace.openLog(s);
        final CountDownLatch countDownLatch = new CountDownLatch(1);
        dlm.getSubscriptionsStore().deleteSubscriber(subscriberId)
            .whenComplete(new FutureEventListener<Boolean>() {
                @Override
                public void onFailure(Throwable cause) {
                    System.out.println("Failed to delete subscriber for stream " + s);
                    cause.printStackTrace();
                    countDownLatch.countDown();
                }

                @Override
                public void onSuccess(Boolean value) {
                    countDownLatch.countDown();
                }
            });
        countDownLatch.await();
        dlm.close();
    }
}
Example 4
Source File: TestBKDistributedLogNamespace.java From distributedlog with Apache License 2.0
private void createLogPathTest(String logName) throws Exception {
    URI uri = createDLMURI("/" + runtime.getMethodName());
    ensureURICreated(zooKeeperClient.get(), uri);
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(conf);
    newConf.setCreateStreamIfNotExists(false);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(newConf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(logName);
    LogWriter writer;
    try {
        writer = dlm.startLogSegmentNonPartitioned();
        writer.write(DLMTestUtil.getLogRecordInstance(1L));
        writer.commit();
        fail("Should fail to write data if stream doesn't exist.");
    } catch (IOException ioe) {
        // expected
    }
    dlm.close();
}
Example 5
Source File: DLMTestUtil.java From distributedlog with Apache License 2.0
public static DistributedLogManager createNewDLM(String name,
                                                 DistributedLogConfiguration conf,
                                                 URI uri) throws Exception {
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(conf).uri(uri).build();
    return namespace.openLog(name);
}
Example 6
Source File: TestBKDistributedLogNamespace.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testCreateIfNotExists() throws Exception {
    URI uri = createDLMURI("/" + runtime.getMethodName());
    ensureURICreated(zooKeeperClient.get(), uri);
    DistributedLogConfiguration newConf = new DistributedLogConfiguration();
    newConf.addConfiguration(conf);
    newConf.setCreateStreamIfNotExists(false);
    String streamName = "test-stream";
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(newConf).uri(uri).build();
    DistributedLogManager dlm = namespace.openLog(streamName);
    LogWriter writer;
    try {
        writer = dlm.startLogSegmentNonPartitioned();
        writer.write(DLMTestUtil.getLogRecordInstance(1L));
        fail("Should fail to write data if stream doesn't exist.");
    } catch (IOException ioe) {
        // expected
    }
    dlm.close();

    // create the stream
    namespace.createLog(streamName);
    DistributedLogManager newDLM = namespace.openLog(streamName);
    LogWriter newWriter = newDLM.startLogSegmentNonPartitioned();
    newWriter.write(DLMTestUtil.getLogRecordInstance(1L));
    newWriter.close();
    newDLM.close();
}
Example 7
Source File: TailReader.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String streamName = args[1];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(conf)
        .uri(uri)
        .build();

    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);

    // get the last record
    LogRecordWithDLSN lastRecord;
    DLSN dlsn;
    try {
        lastRecord = dlm.getLastLogRecord();
        dlsn = lastRecord.getDlsn();
        readLoop(dlm, dlsn);
    } catch (LogNotFoundException lnfe) {
        System.err.println("Log stream " + streamName + " is not found. Please create it first.");
        return;
    } catch (LogEmptyException lee) {
        System.err.println("Log stream " + streamName + " is empty.");
        dlsn = DLSN.InitialDLSN;
        readLoop(dlm, dlsn);
    } finally {
        dlm.close();
        namespace.close();
    }
}
Example 8
Source File: DistributedLogAdmin.java From distributedlog with Apache License 2.0
private static StreamCandidate checkStream(final Namespace namespace,
                                           final String streamName,
                                           final OrderedScheduler scheduler) throws IOException {
    DistributedLogManager dlm = namespace.openLog(streamName);
    try {
        List<LogSegmentMetadata> segments = dlm.getLogSegments();
        if (segments.isEmpty()) {
            return null;
        }
        List<CompletableFuture<LogSegmentCandidate>> futures =
            new ArrayList<CompletableFuture<LogSegmentCandidate>>(segments.size());
        for (LogSegmentMetadata segment : segments) {
            futures.add(checkLogSegment(namespace, streamName, segment, scheduler));
        }
        List<LogSegmentCandidate> segmentCandidates;
        try {
            segmentCandidates = FutureUtils.result(FutureUtils.collect(futures));
        } catch (Exception e) {
            throw new IOException("Failed on checking stream " + streamName, e);
        }
        StreamCandidate streamCandidate = new StreamCandidate(streamName);
        for (LogSegmentCandidate segmentCandidate : segmentCandidates) {
            if (null != segmentCandidate) {
                streamCandidate.addLogSegmentCandidate(segmentCandidate);
            }
        }
        if (streamCandidate.segmentCandidates.isEmpty()) {
            return null;
        }
        return streamCandidate;
    } finally {
        dlm.close();
    }
}
Example 9
Source File: TestLogSegmentsZK.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testCompleteLogSegmentConflicts() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setLockTimeout(99999)
        .setOutputBufferSize(0)
        .setImmediateFlushEnabled(true)
        .setEnableLedgerAllocatorPool(true)
        .setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();

    namespace.createLog(streamName);
    DistributedLogManager dlm1 = namespace.openLog(streamName);
    DistributedLogManager dlm2 = namespace.openLog(streamName);

    // dlm1 is writing
    BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
    out1.write(DLMTestUtil.getLogRecordInstance(1));
    // before out1 complete, out2 is in on recovery
    // it completed the log segments which bump the version of /ledgers znode
    BKAsyncLogWriter out2 = (BKAsyncLogWriter) dlm2.startAsyncLogSegmentNonPartitioned();

    try {
        out1.closeAndComplete();
        fail("Should fail closeAndComplete since other people already completed it.");
    } catch (IOException ioe) {
    }
}
Example 10
Source File: WorkerUtils.java From pulsar with Apache License 2.0
public static void downloadFromBookkeeper(Namespace namespace,
                                          OutputStream outputStream,
                                          String packagePath) throws IOException {
    log.info("Downloading {} from BK...", packagePath);
    DistributedLogManager dlm = namespace.openLog(packagePath);
    try (InputStream in = new DLInputStream(dlm)) {
        int read = 0;
        byte[] bytes = new byte[1024];
        while ((read = in.read(bytes)) != -1) {
            outputStream.write(bytes, 0, read);
        }
        outputStream.flush();
    }
}
Example 11
Source File: TestLogSegmentsZK.java From distributedlog with Apache License 2.0
/**
 * Create a log segment while the max sequence number doesn't match the list of log segments.
 */
@Test(timeout = 60000)
public void testCreateLogSegmentUnmatchMaxSequenceNumber() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setLockTimeout(99999)
        .setOutputBufferSize(0)
        .setImmediateFlushEnabled(true)
        .setEnableLedgerAllocatorPool(true)
        .setLedgerAllocatorPoolName("test");
    Namespace namespace = NamespaceBuilder.newBuilder().conf(conf).uri(uri).build();

    namespace.createLog(streamName);

    MaxLogSegmentSequenceNo max1 = getMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf);
    assertEquals(DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO, max1.getSequenceNumber());

    DistributedLogManager dlm = namespace.openLog(streamName);
    final int numSegments = 3;
    for (int i = 0; i < numSegments; i++) {
        BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        out.write(DLMTestUtil.getLogRecordInstance(i));
        out.closeAndComplete();
    }

    MaxLogSegmentSequenceNo max2 = getMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf);
    assertEquals(3, max2.getSequenceNumber());

    // update the max ledger sequence number
    updateMaxLogSegmentSequenceNo(getZooKeeperClient(namespace), uri, streamName, conf,
        DLUtils.serializeLogSegmentSequenceNumber(99));

    DistributedLogManager dlm1 = namespace.openLog(streamName);
    try {
        BKSyncLogWriter out1 = (BKSyncLogWriter) dlm1.startLogSegmentNonPartitioned();
        out1.write(DLMTestUtil.getLogRecordInstance(numSegments + 1));
        out1.closeAndComplete();
        fail("Should fail creating new log segment when encountered unmatch max ledger sequence number");
    } catch (DLIllegalStateException lse) {
        // expected
    } finally {
        dlm1.close();
    }

    DistributedLogManager dlm2 = namespace.openLog(streamName);
    List<LogSegmentMetadata> segments = dlm2.getLogSegments();
    try {
        assertEquals(3, segments.size());
        assertEquals(1L, segments.get(0).getLogSegmentSequenceNumber());
        assertEquals(2L, segments.get(1).getLogSegmentSequenceNumber());
        assertEquals(3L, segments.get(2).getLogSegmentSequenceNumber());
    } finally {
        dlm2.close();
    }

    dlm.close();
    namespace.close();
}
Example 12
Source File: ReaderTest.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 4) {
        System.err.println("ReaderTest <uri> <stream> <num_records> <start_tx_id>");
        return;
    }

    URI uri = URI.create(args[0]);
    String streamName = args[1];
    int numRecords = Integer.parseInt(args[2]);
    final long startTxId = Long.parseLong(args[3]);

    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setOutputBufferSize(0)
        .setPeriodicFlushFrequencyMilliSeconds(2);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .uri(uri)
        .conf(conf)
        .build();
    try {
        try (DistributedLogManager manager = namespace.openLog(streamName)) {
            AsyncLogReader reader = FutureUtils.result(manager.openAsyncLogReader(startTxId));
            try {
                System.out.println("Try to read " + numRecords + " records from stream " + streamName + " .");
                for (int i = 0; i < numRecords; ++i) {
                    LogRecord record = FutureUtils.result(reader.readNext());
                    String data = new String(record.getPayload(), UTF_8);
                    System.out.println("Read record : " + data);

                    String expectedData = "record-" + (startTxId + i);
                    checkArgument(expectedData.equals(data),
                        "Expected = " + expectedData + ", Actual = " + data);
                    long expectedTxId = startTxId + i;
                    checkArgument(expectedTxId == record.getTransactionId(),
                        "Expected TxId = " + expectedTxId + ", Actual TxId = " + record.getTransactionId());
                }
                System.out.println("Successfully read " + numRecords + " records to stream " + streamName + " .");
            } finally {
                Utils.close(reader);
            }
        }
    } finally {
        namespace.close();
    }
}
Example 13
Source File: StreamTransformer.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (3 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String srcStreamName = args[1];
    final String targetStreamName = args[2];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setOutputBufferSize(16*1024); // 16KB
    conf.setPeriodicFlushFrequencyMilliSeconds(5); // 5ms
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(conf)
        .uri(uri)
        .build();

    // open the dlm
    System.out.println("Opening log stream " + srcStreamName);
    DistributedLogManager srcDlm = namespace.openLog(srcStreamName);
    System.out.println("Opening log stream " + targetStreamName);
    DistributedLogManager targetDlm = namespace.openLog(targetStreamName);

    Transformer<byte[], byte[]> replicationTransformer = new IdenticalTransformer<byte[]>();

    LogRecordWithDLSN lastTargetRecord;
    DLSN srcDlsn;
    try {
        lastTargetRecord = targetDlm.getLastLogRecord();
        TransformedRecord lastTransformedRecord = new TransformedRecord();
        try {
            lastTransformedRecord.read(protocolFactory.getProtocol(
                new TIOStreamTransport(new ByteArrayInputStream(lastTargetRecord.getPayload()))));
            srcDlsn = DLSN.deserializeBytes(lastTransformedRecord.getSrcDlsn());
            System.out.println("Last transformed record is " + srcDlsn);
        } catch (TException e) {
            System.err.println("Error on reading last transformed record");
            e.printStackTrace(System.err);
            srcDlsn = DLSN.InitialDLSN;
        }
    } catch (LogNotFoundException lnfe) {
        srcDlsn = DLSN.InitialDLSN;
    } catch (LogEmptyException lee) {
        srcDlsn = DLSN.InitialDLSN;
    }

    AsyncLogWriter targetWriter = FutureUtils.result(targetDlm.openAsyncLogWriter());
    try {
        readLoop(srcDlm, srcDlsn, targetWriter, replicationTransformer);
    } finally {
        FutureUtils.result(targetWriter.asyncClose(), 5, TimeUnit.SECONDS);
        targetDlm.close();
        srcDlm.close();
        namespace.close();
    }
}
Example 14
Source File: Util.java From incubator-heron with Apache License 2.0
private static InputStream openInputStream(Namespace ns, String logName) throws Exception {
    DistributedLogManager dlm = ns.openLog(logName);
    return new DLInputStream(dlm);
}
Example 15
Source File: WriterTest.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 3) {
        System.err.println("WriterTest <uri> <stream> <num_records>");
        return;
    }

    URI uri = URI.create(args[0]);
    String streamName = args[1];
    int numRecords = Integer.parseInt(args[2]);

    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setOutputBufferSize(0)
        .setPeriodicFlushFrequencyMilliSeconds(2);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .uri(uri)
        .conf(conf)
        .build();
    try {
        try (DistributedLogManager manager = namespace.openLog(streamName)) {
            AsyncLogWriter writer = FutureUtils.result(manager.openAsyncLogWriter());
            try {
                long txid = writer.getLastTxId();
                if (txid < 0L) {
                    txid = 0L;
                }
                System.out.println("Publishing " + numRecords + " records to stream " + streamName + " .");
                for (int i = 1; i <= numRecords; ++i) {
                    String content = "record-" + (txid + i);
                    LogRecord record = new LogRecord(txid + i, content.getBytes(UTF_8));
                    FutureUtils.result(writer.write(record));
                    System.out.println("Write record : " + content);
                }
                System.out.println("Successfully published " + numRecords + " records to stream " + streamName + " .");
            } finally {
                Utils.close(writer);
            }
        }
    } finally {
        namespace.close();
    }
}
Example 16
Source File: TestAsyncReaderWriter.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testCreateLogStreamWithDifferentReplicationFactor() throws Exception {
    String name = runtime.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(testConf);
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);

    ConcurrentBaseConfiguration baseConf = new ConcurrentConstConfiguration(confLocal);
    DynamicDistributedLogConfiguration dynConf = new DynamicDistributedLogConfiguration(baseConf);
    dynConf.setProperty(DistributedLogConfiguration.BKDL_BOOKKEEPER_ENSEMBLE_SIZE,
        DistributedLogConfiguration.BKDL_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT - 1);

    URI uri = createDLMURI("/" + name);
    ensureURICreated(uri);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(confLocal).uri(uri).build();

    // use the pool
    DistributedLogManager dlm = namespace.openLog(name + "-pool");
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1L)));
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(1, segments.size());
    long ledgerId = segments.get(0).getLogSegmentId();
    LedgerHandle lh = ((BKNamespaceDriver) namespace.getNamespaceDriver()).getReaderBKC().get()
        .openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.CRC32, confLocal.getBKDigestPW().getBytes(UTF_8));
    LedgerMetadata metadata = BookKeeperAccessor.getLedgerMetadata(lh);
    assertEquals(DistributedLogConfiguration.BKDL_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT, metadata.getEnsembleSize());
    lh.close();
    Utils.close(writer);
    dlm.close();

    // use customized configuration
    dlm = namespace.openLog(
        name + "-custom",
        java.util.Optional.empty(),
        java.util.Optional.of(dynConf),
        java.util.Optional.empty());
    writer = dlm.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1L)));
    segments = dlm.getLogSegments();
    assertEquals(1, segments.size());
    ledgerId = segments.get(0).getLogSegmentId();
    lh = ((BKNamespaceDriver) namespace.getNamespaceDriver()).getReaderBKC().get()
        .openLedgerNoRecovery(ledgerId, BookKeeper.DigestType.CRC32, confLocal.getBKDigestPW().getBytes(UTF_8));
    metadata = BookKeeperAccessor.getLedgerMetadata(lh);
    assertEquals(DistributedLogConfiguration.BKDL_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT - 1, metadata.getEnsembleSize());
    lh.close();
    Utils.close(writer);
    dlm.close();
    namespace.close();
}
Example 17
Source File: TestAsyncReaderLock.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testReaderLockSessionExpires() throws Exception {
    String name = runtime.getMethodName();
    URI uri = createDLMURI("/" + name);
    ensureURICreated(uri);
    Namespace ns0 = NamespaceBuilder.newBuilder()
        .conf(conf)
        .uri(uri)
        .build();
    DistributedLogManager dlm0 = ns0.openLog(name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm0.startAsyncLogSegmentNonPartitioned());
    writer.write(DLMTestUtil.getLogRecordInstance(1L));
    writer.write(DLMTestUtil.getLogRecordInstance(2L));
    writer.closeAndComplete();

    Namespace ns1 = NamespaceBuilder.newBuilder()
        .conf(conf)
        .uri(uri)
        .build();
    DistributedLogManager dlm1 = ns1.openLog(name);
    CompletableFuture<AsyncLogReader> futureReader1 = dlm1.getAsyncLogReaderWithLock(DLSN.InitialDLSN);
    AsyncLogReader reader1 = Utils.ioResult(futureReader1);
    ZooKeeperClientUtils.expireSession(((BKNamespaceDriver) ns1.getNamespaceDriver()).getWriterZKC(), zkServers, 1000);

    // The result of expireSession is somewhat non-deterministic with this lock.
    // It may fail with LockingException or it may successfully reacquire, so for
    // the moment rather than make it deterministic we accept either result.
    boolean success = false;
    try {
        Utils.ioResult(reader1.readNext());
        success = true;
    } catch (LockingException ex) {
    }
    if (success) {
        Utils.ioResult(reader1.readNext());
    }

    Utils.close(reader1);
    dlm0.close();
    ns0.close();
    dlm1.close();
    ns1.close();
}
Example 18
Source File: TestDLCK.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testCheckAndRepairDLNamespace() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    confLocal.setLogSegmentSequenceNumberValidationEnabled(false);
    confLocal.setLogSegmentCacheEnabled(false);

    URI uri = createDLMURI("/check-and-repair-dl-namespace");
    zkc.get().create(uri.getPath(), new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(confLocal)
        .uri(uri)
        .build();
    OrderedScheduler scheduler = OrderedScheduler.newBuilder()
        .name("dlck-tool")
        .corePoolSize(1)
        .build();
    ExecutorService executorService = Executors.newCachedThreadPool();

    String streamName = "check-and-repair-dl-namespace";

    // Create completed log segments
    DistributedLogManager dlm = namespace.openLog(streamName);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 1L, 1L, 10, false);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 2L, 11L, 10, true);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 3L, 21L, 10, false);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 4L, 31L, 10, true);

    // dryrun
    DistributedLogAdmin.checkAndRepairDLNamespace(
        uri,
        namespace,
        new DryrunLogSegmentMetadataStoreUpdater(confLocal, getLogSegmentMetadataStore(namespace)),
        scheduler,
        false,
        false);

    Map<Long, LogSegmentMetadata> segments = getLogSegments(dlm);
    LOG.info("segments after drynrun {}", segments);
    verifyLogSegment(segments, new DLSN(1L, 18L, 0L), 1L, 10, 10L);
    verifyLogSegment(segments, new DLSN(2L, 16L, 0L), 2L, 9, 19L);
    verifyLogSegment(segments, new DLSN(3L, 18L, 0L), 3L, 10, 30L);
    verifyLogSegment(segments, new DLSN(4L, 16L, 0L), 4L, 9, 39L);

    // check and repair
    DistributedLogAdmin.checkAndRepairDLNamespace(
        uri,
        namespace,
        LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, getLogSegmentMetadataStore(namespace)),
        scheduler,
        false,
        false);

    segments = getLogSegments(dlm);
    LOG.info("segments after repair {}", segments);
    verifyLogSegment(segments, new DLSN(1L, 18L, 0L), 1L, 10, 10L);
    verifyLogSegment(segments, new DLSN(2L, 18L, 0L), 2L, 10, 20L);
    verifyLogSegment(segments, new DLSN(3L, 18L, 0L), 3L, 10, 30L);
    verifyLogSegment(segments, new DLSN(4L, 18L, 0L), 4L, 10, 40L);

    dlm.close();
    SchedulerUtils.shutdownScheduler(executorService, 5, TimeUnit.MINUTES);
    namespace.close();
}
Example 19
Source File: ReaderWithOffsets.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (4 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String streamName = args[1];
    final String readerId = args[2];
    final String offsetStoreFile = args[3];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(conf)
        .uri(uri)
        .build();

    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);

    // open the offset store
    Options options = new Options();
    options.createIfMissing(true);
    final DB offsetDB = factory.open(new File(offsetStoreFile), options);
    final AtomicReference<DLSN> lastDLSN = new AtomicReference<DLSN>(null);

    // offset updater
    final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            if (null != lastDLSN.get()) {
                offsetDB.put(readerId.getBytes(UTF_8), lastDLSN.get().serializeBytes());
                System.out.println("Updated reader " + readerId + " offset to " + lastDLSN.get());
            }
        }
    }, 10, 10, TimeUnit.SECONDS);

    try {
        byte[] offset = offsetDB.get(readerId.getBytes(UTF_8));
        DLSN dlsn;
        if (null == offset) {
            dlsn = DLSN.InitialDLSN;
        } else {
            dlsn = DLSN.deserializeBytes(offset);
        }
        readLoop(dlm, dlsn, lastDLSN);
    } finally {
        offsetDB.close();
        dlm.close();
        namespace.close();
    }
}
Example 20
Source File: ConsoleWriter.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String streamName = args[1];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setImmediateFlushEnabled(true);
    conf.setOutputBufferSize(0);
    conf.setPeriodicFlushFrequencyMilliSeconds(0);
    conf.setLockTimeout(DistributedLogConstants.LOCK_IMMEDIATE);
    Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(conf)
        .uri(uri)
        .regionId(DistributedLogConstants.LOCAL_REGION_ID)
        .clientId("console-writer")
        .build();

    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);

    try {
        AsyncLogWriter writer = null;
        try {
            writer = FutureUtils.result(dlm.openAsyncLogWriter());

            ConsoleReader reader = new ConsoleReader();
            String line;
            while ((line = reader.readLine(PROMPT_MESSAGE)) != null) {
                writer.write(new LogRecord(System.currentTimeMillis(), line.getBytes(UTF_8)))
                    .whenComplete(new FutureEventListener<DLSN>() {
                        @Override
                        public void onFailure(Throwable cause) {
                            System.out.println("Encountered error on writing data");
                            cause.printStackTrace(System.err);
                            Runtime.getRuntime().exit(0);
                        }

                        @Override
                        public void onSuccess(DLSN value) {
                            // done
                        }
                    });
            }
        } finally {
            if (null != writer) {
                FutureUtils.result(writer.asyncClose(), 5, TimeUnit.SECONDS);
            }
        }
    } finally {
        dlm.close();
        namespace.close();
    }
}