Java Code Examples for org.apache.distributedlog.DistributedLogConfiguration#setOutputBufferSize()
The following examples show how to use org.apache.distributedlog.DistributedLogConfiguration#setOutputBufferSize().
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
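Before the examples, here is a minimal, self-contained sketch of the two configurations that recur below. It is illustrative only and not drawn from any of the listed projects; the namespace URI and stream name are placeholders. An output buffer size of 0 disables write batching so each record is transmitted to BookKeeper right away, the pattern the tests and the console writer below use for low-latency, deterministic writes; a non-zero size such as 16KB batches records for throughput, as in the StreamTransformer example.

import java.net.URI;

import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;

public class OutputBufferSizeSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder namespace URI; substitute your own DistributedLog URI.
        URI uri = URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace");

        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        // Latency-oriented: no output buffering, flush every record immediately
        // (the pattern used by the test and console-writer examples below).
        conf.setOutputBufferSize(0);
        conf.setImmediateFlushEnabled(true);
        conf.setPeriodicFlushFrequencyMilliSeconds(0);

        // Throughput-oriented alternative: buffer up to 16KB of records and
        // rely on periodic flushes (the pattern used by StreamTransformer below).
        // conf.setOutputBufferSize(16 * 1024);
        // conf.setImmediateFlushEnabled(false);
        // conf.setPeriodicFlushFrequencyMilliSeconds(5);

        Namespace namespace = NamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(uri)
                .build();
        try {
            // Open (and immediately close) a log stream with this configuration.
            namespace.openLog("example-stream").close();
        } finally {
            namespace.close();
        }
    }
}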
Example 1
Source File: DLNamespace.java From Elasticsearch with Apache License 2.0
public static synchronized DistributedLogNamespace getNamespace(Settings settings, String localNodeId)
        throws IllegalArgumentException, NullPointerException, IOException {
    if (logNamespace == null) {
        String logServiceUrl = settings.get(LOG_SERVICE_ENDPOINT);
        URI uri = URI.create(logServiceUrl);
        DistributedLogConfiguration conf = new DistributedLogConfiguration();
        conf.setOutputBufferSize(settings.getAsInt(DL_MERGE_BUFFER_SIZE, 4 * 1024));
        // Immediate flush writes a control record right after each user record, so that a
        // current client can read the record immediately - at the cost of two writes to
        // BookKeeper per record. It is not needed here: the replica replays the log and does
        // not need to read records immediately. If the primary fails and then recovers, it
        // writes a control record to BookKeeper and can read the log again.
        conf.setImmediateFlushEnabled(false);
        // The lease already guarantees there is only one writer, so disable the write lock.
        conf.setWriteLockEnabled(false);
        // Flush periodically so the LAC advances and other nodes can see the latest records.
        conf.setPeriodicFlushFrequencyMilliSeconds(2);
        // Disable batched writes to BookKeeper.
        conf.setMinDelayBetweenImmediateFlushMs(0);
        conf.setZKSessionTimeoutSeconds(settings.getAsInt(ZK_SESSION_TIMEOUT, 10));
        conf.setLockTimeout(DistributedLogConstants.LOCK_IMMEDIATE);
        // Must be 0 to disable the time-based rolling policy and enable size-based rolling.
        conf.setLogSegmentRollingIntervalMinutes(0);
        conf.setMaxLogSegmentBytes(1 << 20 << settings.getAsInt(DL_SEGMENT_SIZE_MB, 8)); // 256MB by default
        conf.setEnsembleSize(settings.getAsInt(DL_ENSEMBLE_SIZE, 3));
        conf.setAckQuorumSize(settings.getAsInt(DL_ACK_QUORUM_SIZE, 2));
        conf.setWriteQuorumSize(settings.getAsInt(DL_REPLICA_NUM, 3));
        conf.setRowAwareEnsemblePlacementEnabled(false);
        conf.setReadAheadMaxRecords(100);
        conf.setReadAheadBatchSize(3);
        // Set to true to disable automatic truncation; the application truncates explicitly.
        conf.setExplicitTruncationByApplication(true);
        // DistributedLog purges truncated log segments after one hour.
        conf.setRetentionPeriodHours(1);
        logNamespace = DistributedLogNamespaceBuilder.newBuilder()
                .conf(conf)
                .uri(uri)
                .regionId(DistributedLogConstants.LOCAL_REGION_ID)
                .clientId(localNodeId)
                .build();
    }
    return logNamespace;
}
Example 2
Source File: TestDLCK.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testCheckAndRepairDLNamespace() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setImmediateFlushEnabled(true);
    confLocal.setOutputBufferSize(0);
    confLocal.setLogSegmentSequenceNumberValidationEnabled(false);
    confLocal.setLogSegmentCacheEnabled(false);
    URI uri = createDLMURI("/check-and-repair-dl-namespace");
    zkc.get().create(uri.getPath(), new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(confLocal)
            .uri(uri)
            .build();
    OrderedScheduler scheduler = OrderedScheduler.newBuilder()
            .name("dlck-tool")
            .corePoolSize(1)
            .build();
    ExecutorService executorService = Executors.newCachedThreadPool();
    String streamName = "check-and-repair-dl-namespace";
    // Create completed log segments
    DistributedLogManager dlm = namespace.openLog(streamName);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 1L, 1L, 10, false);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 2L, 11L, 10, true);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 3L, 21L, 10, false);
    DLMTestUtil.injectLogSegmentWithLastDLSN(dlm, confLocal, 4L, 31L, 10, true);
    // dryrun
    DistributedLogAdmin.checkAndRepairDLNamespace(
            uri,
            namespace,
            new DryrunLogSegmentMetadataStoreUpdater(confLocal, getLogSegmentMetadataStore(namespace)),
            scheduler,
            false,
            false);
    Map<Long, LogSegmentMetadata> segments = getLogSegments(dlm);
    LOG.info("segments after dryrun {}", segments);
    verifyLogSegment(segments, new DLSN(1L, 18L, 0L), 1L, 10, 10L);
    verifyLogSegment(segments, new DLSN(2L, 16L, 0L), 2L, 9, 19L);
    verifyLogSegment(segments, new DLSN(3L, 18L, 0L), 3L, 10, 30L);
    verifyLogSegment(segments, new DLSN(4L, 16L, 0L), 4L, 9, 39L);
    // check and repair
    DistributedLogAdmin.checkAndRepairDLNamespace(
            uri,
            namespace,
            LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, getLogSegmentMetadataStore(namespace)),
            scheduler,
            false,
            false);
    segments = getLogSegments(dlm);
    LOG.info("segments after repair {}", segments);
    verifyLogSegment(segments, new DLSN(1L, 18L, 0L), 1L, 10, 10L);
    verifyLogSegment(segments, new DLSN(2L, 18L, 0L), 2L, 10, 20L);
    verifyLogSegment(segments, new DLSN(3L, 18L, 0L), 3L, 10, 30L);
    verifyLogSegment(segments, new DLSN(4L, 18L, 0L), 4L, 10, 40L);
    dlm.close();
    SchedulerUtils.shutdownScheduler(executorService, 5, TimeUnit.MINUTES);
    namespace.close();
}
Example 3
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testReadEntriesFromCompleteLogSegment() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(10);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    boolean done = false;
    long txId = 1L;
    long entryId = 0L;
    while (!done) {
        Entry.Reader entryReader;
        try {
            entryReader = Utils.ioResult(reader.readNext(1)).get(0);
        } catch (EndOfLogSegmentException eol) {
            done = true;
            continue;
        }
        LogRecordWithDLSN record = entryReader.nextRecord();
        while (null != record) {
            if (!record.isControl()) {
                DLMTestUtil.verifyLogRecord(record);
                assertEquals(txId, record.getTransactionId());
                ++txId;
            }
            DLSN dlsn = record.getDlsn();
            assertEquals(1L, dlsn.getLogSegmentSequenceNo());
            assertEquals(entryId, dlsn.getEntryId());
            record = entryReader.nextRecord();
        }
        ++entryId;
    }
    assertEquals(21, txId);
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
Example 4
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testMaxPrefetchEntriesSmallBatch() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(2);
    confLocal.setMaxPrefetchEntriesPerLogSegment(10);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < 10) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    long txId = 1L;
    long entryId = 0L;
    assertEquals(10, reader.readAheadEntries.size());
    assertEquals(10, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());
    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);
    // wait for the read ahead entries to become 10 again
    while (reader.readAheadEntries.size() < 10) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(10, reader.readAheadEntries.size());
    assertEquals(11, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
Example 5
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testMaxPrefetchEntriesLargeBatch() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(5);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 20);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < 5) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    long txId = 1L;
    long entryId = 0L;
    assertEquals(5, reader.readAheadEntries.size());
    assertEquals(5, reader.getNextEntryId());
    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);
    // wait for the read ahead entries to become 5 again
    while (reader.readAheadEntries.size() < 5) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    assertEquals(5, reader.readAheadEntries.size());
    assertEquals(6, reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
Example 6
Source File: TestBKLogSegmentEntryReader.java From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testMaxPrefetchEntriesSmallSegment() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setNumPrefetchEntriesPerLogSegment(10);
    confLocal.setMaxPrefetchEntriesPerLogSegment(20);
    DistributedLogManager dlm = createNewDLM(confLocal, runtime.getMethodName());
    generateCompletedLogSegments(dlm, confLocal, 1, 5);
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    assertEquals(segments.size() + " log segments found, expected to be only one", 1, segments.size());
    BKLogSegmentEntryReader reader = createEntryReader(segments.get(0), 0, confLocal);
    reader.start();
    // wait for the read ahead entries to become available
    while (reader.readAheadEntries.size() < (reader.getLastAddConfirmed() + 1)) {
        TimeUnit.MILLISECONDS.sleep(10);
    }
    long txId = 1L;
    long entryId = 0L;
    assertEquals((reader.getLastAddConfirmed() + 1), reader.readAheadEntries.size());
    assertEquals((reader.getLastAddConfirmed() + 1), reader.getNextEntryId());
    // read first entry
    Entry.Reader entryReader = Utils.ioResult(reader.readNext(1)).get(0);
    LogRecordWithDLSN record = entryReader.nextRecord();
    while (null != record) {
        if (!record.isControl()) {
            DLMTestUtil.verifyLogRecord(record);
            assertEquals(txId, record.getTransactionId());
            ++txId;
        }
        DLSN dlsn = record.getDlsn();
        assertEquals(1L, dlsn.getLogSegmentSequenceNo());
        assertEquals(entryId, dlsn.getEntryId());
        record = entryReader.nextRecord();
    }
    ++entryId;
    assertEquals(2L, txId);
    assertEquals(reader.getLastAddConfirmed(), reader.readAheadEntries.size());
    assertEquals((reader.getLastAddConfirmed() + 1), reader.getNextEntryId());
    assertFalse(reader.hasCaughtUpOnInprogress());
    Utils.close(reader);
}
Example 7
Source File: ConsoleWriter.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String streamName = args[1];
    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setImmediateFlushEnabled(true);
    conf.setOutputBufferSize(0);
    conf.setPeriodicFlushFrequencyMilliSeconds(0);
    conf.setLockTimeout(DistributedLogConstants.LOCK_IMMEDIATE);
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .regionId(DistributedLogConstants.LOCAL_REGION_ID)
            .clientId("console-writer")
            .build();
    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);
    try {
        AsyncLogWriter writer = null;
        try {
            writer = FutureUtils.result(dlm.openAsyncLogWriter());
            ConsoleReader reader = new ConsoleReader();
            String line;
            while ((line = reader.readLine(PROMPT_MESSAGE)) != null) {
                writer.write(new LogRecord(System.currentTimeMillis(), line.getBytes(UTF_8)))
                        .whenComplete(new FutureEventListener<DLSN>() {
                            @Override
                            public void onFailure(Throwable cause) {
                                System.out.println("Encountered error on writing data");
                                cause.printStackTrace(System.err);
                                Runtime.getRuntime().exit(0);
                            }

                            @Override
                            public void onSuccess(DLSN value) {
                                // done
                            }
                        });
            }
        } finally {
            if (null != writer) {
                FutureUtils.result(writer.asyncClose(), 5, TimeUnit.SECONDS);
            }
        }
    } finally {
        dlm.close();
        namespace.close();
    }
}
Example 8
Source File: StreamTransformer.java From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (3 != args.length) {
        System.out.println(HELP);
        return;
    }
    String dlUriStr = args[0];
    final String srcStreamName = args[1];
    final String targetStreamName = args[2];
    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setOutputBufferSize(16 * 1024); // 16KB
    conf.setPeriodicFlushFrequencyMilliSeconds(5); // 5ms
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .build();
    // open the dlm
    System.out.println("Opening log stream " + srcStreamName);
    DistributedLogManager srcDlm = namespace.openLog(srcStreamName);
    System.out.println("Opening log stream " + targetStreamName);
    DistributedLogManager targetDlm = namespace.openLog(targetStreamName);
    Transformer<byte[], byte[]> replicationTransformer = new IdenticalTransformer<byte[]>();
    LogRecordWithDLSN lastTargetRecord;
    DLSN srcDlsn;
    try {
        lastTargetRecord = targetDlm.getLastLogRecord();
        TransformedRecord lastTransformedRecord = new TransformedRecord();
        try {
            lastTransformedRecord.read(protocolFactory.getProtocol(
                    new TIOStreamTransport(new ByteArrayInputStream(lastTargetRecord.getPayload()))));
            srcDlsn = DLSN.deserializeBytes(lastTransformedRecord.getSrcDlsn());
            System.out.println("Last transformed record is " + srcDlsn);
        } catch (TException e) {
            System.err.println("Error on reading last transformed record");
            e.printStackTrace(System.err);
            srcDlsn = DLSN.InitialDLSN;
        }
    } catch (LogNotFoundException lnfe) {
        srcDlsn = DLSN.InitialDLSN;
    } catch (LogEmptyException lee) {
        srcDlsn = DLSN.InitialDLSN;
    }
    AsyncLogWriter targetWriter = FutureUtils.result(targetDlm.openAsyncLogWriter());
    try {
        readLoop(srcDlm, srcDlsn, targetWriter, replicationTransformer);
    } finally {
        FutureUtils.result(targetWriter.asyncClose(), 5, TimeUnit.SECONDS);
        targetDlm.close();
        srcDlm.close();
        namespace.close();
    }
}