org.apache.distributedlog.LogRecord Java Examples

The following examples show how to use org.apache.distributedlog.LogRecord. They are drawn from open source projects; the source file and license are noted above each example.
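
Before the project examples, here is a minimal sketch of the basic pattern: construct a LogRecord with a transaction id and a payload, then write it through an AsyncLogWriter. The namespace URI, stream name, and the use of the current time as a transaction id are placeholders chosen for illustration, not taken from any of the projects below.

// Minimal sketch (not from the projects below); URI and stream name are placeholders.
Namespace namespace = NamespaceBuilder.newBuilder()
        .conf(new DistributedLogConfiguration())
        .uri(URI.create("distributedlog://127.0.0.1:2181/messaging/my-namespace"))
        .build();
DistributedLogManager dlm = namespace.openLog("basic-stream");
AsyncLogWriter writer = FutureUtils.result(dlm.openAsyncLogWriter());
try {
    long txid = System.currentTimeMillis();   // any monotonically increasing id works as a transaction id
    LogRecord record = new LogRecord(txid, "hello".getBytes(StandardCharsets.UTF_8));
    DLSN dlsn = FutureUtils.result(writer.write(record));   // block until the record is durably written
    System.out.println("Wrote record at " + dlsn);
} finally {
    Utils.close(writer);
    dlm.close();
    namespace.close();
}
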
Example #1
Source File: DistributedTranslog.java    From Elasticsearch with Apache License 2.0
/**
 * Serializes a translog operation and writes it to the distributed log.
 *
 * @param operation the translog operation to write
 * @param txid transaction id counter; incremented to assign the record's transaction id
 * @return a tuple of the write future (DLSN) and a tuple of the serialized bytes and the transaction id used
 * @throws IOException if the operation cannot be serialized or written
 */
public Tuple<Future<DLSN>, Tuple<BytesReference, Long>> writeOperation(Translog.Operation operation, AtomicLong txid) throws IOException {
    BytesStreamOutput out = new BytesStreamOutput();
    try (ReleasableLock lock = writeLock.acquire()) {
        Future<DLSN> writeResult = null;
        out.writeByte(operation.opType().id());
        operation.writeTo(out);
        BytesReference bytes = out.bytes();
        LogRecord logRecord = new LogRecord(txid.incrementAndGet(), bytes.toBytes());
        writeResult = logWriter.write(logRecord);
        sizeInBytes += (20 + logRecord.getPayload().length);
        ++ numOperations;
        return new Tuple<Future<DLSN>, Tuple<BytesReference, Long>>(writeResult, new Tuple<BytesReference, Long>(bytes, txid.get()));
    } catch (TransactionIdOutOfOrderException e) {
        throw e;
    } finally {
        out.close();
    }
}
 
Example #2
Source File: TestBKLogSegmentEntryReader.java    From distributedlog with Apache License 2.0
void generateCompletedLogSegments(DistributedLogManager dlm,
                                  DistributedLogConfiguration conf,
                                  long numCompletedSegments,
                                  long segmentSize) throws Exception {
    long txid = 1L;
    for (long i = 0; i < numCompletedSegments; i++) {
        AsyncLogWriter writer = Utils.ioResult(dlm.openAsyncLogWriter());
        for (long j = 1; j <= segmentSize; j++) {
            Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
            LogRecord ctrlRecord = DLMTestUtil.getLogRecordInstance(txid);
            ctrlRecord.setControl();
            Utils.ioResult(writer.write(ctrlRecord));
        }
        Utils.close(writer);
    }
}
 
Example #3
Source File: DistributedLogTool.java    From distributedlog with Apache License 2.0
private void dumpRecord(LogRecord record) {
    System.out.println("------------------------------------------------");
    if (record instanceof LogRecordWithDLSN) {
        System.out.println("Record (txn = " + record.getTransactionId() + ", bytes = "
                + record.getPayload().length + ", dlsn = "
                + ((LogRecordWithDLSN) record).getDlsn() + ", sequence id = "
                + ((LogRecordWithDLSN) record).getSequenceId() + ")");
    } else {
        System.out.println("Record (txn = " + record.getTransactionId() + ", bytes = "
                + record.getPayload().length + ")");
    }
    System.out.println("");

    if (skipPayload) {
        return;
    }

    if (printHex) {
        System.out.println(Hex.encodeHexString(record.getPayload()));
    } else {
        System.out.println(new String(record.getPayload(), UTF_8));
    }
}
 
Example #4
Source File: DistributedLogTool.java    From distributedlog with Apache License 2.0
private void dumpRecords(AsyncLogReader reader) throws Exception {
    int numRead = 0;
    LogRecord record = FutureUtils.result(reader.readNext());
    while (record != null) {
        // dump the record
        dumpRecord(record);
        ++numRead;
        if (numRead >= count) {
            break;
        }
        record = FutureUtils.result(reader.readNext());
    }
    if (numRead == 0) {
        System.out.println("No records.");
    } else {
        System.out.println("------------------------------------------------");
    }
}
 
Example #5
Source File: TestStreamOp.java    From distributedlog with Apache License 2.0
@Test(timeout = 60000)
public void testResponseSucceededThenFailed() throws Exception {
    AsyncLogWriter writer = mock(AsyncLogWriter.class);
    when(writer.write((LogRecord) any())).thenReturn(FutureUtils.value(new DLSN(1, 2, 3)));
    when(writer.getStreamName()).thenReturn("test");
    WriteOp writeOp = getWriteOp();
    writeOp.execute(writer, new Sequencer() {
        public long nextId() {
            return 0;
        }
    }, new Object());
    writeOp.fail(new InternalServerException("test2"));

    WriteResponse response = Await.result(writeOp.result());
    assertEquals(StatusCode.SUCCESS, response.getHeader().getCode());
}
 
Example #6
Source File: HeartbeatOp.java    From distributedlog with Apache License 2.0
@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    // write a control record if heartbeat is the first request of the recovered log segment.
    if (writeControlRecord) {
        long txnId;
        Future<DLSN> writeResult;
        synchronized (txnLock) {
            txnId = sequencer.nextId();
            LogRecord hbRecord = new LogRecord(txnId, HEARTBEAT_DATA);
            hbRecord.setControl();
            writeResult = newTFuture(writer.write(hbRecord));
        }
        return writeResult.map(new AbstractFunction1<DLSN, WriteResponse>() {
            @Override
            public WriteResponse apply(DLSN value) {
                return ResponseUtils.writeSuccess().setDlsn(value.serialize(dlsnVersion));
            }
        });
    } else {
        return Future.value(ResponseUtils.writeSuccess());
    }
}
 
Example #7
Source File: ClusterStateOpLog.java    From Elasticsearch with Apache License 2.0
private void writeControlRecord(long version) throws IOException {
    try {
        LogRecord logRecord = new LogRecord(version, new byte[1]);
        logRecord.setControl();
        Future<DLSN> result = logWriter.write(logRecord);
        FutureUtils.result(result);
        return;
    } catch (TransactionIdOutOfOrderException e) {
        throw e;
    }
}
 
Example #8
Source File: DLWriterWorker.java    From distributedlog with Apache License 2.0
@Override
public void run() {
    LOG.info("Started writer {}.", idx);
    while (running) {
        final int streamIdx = random.nextInt(numStreams);
        final AsyncLogWriter writer = streamWriters.get(streamIdx);
        rateLimiter.getLimiter().acquire();
        final long requestMillis = System.currentTimeMillis();
        final byte[] data;
        try {
            data = Utils.generateMessage(requestMillis, messageSizeBytes);
        } catch (TException e) {
            LOG.error("Error on generating message : ", e);
            break;
        }
        writer.write(new LogRecord(requestMillis, data)).whenComplete(new FutureEventListener<DLSN>() {
            @Override
            public void onSuccess(DLSN value) {
                requestStat.registerSuccessfulEvent(
                  System.currentTimeMillis() - requestMillis, TimeUnit.MILLISECONDS);
            }

            @Override
            public void onFailure(Throwable cause) {
                requestStat.registerFailedEvent(
                  System.currentTimeMillis() - requestMillis, TimeUnit.MILLISECONDS);
                LOG.error("Failed to publish, rescue it : ", cause);
                scheduleRescue(streamIdx, writer, 0);
            }
        });
    }
}
 
Example #9
Source File: StreamTransformer.java    From distributedlog with Apache License 2.0
private static void transform(final AsyncLogWriter writer,
                              LogRecordWithDLSN record,
                              Transformer<byte[], byte[]> replicationTransformer,
                              final CountDownLatch keepAliveLatch)
        throws Exception {
    DLSN srcDLSN = record.getDlsn();
    byte[] payload = record.getPayload();
    byte[] transformedPayload = replicationTransformer.transform(payload);
    TransformedRecord transformedRecord =
            new TransformedRecord(ByteBuffer.wrap(transformedPayload));
    transformedRecord.setSrcDlsn(srcDLSN.serializeBytes());
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4096);
    transformedRecord.write(protocolFactory.getProtocol(new TIOStreamTransport(baos)));
    byte[] data = baos.toByteArray();
    writer.write(new LogRecord(record.getSequenceId(), data))
            .whenComplete(new FutureEventListener<DLSN>() {
        @Override
        public void onFailure(Throwable cause) {
            System.err.println("Encountered error on writing records to stream " + writer.getStreamName());
            cause.printStackTrace(System.err);
            keepAliveLatch.countDown();
        }

        @Override
        public void onSuccess(DLSN dlsn) {
            System.out.println("Write transformed record " + dlsn);
        }
    });
}
 
Example #10
Source File: TestDistributedLogMultiStreamWriter.java    From distributedlog with Apache License 2.0
@Test(timeout = 20000)
public void testWriteTooLargeRecord() throws Exception {
    DistributedLogClient client = mock(DistributedLogClient.class);
    DistributedLogMultiStreamWriter writer = DistributedLogMultiStreamWriter.newBuilder()
            .streams(Lists.newArrayList("stream1", "stream2"))
            .client(client)
            .compressionCodec(CompressionCodec.Type.LZ4)
            .firstSpeculativeTimeoutMs(100000)
            .maxSpeculativeTimeoutMs(200000)
            .speculativeBackoffMultiplier(2)
            .requestTimeoutMs(5000000)
            .flushIntervalMs(0)
            .bufferSize(0)
            .build();

    byte[] data = new byte[LogRecord.MAX_LOGRECORD_SIZE + 10];
    ByteBuffer buffer = ByteBuffer.wrap(data);
    Future<DLSN> writeFuture = writer.write(buffer);
    assertTrue(writeFuture.isDefined());
    try {
        Await.result(writeFuture);
        fail("Should fail on writing too long record");
    } catch (LogRecordTooLongException lrtle) {
        // expected
    }
    writer.close();
}
 
Example #11
Source File: TestDistributedLogMultiStreamWriter.java    From distributedlog with Apache License 2.0
@Test(timeout = 20000)
public void testFlushWhenExceedMaxLogRecordSetSize()
        throws Exception {
    DistributedLogClient client = mock(DistributedLogClient.class);
    when(client.writeRecordSet((String) any(), (LogRecordSetBuffer) any()))
            .thenReturn(Future.value(new DLSN(1L, 1L, 999L)));
    ScheduledExecutorService executorService =
            Executors.newSingleThreadScheduledExecutor();
    DistributedLogMultiStreamWriter writer = DistributedLogMultiStreamWriter.newBuilder()
            .streams(Lists.newArrayList("stream1", "stream2"))
            .client(client)
            .compressionCodec(CompressionCodec.Type.LZ4)
            .firstSpeculativeTimeoutMs(100000)
            .maxSpeculativeTimeoutMs(200000)
            .speculativeBackoffMultiplier(2)
            .requestTimeoutMs(500000)
            .flushIntervalMs(0)
            .bufferSize(Integer.MAX_VALUE)
            .scheduler(executorService)
            .build();

    byte[] data = new byte[LogRecord.MAX_LOGRECORD_SIZE - 3 * 100];
    ByteBuffer buffer1 = ByteBuffer.wrap(data);
    writer.write(buffer1);
    verify(client, times(0)).writeRecordSet((String) any(), (LogRecordSetBuffer) any());
    LogRecordSet.Writer recordSetWriter1 = writer.getLogRecordSetWriter();
    assertEquals(1, recordSetWriter1.getNumRecords());
    assertEquals(LogRecordSet.HEADER_LEN + 4 + data.length, recordSetWriter1.getNumBytes());

    ByteBuffer buffer2 = ByteBuffer.wrap(data);
    writer.write(buffer2);
    verify(client, times(1)).writeRecordSet((String) any(), (LogRecordSetBuffer) any());
    LogRecordSet.Writer recordSetWriter2 = writer.getLogRecordSetWriter();
    assertEquals(1, recordSetWriter2.getNumRecords());
    assertEquals(LogRecordSet.HEADER_LEN + 4 + data.length, recordSetWriter2.getNumBytes());
    assertTrue(recordSetWriter1 != recordSetWriter2);

    writer.close();
}
 
Example #12
Source File: TestBKLogSegmentEntryReader.java    From distributedlog with Apache License 2.0
AsyncLogWriter createInprogressLogSegment(DistributedLogManager dlm,
                                          DistributedLogConfiguration conf,
                                          long segmentSize) throws Exception {
    AsyncLogWriter writer = Utils.ioResult(dlm.openAsyncLogWriter());
    for (long i = 1L; i <= segmentSize; i++) {
        Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(i)));
        LogRecord ctrlRecord = DLMTestUtil.getLogRecordInstance(i);
        ctrlRecord.setControl();
        Utils.ioResult(writer.write(ctrlRecord));
    }
    return writer;
}
 
Example #13
Source File: BulkWriteOp.java    From distributedlog with Apache License 2.0
private List<LogRecord> asRecordList(List<ByteBuffer> buffers, Sequencer sequencer) {
    List<LogRecord> records = new ArrayList<LogRecord>(buffers.size());
    for (ByteBuffer buffer : buffers) {
        byte[] payload = new byte[buffer.remaining()];
        buffer.get(payload);
        records.add(new LogRecord(sequencer.nextId(), payload));
    }
    return records;
}
 
Example #14
Source File: DlogBenchmarkProducer.java    From openmessaging-benchmark with Apache License 2.0
@Override
public CompletableFuture<Void> sendAsync(Optional<String> key, byte[] payload) {
    LogRecord record = new LogRecord(
        sequencer.nextId(), payload);

    return writer.write(record).thenApply(dlsn -> null);
}
 
Example #15
Source File: WriteOp.java    From distributedlog with Apache License 2.0
@Override
protected Future<WriteResponse> executeOp(AsyncLogWriter writer,
                                          Sequencer sequencer,
                                          Object txnLock) {
    if (!stream.equals(writer.getStreamName())) {
        logger.error("Write: Stream Name Mismatch in the Stream Map {}, {}", stream, writer.getStreamName());
        return Future.exception(new IllegalStateException("The stream mapping is incorrect, fail the request"));
    }

    long txnId;
    Future<DLSN> writeResult;
    synchronized (txnLock) {
        txnId = sequencer.nextId();
        LogRecord record = new LogRecord(txnId, payload);
        if (isRecordSet) {
            record.setRecordSet();
        }
        writeResult = newTFuture(writer.write(record));
    }
    return writeResult.map(new AbstractFunction1<DLSN, WriteResponse>() {
        @Override
        public WriteResponse apply(DLSN value) {
            successRecordCounter.inc();
            return ResponseUtils.writeSuccess().setDlsn(value.serialize(dlsnVersion));
        }
    });
}
 
Example #16
Source File: ConsoleWriter.java    From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }

    String dlUriStr = args[0];
    final String streamName = args[1];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    conf.setImmediateFlushEnabled(true);
    conf.setOutputBufferSize(0);
    conf.setPeriodicFlushFrequencyMilliSeconds(0);
    conf.setLockTimeout(DistributedLogConstants.LOCK_IMMEDIATE);
    Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(conf)
            .uri(uri)
            .regionId(DistributedLogConstants.LOCAL_REGION_ID)
            .clientId("console-writer")
            .build();

    // open the dlm
    System.out.println("Opening log stream " + streamName);
    DistributedLogManager dlm = namespace.openLog(streamName);

    try {
        AsyncLogWriter writer = null;
        try {
            writer = FutureUtils.result(dlm.openAsyncLogWriter());

            ConsoleReader reader = new ConsoleReader();
            String line;
            while ((line = reader.readLine(PROMPT_MESSAGE)) != null) {
                writer.write(new LogRecord(System.currentTimeMillis(), line.getBytes(UTF_8)))
                        .whenComplete(new FutureEventListener<DLSN>() {
                            @Override
                            public void onFailure(Throwable cause) {
                                System.out.println("Encountered error on writing data");
                                cause.printStackTrace(System.err);
                                Runtime.getRuntime().exit(0);
                            }

                            @Override
                            public void onSuccess(DLSN value) {
                                // done
                            }
                        });
            }
        } finally {
            if (null != writer) {
                FutureUtils.result(writer.asyncClose(), 5, TimeUnit.SECONDS);
            }
        }
    } finally {
        dlm.close();
        namespace.close();
    }
}
 
Example #17
Source File: WriterTest.java    From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 3) {
        System.err.println("WriterTest <uri> <stream> <num_records>");
        return;
    }

    URI uri = URI.create(args[0]);
    String streamName = args[1];
    int numRecords = Integer.parseInt(args[2]);

    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setOutputBufferSize(0)
        .setPeriodicFlushFrequencyMilliSeconds(2);

    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
        .uri(uri)
        .conf(conf)
        .build();
    try {
        try (DistributedLogManager manager = namespace.openLog(streamName)) {
            AsyncLogWriter writer = FutureUtils.result(manager.openAsyncLogWriter());
            try {
                long txid = writer.getLastTxId();
                if (txid < 0L) {
                    txid = 0L;
                }

                System.out.println("Publishing " + numRecords + " records to stream " + streamName + " .");

                for (int i = 1; i <= numRecords; ++i) {
                    String content = "record-" + (txid + i);
                    LogRecord record = new LogRecord(txid + i, content.getBytes(UTF_8));
                    FutureUtils.result(writer.write(record));
                    System.out.println("Write record : " + content);
                }

                System.out.println("Successfully published " + numRecords + " records to stream " + streamName + " .");
            } finally {
                Utils.close(writer);
            }
        }
    } finally {
        namespace.close();
    }
}
 
Example #18
Source File: ReaderTest.java    From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 4) {
        System.err.println("ReaderTest <uri> <stream> <num_records> <start_tx_id>");
        return;
    }

    URI uri = URI.create(args[0]);
    String streamName = args[1];
    int numRecords = Integer.parseInt(args[2]);
    final long startTxId = Long.parseLong(args[3]);

    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setOutputBufferSize(0)
        .setPeriodicFlushFrequencyMilliSeconds(2);

    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder()
        .uri(uri)
        .conf(conf)
        .build();
    try {
        try (DistributedLogManager manager = namespace.openLog(streamName)) {
            AsyncLogReader reader = FutureUtils.result(manager.openAsyncLogReader(startTxId));
            try {
                System.out.println("Try to read " + numRecords + " records from stream " + streamName + " .");
                for (int i = 0; i < numRecords; ++i) {
                    LogRecord record = FutureUtils.result(reader.readNext());
                    String data = new String(record.getPayload(), UTF_8);

                    System.out.println("Read record : " + data);

                    String expectedData = "record-" + (startTxId + i);
                    checkArgument(expectedData.equals(data),
                        "Expected = " + expectedData + ", Actual = " + data);
                    long expectedTxId = startTxId + i;
                    checkArgument(expectedTxId == record.getTransactionId(),
                        "Expected TxId = " + expectedTxId + ", Actual TxId = " + record.getTransactionId());
                }
                System.out.println("Successfully read " + numRecords + " records to stream " + streamName + " .");
            } finally {
                Utils.close(reader);
            }
        }
    } finally {
        namespace.close();
    }
}
 
Example #19
Source File: WriterTest.java    From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 3) {
        System.err.println("WriterTest <uri> <stream> <num_records>");
        return;
    }

    URI uri = URI.create(args[0]);
    String streamName = args[1];
    int numRecords = Integer.parseInt(args[2]);

    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setOutputBufferSize(0)
        .setPeriodicFlushFrequencyMilliSeconds(2);

    Namespace namespace = NamespaceBuilder.newBuilder()
        .uri(uri)
        .conf(conf)
        .build();
    try {
        try (DistributedLogManager manager = namespace.openLog(streamName)) {
            AsyncLogWriter writer = FutureUtils.result(manager.openAsyncLogWriter());
            try {
                long txid = writer.getLastTxId();
                if (txid < 0L) {
                    txid = 0L;
                }

                System.out.println("Publishing " + numRecords + " records to stream " + streamName + " .");

                for (int i = 1; i <= numRecords; ++i) {
                    String content = "record-" + (txid + i);
                    LogRecord record = new LogRecord(txid + i, content.getBytes(UTF_8));
                    FutureUtils.result(writer.write(record));
                    System.out.println("Write record : " + content);
                }

                System.out.println("Successfully published " + numRecords
                    + " records to stream " + streamName + " .");
            } finally {
                Utils.close(writer);
            }
        }
    } finally {
        namespace.close();
    }
}
 
Example #20
Source File: ReaderTest.java    From distributedlog with Apache License 2.0
public static void main(String[] args) throws Exception {
    if (args.length < 4) {
        System.err.println("ReaderTest <uri> <stream> <num_records> <start_tx_id>");
        return;
    }

    URI uri = URI.create(args[0]);
    String streamName = args[1];
    int numRecords = Integer.parseInt(args[2]);
    final long startTxId = Long.parseLong(args[3]);

    DistributedLogConfiguration conf = new DistributedLogConfiguration()
        .setOutputBufferSize(0)
        .setPeriodicFlushFrequencyMilliSeconds(2);

    Namespace namespace = NamespaceBuilder.newBuilder()
        .uri(uri)
        .conf(conf)
        .build();
    try {
        try (DistributedLogManager manager = namespace.openLog(streamName)) {
            AsyncLogReader reader = FutureUtils.result(manager.openAsyncLogReader(startTxId));
            try {
                System.out.println("Try to read " + numRecords + " records from stream " + streamName + " .");
                for (int i = 0; i < numRecords; ++i) {
                    LogRecord record = FutureUtils.result(reader.readNext());
                    String data = new String(record.getPayload(), UTF_8);

                    System.out.println("Read record : " + data);

                    String expectedData = "record-" + (startTxId + i);
                    checkArgument(expectedData.equals(data),
                        "Expected = " + expectedData + ", Actual = " + data);
                    long expectedTxId = startTxId + i;
                    checkArgument(expectedTxId == record.getTransactionId(),
                        "Expected TxId = " + expectedTxId + ", Actual TxId = " + record.getTransactionId());
                }
                System.out.println("Successfully read " + numRecords + " records to stream " + streamName + " .");
            } finally {
                Utils.close(reader);
            }
        }
    } finally {
        namespace.close();
    }
}
 
Example #21
Source File: LogWriter.java    From distributedlog with Apache License 2.0
/**
 * Write a list of log records to the stream.
 *
 * @param records list of log records
 * @return number of records written
 * @throws IOException if the records cannot be written to the stream
 */
@Deprecated
int writeBulk(List<LogRecord> records) throws IOException;
 
Example #22
Source File: LogWriter.java    From distributedlog with Apache License 2.0
/**
 * Write a log record to the stream.
 *
 * @param record single log record
 * @throws IOException
 */
void write(LogRecord record) throws IOException;
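
None of the project examples above use the synchronous LogWriter, so here is a brief, hypothetical usage sketch of that path. Obtaining the writer via DistributedLogManager#startLogSegmentNonPartitioned() is an assumption (the exact method may vary across DistributedLog releases), and the stream name is a placeholder.

// Hypothetical sketch only: synchronous writes with LogWriter.
// startLogSegmentNonPartitioned() is assumed here as the way to obtain the writer.
DistributedLogManager dlm = namespace.openLog("sync-stream");
LogWriter syncWriter = dlm.startLogSegmentNonPartitioned();
try {
    long txid = 1L;
    // write() buffers the record and throws IOException on failure
    syncWriter.write(new LogRecord(txid++, "payload-1".getBytes(StandardCharsets.UTF_8)));
    syncWriter.write(new LogRecord(txid++, "payload-2".getBytes(StandardCharsets.UTF_8)));
} finally {
    syncWriter.close();   // close the writer; in typical implementations this also flushes buffered records
    dlm.close();
}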
 
Example #23
Source File: AsyncLogWriter.java    From distributedlog with Apache License 2.0
/**
 * Write log records to the stream in bulk. Each future in the list represents the result of
 * one write operation. The size of the result list is equal to the size of the input list.
 * Buffers are written in order, and the list of result futures has the same order.
 *
 * @param record set of log records
 * @return A Future which contains a list of Future DLSNs if the records were successfully written
 * or an exception if the operation fails.
 */
CompletableFuture<List<CompletableFuture<DLSN>>> writeBulk(List<LogRecord> record);
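
Since none of the examples above exercise writeBulk, the following hedged sketch shows how the per-record futures in the returned list might be resolved. Here asyncWriter and sequencer are stand-ins for an open AsyncLogWriter and the application's transaction-id source.

// Sketch only: build a small batch, submit it with writeBulk, then resolve each per-record future.
List<LogRecord> batch = new ArrayList<>();
long txid = sequencer.nextId();   // 'sequencer' is a stand-in for the application's id source
batch.add(new LogRecord(txid, "first".getBytes(StandardCharsets.UTF_8)));
batch.add(new LogRecord(txid + 1, "second".getBytes(StandardCharsets.UTF_8)));

List<CompletableFuture<DLSN>> perRecordFutures =
        FutureUtils.result(asyncWriter.writeBulk(batch));
for (CompletableFuture<DLSN> f : perRecordFutures) {
    // results are in the same order as the input records
    System.out.println("Record written at " + FutureUtils.result(f));
}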
 
Example #24
Source File: AsyncLogWriter.java    From distributedlog with Apache License 2.0
/**
 * Write a log record to the stream.
 *
 * @param record single log record
 * @return A Future which contains a DLSN if the record was successfully written
 * or an exception if the write fails
 */
CompletableFuture<DLSN> write(LogRecord record);
 
Example #25
Source File: LogSegmentWriter.java    From distributedlog with Apache License 2.0
/**
 * This isn't a simple synchronous version of {@code asyncWrite}; it has different semantics.
 * This method only writes data to the buffer and flushes the buffer if needed.
 * TODO: remove this method once the synchronous writer is rewritten on top of the asynchronous writer,
 *       since the higher level only needs the write-and-flush semantics this method provides.
 *
 * @param record single log record
 * @throws IOException if the buffer needs to be flushed and the flush fails
 * @see LogSegmentWriter#asyncWrite(LogRecord)
 */
void write(LogRecord record) throws IOException;
 
Example #26
Source File: LogSegmentWriter.java    From distributedlog with Apache License 2.0
/**
 * Write a log record to a log segment.
 *
 * @param record single log record
 * @return a future representing write result. A {@link DLSN} is returned if write succeeds,
 *         otherwise, exceptions are returned.
 * @throws org.apache.distributedlog.exceptions.LogRecordTooLongException if log record is too long
 * @throws org.apache.distributedlog.exceptions.InvalidEnvelopedEntryException on invalid enveloped entry
 * @throws LockingException if failed to acquire lock for the writer
 * @throws BKTransmitException if failed to transmit data to bk
 * @throws org.apache.distributedlog.exceptions.WriteException if failed to write to bk
 */
CompletableFuture<DLSN> asyncWrite(LogRecord record);