com.aliyun.openservices.aliyun.log.producer.Result Java Examples
The following examples show how to use
com.aliyun.openservices.aliyun.log.producer.Result.
They are taken from open-source projects; each example names the source file and project it comes from, so you can look up the full context in the original repository.
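A Result is delivered to the Callback registered with a send call (and is also the value of the ListenableFuture the producer returns) once a batch of logs has been handled. Below is a minimal sketch of a callback that inspects it; the class name and the use of System.out/System.err are illustrative only, and the accessors are the ones exercised by the examples on this page.

import com.aliyun.openservices.aliyun.log.producer.Callback;
import com.aliyun.openservices.aliyun.log.producer.Result;

// Minimal illustrative callback: inspect the Result of a completed send.
public class PrintingCallback implements Callback {
    @Override
    public void onCompletion(Result result) {
        if (result.isSuccessful()) {
            // The log group was accepted, possibly after retries.
            System.out.println("send ok, attempts=" + result.getAttemptCount());
        } else {
            // The error code and message describe the last failed attempt.
            System.err.println(
                    "send failed, errorCode=" + result.getErrorCode()
                            + ", errorMessage=" + result.getErrorMessage());
        }
    }
}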
Example #1
Source File: SampleProducerWithCallback.java From aliyun-log-producer-sample with Apache License 2.0
@Override
public void onCompletion(Result result) {
    try {
        if (result.isSuccessful()) {
            LOGGER.info("Send log successfully.");
        } else {
            LOGGER.error(
                    "Failed to send log, project={}, logStore={}, logItem={}, result={}",
                    project, logStore, logItem.ToJsonString(), result);
        }
    } finally {
        completed.getAndIncrement();
    }
}
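A callback like this is handed to the producer when the log is sent, and the Result above is what the producer passes back when that send completes. The sketch below shows the wiring in hedged form: the send overload follows the pattern used by these examples, but the producer and project configuration (endpoint, credentials) are assumptions that must be adapted to your setup and producer version.

// Sketch only: construct a producer and register a callback with a send call.
// Endpoint/credential wiring is assumed, not taken from the examples above.
Producer producer = new LogProducer(new ProducerConfig());
producer.putProjectConfig(new ProjectConfig(project, endpoint, accessKeyId, accessKeySecret));

LogItem logItem = new LogItem();
logItem.PushBack("level", "info");
logItem.PushBack("message", "hello");

// The returned future resolves to the same Result the callback receives.
ListenableFuture<Result> future =
        producer.send(project, logStore, logItem, new PrintingCallback());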
Example #2
Source File: LogAccumulator.java From aliyun-log-java-producer with Apache License 2.0
public ListenableFuture<Result> append(
        String project,
        String logStore,
        String topic,
        String source,
        String shardHash,
        List<LogItem> logItems,
        Callback callback) throws InterruptedException, ProducerException {
    appendsInProgress.incrementAndGet();
    try {
        return doAppend(project, logStore, topic, source, shardHash, logItems, callback);
    } finally {
        appendsInProgress.decrementAndGet();
    }
}
Example #3
Source File: ProducerBatchTest.java From aliyun-log-java-producer with Apache License 2.0
@Test
public void testFireCallbacksAndSetFutures() {
    GroupKey groupKey = new GroupKey("project", "logStore", "topic", "source", "shardHash");
    ProducerBatch batch = new ProducerBatch(groupKey, "id", 100, 100, 3, System.currentTimeMillis());
    List<LogItem> logItems = new ArrayList<LogItem>();
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    int sizeInBytes = LogSizeCalculator.calculate(logItems);
    ListenableFuture<Result> f = batch.tryAppend(logItems, sizeInBytes, null);
    Assert.assertNotNull(f);
    thrown.expect(NoSuchElementException.class);
    batch.fireCallbacksAndSetFutures();
}
Example #4
Source File: ProducerBatchTest.java From aliyun-log-java-producer with Apache License 2.0
@Test
public void testAppendAttempt() {
    GroupKey groupKey = new GroupKey("project", "logStore", "topic", "source", "shardHash");
    ProducerBatch batch = new ProducerBatch(groupKey, "id", 100, 100, 3, System.currentTimeMillis());
    List<LogItem> logItems = new ArrayList<LogItem>();
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    int sizeInBytes = LogSizeCalculator.calculate(logItems);
    ListenableFuture<Result> f = batch.tryAppend(logItems, sizeInBytes, null);
    Assert.assertNotNull(f);
    batch.appendAttempt(new Attempt(true, "xxx", "", "", System.currentTimeMillis()));
    batch.fireCallbacksAndSetFutures();
}
Example #5
Source File: ProducerBatchTest.java From aliyun-log-java-producer with Apache License 2.0
@Test
public void testTryAppendLog() {
    GroupKey groupKey = new GroupKey("project", "logStore", "topic", "source", "shardHash");
    ProducerBatch batch = new ProducerBatch(groupKey, "id", 35, 10, 3, System.currentTimeMillis());
    LogItem logItem = new LogItem();
    logItem.PushBack("key1", "val1");
    logItem.PushBack("key2", "val2");
    logItem.PushBack("key3", "val3");
    logItem.PushBack("key4", "val4");
    int sizeInBytes = LogSizeCalculator.calculate(logItem);
    Assert.assertEquals(36, sizeInBytes);
    ListenableFuture<Result> f = batch.tryAppend(logItem, sizeInBytes, null);
    Assert.assertNotNull(f);
    Assert.assertTrue(batch.isMeetSendCondition());
}
Example #6
Source File: LoghubAppenderCallback.java From aliyun-log-log4j-appender with Apache License 2.0
@Override
public void onCompletion(Result result) {
    if (!result.isSuccessful()) {
        LogLog.error(
                "Failed to send log, project=" + project
                        + ", logStore=" + logStore
                        + ", topic=" + topic
                        + ", source=" + source
                        + ", logItem=" + logItem
                        + ", errorCode=" + result.getErrorCode()
                        + ", errorMessage=" + result.getErrorMessage());
    }
}
Example #7
Source File: FlinkLogProducer.java From aliyun-log-flink-connector with Apache License 2.0
@Override
public void onSuccess(@Nullable Result result) {
    count.decrementAndGet();
    if (result != null && !result.isSuccessful()) {
        LOG.error("Send logs failed, code={}, errorMsg={}, retries={}",
                result.getErrorCode(),
                result.getErrorMessage(),
                result.getAttemptCount());
    }
}
Example #8
Source File: LoghubAppenderCallback.java From aliyun-log-log4j2-appender with Apache License 2.0
@Override
public void onCompletion(Result result) {
    if (!result.isSuccessful()) {
        logger.error(
                "Failed to send log, project=" + project
                        + ", logStore=" + logStore
                        + ", topic=" + topic
                        + ", source=" + source
                        + ", logItem=" + logItems
                        + ", errorCode=" + result.getErrorCode()
                        + ", errorMessage=" + result.getErrorMessage());
    }
}
Example #9
Source File: ProducerBatchTest.java From aliyun-log-java-producer with Apache License 2.0
@Test
public void testIsMeetSendCondition() {
    GroupKey groupKey = new GroupKey("project", "logStore", "topic", "source", "shardHash");
    ProducerBatch batch = new ProducerBatch(groupKey, "id", 8, 100, 3, System.currentTimeMillis());
    List<LogItem> logItems = new ArrayList<LogItem>();
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    int sizeInBytes = LogSizeCalculator.calculate(logItems);
    Assert.assertEquals(8, sizeInBytes);
    ListenableFuture<Result> f = batch.tryAppend(logItems, sizeInBytes, null);
    Assert.assertNotNull(f);
    Assert.assertTrue(batch.isMeetSendCondition());
}
Example #10
Source File: ProducerBatchTest.java From aliyun-log-java-producer with Apache License 2.0
@Test
public void testTryAppendLogsExceedBatchCountThreshold() {
    GroupKey groupKey = new GroupKey("project", "logStore", "topic", "source", "shardHash");
    ProducerBatch batch = new ProducerBatch(groupKey, "id", 10000, 1, 3, System.currentTimeMillis());
    List<LogItem> logItems = new ArrayList<LogItem>();
    logItems.add(new LogItem());
    logItems.add(new LogItem());
    int sizeInBytes = LogSizeCalculator.calculate(logItems);
    Assert.assertEquals(8, sizeInBytes);
    ListenableFuture<Result> f = batch.tryAppend(logItems, sizeInBytes, null);
    Assert.assertNotNull(f);
    Assert.assertTrue(batch.isMeetSendCondition());
}
Example #11
Source File: ProducerBatchTest.java From aliyun-log-java-producer with Apache License 2.0
@Test
public void testTryAppendLogsExceedBatchSizeThreshold() {
    GroupKey groupKey = new GroupKey("project", "logStore", "topic", "source", "shardHash");
    ProducerBatch batch = new ProducerBatch(groupKey, "id", 20, 10, 3, System.currentTimeMillis());
    List<LogItem> logItems = new ArrayList<LogItem>();
    logItems.add(ProducerTest.buildLogItem());
    logItems.add(ProducerTest.buildLogItem());
    logItems.add(ProducerTest.buildLogItem());
    int sizeInBytes = LogSizeCalculator.calculate(logItems);
    Assert.assertEquals(36, sizeInBytes);
    ListenableFuture<Result> f = batch.tryAppend(logItems, sizeInBytes, null);
    Assert.assertNotNull(f);
    Assert.assertTrue(batch.isMeetSendCondition());
}
Example #12
Source File: SlsOutputFormat.java From alibaba-flink-connectors with Apache License 2.0
@Override
public void writeRecord(T row) throws IOException {
    if (null != row) {
        long start = System.currentTimeMillis();
        List<LogItem> tmpLogGroup = new ArrayList<>(1);
        tmpLogGroup.add(serializationSchema.getLogItem(row));
        // calc the partition key, if not set, use null as random shard
        String partitionKey = serializationSchema.getPartitionKey(row);
        String topic = serializationSchema.getTopic(row);
        String source = serializationSchema.getSource(row);
        try {
            ListenableFuture<Result> future = logProducerProvider.getClient().send(
                    this.projectName, this.logstore, topic, source, partitionKey, tmpLogGroup);
            Futures.addCallback(future, sendFutureCallback, executor);
            numSent.incrementAndGet();
        } catch (InterruptedException | ProducerException e) {
            callBackException = new RuntimeException(e);
        }
        if (null != callBackException) {
            LOG.warn("Fail in write to SLS", callBackException);
            if (failOnError) {
                throw callBackException;
            }
            callBackException = null;
        }
        // report metrics
        long end = System.currentTimeMillis();
        latencyGauge.report(end - start, 1);
        outTps.markEvent();
    }
}
Example #13
Source File: LoghubAppenderCallback.java From aliyun-log-logback-appender with Apache License 2.0
@Override
public void onCompletion(Result result) {
    if (!result.isSuccessful()) {
        loghubAppender.addError(
                "Failed to send log, project=" + project
                        + ", logStore=" + logstore
                        + ", topic=" + topic
                        + ", source=" + source
                        + ", logItem=" + logItems
                        + ", errorCode=" + result.getErrorCode()
                        + ", errorMessage=" + result.getErrorMessage());
    }
}
Example #14
Source File: SlsOutputFormat.java From alibaba-flink-connectors with Apache License 2.0
@Override
public void onSuccess(Result result) {
    LOG.debug("loghub-callback: send success, result: " + result.toString());
    if (!result.isSuccessful()) {
        LOG.error("loghub-callback: send failed, result:" + result.toString());
        callBackException = new RuntimeException(result.getErrorMessage());
    }
    numCommitted.incrementAndGet();
}
Example #15
Source File: ResultFailedException.java From aliyun-log-java-producer with Apache License 2.0
public Result getResult() { return result; }
Example #16
Source File: ResultFailedException.java From aliyun-log-java-producer with Apache License 2.0
public ResultFailedException(Result result) { this.result = result; }
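ResultFailedException carries the failed Result for callers that wait on the returned future rather than registering a callback. Below is a sketch, under the assumption that the producer fails the future with a ResultFailedException when a batch ultimately cannot be sent; the examples above only show the exception's accessors.

// Sketch: recover the failed Result when blocking on the future.
try {
    Result result = future.get();  // successful sends resolve here
    System.out.println("send ok, attempts=" + result.getAttemptCount());
} catch (ExecutionException e) {
    if (e.getCause() instanceof ResultFailedException) {
        Result failed = ((ResultFailedException) e.getCause()).getResult();
        System.err.println(
                "send failed, errorCode=" + failed.getErrorCode()
                        + ", errorMessage=" + failed.getErrorMessage());
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();  // restore the interrupt flag
}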
Example #17
Source File: LogAccumulator.java From aliyun-log-java-producer with Apache License 2.0
private ListenableFuture<Result> doAppend(
        String project,
        String logStore,
        String topic,
        String source,
        String shardHash,
        List<LogItem> logItems,
        Callback callback) throws InterruptedException, ProducerException {
    if (closed) {
        throw new IllegalStateException("cannot append after the log accumulator was closed");
    }
    int sizeInBytes = LogSizeCalculator.calculate(logItems);
    ensureValidLogSize(sizeInBytes);
    long maxBlockMs = producerConfig.getMaxBlockMs();
    LOGGER.trace(
            "Prepare to acquire bytes, sizeInBytes={}, maxBlockMs={}, project={}, logStore={}",
            sizeInBytes, maxBlockMs, project, logStore);
    // Reserve memory for the logs first; with a non-negative maxBlockMs the caller blocks
    // at most that long, otherwise it blocks until memory becomes available.
    if (maxBlockMs >= 0) {
        boolean acquired = memoryController.tryAcquire(sizeInBytes, maxBlockMs, TimeUnit.MILLISECONDS);
        if (!acquired) {
            throw new TimeoutException(
                    "failed to acquire memory within the configured max blocking time "
                            + producerConfig.getMaxBlockMs() + " ms");
        }
    } else {
        memoryController.acquire(sizeInBytes);
    }
    try {
        GroupKey groupKey = new GroupKey(project, logStore, topic, source, shardHash);
        ProducerBatchHolder holder = getOrCreateProducerBatchHolder(groupKey);
        synchronized (holder) {
            return appendToHolder(groupKey, logItems, callback, sizeInBytes, holder);
        }
    } catch (Exception e) {
        // Give the reserved memory back if the logs could not be appended to a batch.
        memoryController.release(sizeInBytes);
        throw new ProducerException(e);
    }
}
Example #18
Source File: FlinkLogProducer.java From aliyun-log-flink-connector with Apache License 2.0
@Override
public void invoke(T value, Context context) {
    if (this.producer == null) {
        throw new IllegalStateException("Flink log producer has not been initialized yet!");
    }
    RawLogGroup logGroup = schema.serialize(value);
    if (logGroup == null) {
        LOG.info("The serialized log group is null, will not send any data to log service");
        return;
    }
    String shardHashKey = null;
    if (customPartitioner != null) {
        shardHashKey = customPartitioner.getHashKey(value);
    }
    // Convert the serialized log group into LogItem records.
    List<LogItem> logs = new ArrayList<>();
    for (RawLog rawLog : logGroup.getLogs()) {
        if (rawLog == null) {
            continue;
        }
        LogItem record = new LogItem(rawLog.getTime());
        for (Map.Entry<String, String> kv : rawLog.getContents().entrySet()) {
            record.PushBack(kv.getKey(), kv.getValue());
        }
        logs.add(record);
    }
    if (logs.isEmpty()) {
        return;
    }
    try {
        // The registered callback receives the Result once the producer has handled this batch.
        ListenableFuture<Result> future = producer.send(
                project, logstore, logGroup.getTopic(), logGroup.getSource(), shardHashKey, logs);
        Futures.addCallback(future, callback, executor);
        buffered.incrementAndGet();
    } catch (InterruptedException | ProducerException e) {
        LOG.error("Error while sending logs", e);
        throw new RuntimeException(e);
    }
}
Example #19
Source File: SlsOutputFormatTest.java From alibaba-flink-connectors with Apache License 2.0
@Test
public void testCommit() throws ProducerException, InterruptedException, IOException {
    SlsRecordResolver<Row> serializationSchema = Mockito.mock(SlsRecordResolver.class);
    SlsOutputFormat<Row> outputFormat =
            new SlsOutputFormat("", "", "", "test_project", "test_store", serializationSchema);
    LogProducer producer = Mockito.mock(LogProducer.class);
    LogProducerProvider producerProvider = Mockito.mock(LogProducerProvider.class);
    Mockito.when(producerProvider.getClient()).thenReturn(producer);
    SettableFuture future = SettableFuture.create();
    // Use any() instead of anyString() because in Mockito 2.x, anyString() does not match null
    // any more, which may cause the test to fail.
    Mockito.when(
            producer.send(
                    Mockito.eq("test_project"),
                    Mockito.eq("test_store"),
                    Mockito.any(),
                    Mockito.any(),
                    Mockito.any(),
                    Mockito.anyList())).thenReturn(future);
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Whitebox.setInternalState(outputFormat, "logProducerProvider", producerProvider);
    Whitebox.setInternalState(outputFormat, "sendFutureCallback", outputFormat.new SendFutureCallback());
    Whitebox.setInternalState(outputFormat, "executor", executor);
    Whitebox.setInternalState(outputFormat, "latencyGauge", Mockito.mock(MetricUtils.LatencyGauge.class));
    Whitebox.setInternalState(outputFormat, "outTps", Mockito.mock(Meter.class));
    Row record = new Row(3);
    record.setField(0, 100);
    record.setField(1, 1000);
    record.setField(2, "test");
    outputFormat.writeRecord(record);
    AtomicLong numSent = (AtomicLong) Whitebox.getInternalState(outputFormat, "numSent");
    AtomicLong numCommitted = (AtomicLong) Whitebox.getInternalState(outputFormat, "numCommitted");
    assertEquals(1, numSent.get());
    assertEquals(0, numCommitted.get());
    // trigger call back.
    future.set(new Result(true, null, 0));
    // wait call back finished.
    executor.awaitTermination(1, TimeUnit.SECONDS);
    assertEquals(1, numSent.get());
    assertEquals(1, numCommitted.get());
}