Java Code Examples for com.alibaba.otter.canal.protocol.Message#getId()
The following examples show how to use com.alibaba.otter.canal.protocol.Message#getId().
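Before the examples, here is a minimal, self-contained sketch of the pattern they all share: getWithoutAck() fetches a batch, Message#getId() returns the batch id (-1 indicates an empty batch), and that id is then passed back to ack() or rollback(). The class name, address, port, and destination below are illustrative placeholders rather than values taken from any of the listed projects.

import java.net.InetSocketAddress;

import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.protocol.Message;

public class GetIdSketch {

    public static void main(String[] args) {
        // placeholder server address and destination; adjust to your canal deployment
        CanalConnector connector = CanalConnectors.newSingleConnector(
                new InetSocketAddress("127.0.0.1", 11111), "example", "", "");
        try {
            connector.connect();
            connector.subscribe();
            Message message = connector.getWithoutAck(1024); // fetch a batch without acknowledging it
            long batchId = message.getId();                  // the batch id this page is about
            if (batchId == -1 || message.getEntries().isEmpty()) {
                System.out.println("empty batch, nothing to ack");
            } else {
                System.out.println("got batch " + batchId + " with "
                        + message.getEntries().size() + " entries");
                connector.ack(batchId);       // confirm the batch by its id
                // connector.rollback(batchId); // or roll it back on failure
            }
        } finally {
            connector.disconnect();
        }
    }
}

The examples that follow use the same id inside long-running consume loops, a Kafka message wrapper, and Pulsar sources.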
Example 1
Source File: CanalTCPConsumer.java From canal with Apache License 2.0
@Override
public List<CommonMessage> getMessage(Long timeout, TimeUnit unit) {
    try {
        Message message = canalConnector.getWithoutAck(batchSize, timeout, unit);
        long batchId = message.getId();
        currentBatchId = batchId;
        int size = message.getEntries().size();
        if (batchId == -1 || size == 0) {
            return null;
        } else {
            return MessageUtil.convert(message);
        }
    } catch (Throwable e) {
        throw new RuntimeException(e);
    }
}
Example 2
Source File: AbstractCanalClientTest.java From canal-1.1.3 with Apache License 2.0
protected void process() {
    int batchSize = 5 * 1024;
    while (running) {
        try {
            MDC.put("destination", destination);
            connector.connect();
            connector.subscribe();
            while (running) {
                Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries without acknowledging
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    // try {
                    //     Thread.sleep(1000);
                    // } catch (InterruptedException e) {
                    // }
                } else {
                    printSummary(message, batchId, size);
                    printEntry(message.getEntries());
                }
                connector.ack(batchId); // acknowledge the batch
                // connector.rollback(batchId); // on failure, roll back the batch
            }
        } catch (Exception e) {
            logger.error("process error!", e);
        } finally {
            connector.disconnect();
            MDC.remove("destination");
        }
    }
}
Example 3
Source File: LockTest.java From canal-elasticsearch with Apache License 2.0
public static void main(String[] args) throws InterruptedException {
    CanalConnector connector = CanalConnectors.newSingleConnector(
            new InetSocketAddress("127.0.0.1", 11111), "totoro", "", "");
    connector.connect();
    connector.subscribe();
    int emptyTimes = 0;
    while (running) {
        Message message = connector.getWithoutAck(5 * 1024);
        if (message == null || message.getId() == -1L) {
            applyWait(emptyTimes++); // back off while there is no data
        } else {
            // logger.info(message.toString());
            long messageId = message.getId();
            System.out.println("message id: " + messageId);
            Thread.sleep(1000);
            connector.rollback();
        }
    }
}
Example 4
Source File: AbstractCanalClientTest.java From canal with Apache License 2.0
protected void process() {
    int batchSize = 5 * 1024;
    while (running) {
        try {
            MDC.put("destination", destination);
            connector.connect();
            connector.subscribe();
            while (running) {
                Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries without acknowledging
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    // try {
                    //     Thread.sleep(1000);
                    // } catch (InterruptedException e) {
                    // }
                } else {
                    printSummary(message, batchId, size);
                    printEntry(message.getEntries());
                }
                if (batchId != -1) {
                    connector.ack(batchId); // acknowledge the batch
                    // connector.rollback(batchId); // on failure, roll back the batch
                }
            }
        } catch (Exception e) {
            logger.error("process error!", e);
            try {
                Thread.sleep(1000L);
            } catch (InterruptedException e1) {
                // ignore
            }
        } finally {
            connector.disconnect();
            MDC.remove("destination");
        }
    }
}
Example 5
Source File: KafkaMessage.java From canal-1.1.3 with Apache License 2.0
public KafkaMessage(Message message, long offset) {
    super(message.getId());
    BeanUtils.copyProperties(message, this);
    this.offset = offset;
}
Example 6
Source File: CanalReader.java From DataLink with Apache License 2.0
private CanalReaderMsg doGet() throws InterruptedException {
    Message message;
    try {
        if (batchTimeout < 0) {
            message = canalServer.getWithoutAck(clientIdentity, batchSize);
        } else {
            // fetch with timeout control
            message = canalServer.getWithoutAck(clientIdentity, batchSize, batchTimeout, TimeUnit.MILLISECONDS);
        }
    } catch (ZkInterruptedException e) {
        throw new InterruptedException();
    }
    if (message == null || message.getId() == -1L) {
        latestReaderMsg = null;
        return null;
    } else {
        // get the time of the first entry, including filtered data
        // compute the binlog payload size for this batch
        long payloadSize = 0;
        String logFileName = null;
        long logFileOffset = 0;
        if (!org.springframework.util.CollectionUtils.isEmpty(message.getEntries())) {
            payloadSize = message.getEntries().stream()
                    .mapToLong(i -> i.getHeader().getEventLength())
                    .summaryStatistics().getSum();
            logFileName = message.getEntries().get(0).getHeader().getLogfileName();
            logFileOffset = message.getEntries().get(0).getHeader().getLogfileOffset();
        }
        latestReaderMsg = new CanalReaderMsg(
                message.getId(),
                logFileName,
                logFileOffset,
                calcFirstEntryTime(message),
                payloadSize,
                messageParser.parse(message.getEntries(), context)
                        .stream()
                        .filter(r -> !parameter.getFilteredEventTypes().contains(r.getEventType()))
                        .collect(Collectors.toList()));
        if (parameter.isDump()) {
            latestReaderMsg.getMetaData().put(MESSAGE_KEY, message);
        }
        return latestReaderMsg;
    }
}
Example 7
Source File: SimpleCanalClientExample.java From DBus with Apache License 2.0
public static void main(String args[]) {
    // args = new String[]{"vdbus-4", "10000", "mysql_db2"};
    args = new String[]{"vdbus-4", "10000", "mysql_db2"};
    if (args.length != 3) {
        System.out.println("args: dbus-n1 11111 testdb");
        return;
    }
    String ip = args[0];
    int port = Integer.parseInt(args[1]);
    String dbname = args[2];

    // create the connection
    CanalConnector connector = null;
    int batchSize = 1000;
    int emptyCount = 0;
    try {
        connector = CanalConnectors.newSingleConnector(new InetSocketAddress(ip, port), dbname, "", "");
        // connector = CanalConnectors.newClusterConnector("vdbus-7:2181/DBus/Canal/mysql_db1", dbname, "", "");
        connector.connect();
        connector.subscribe("");
        connector.rollback();
        int totalEmtryCount = 120;
        while (emptyCount < totalEmtryCount) {
            Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries without acknowledging
            long batchId = message.getId();
            int size = message.getEntries().size();
            if (batchId == -1 || size == 0) {
                emptyCount++;
                System.out.print(".");
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                }
            } else {
                emptyCount = 0;
                // System.out.printf("message[batchId=%s,size=%s] \n", batchId, size);
                System.out.println("");
                printEntry(message.getEntries(), batchId);
            }
            connector.ack(batchId); // acknowledge the batch
            // connector.rollback(batchId); // on failure, roll back the batch
        }
        System.out.println("empty too many times, exit");
    } finally {
        if (connector != null) {
            connector.disconnect();
        }
    }
}
Example 8
Source File: CanalClient.java From flume-canal-source with Apache License 2.0
public Message fetchRows(int batchSize) {
    Message message = this.canalConnector.getWithoutAck(batchSize);
    long batchId = message.getId();
    int size = message.getEntries().size();
    if (batchId == -1 || size == 0) {
        // LOGGER.info("batch - {} fetched no data", batchId);
        return null;
    } else {
        LOGGER.info("batch - {} data fetched successful, size is {}", batchId, size);
        return message;
    }
}
Example 9
Source File: SelectorTask.java From canal-elasticsearch with Apache License 2.0
@Override
public void run() {
    running = true;
    totoroSelector.start();
    totoroSelector.rollback();
    logger.info("Selector task start .......");
    Message message;
    rollBack.set(true);
    while (running) {
        try {
            // stop immediately when a rollback occurs
            message = totoroSelector.selector();
            /**
             * The current scheduling model for rollback guarantees that when the consumer side fails,
             * the rollback is handled correctly and the client can acknowledge and keep consuming data.
             *
             * When a rollback happens, the consumer task first sets rollback to true, stops working,
             * and waits to be woken up. The trans task is then suspended in the same way.
             * The channel refuses to accept messages and futures, and tries to cancel futures that
             * have already been submitted.
             *
             * At this point every thread except the selector task does its best to stop processing
             * messages, but note that nothing has actually been rolled back yet.
             *
             * Because the selector fetches data in a loop and checks the rollback flag on every
             * iteration, it breaks out of the loop as soon as it sees the rollback state and returns
             * to the selector task. The task detects the rollback state, clears the messages in the
             * channel, rolls back to the last unacknowledged consume point, discards the current
             * message, and fetches the (rolled-back) message again. Once all of that is done the
             * rollback is complete; the selector task resets the rollback flag and resumes normal work.
             *
             * Rough test result: 40,000 rows on a single machine, rolling back whenever batchId is
             * divisible by 2; without actually consuming the data (the consumer acknowledges
             * immediately), processing performance is very good.
             */
            if (rollBack.state() == false) {
                totoroSelector.rollback();
                logger.info("The rollback happened =============> discard message , batchId :{}", message.getId());
                // discard the message just fetched and fetch again
                message = totoroSelector.selector();
                channel.clearMessage();
                rollBack.set(true);
            }
            long batchId = message.getId();
            int size = message.getEntries().size();
            if (batchId == -1 || size == 0) {
                message = null; // help gc
            } else {
                logger.info("Put message into channel =====> batchId :{}", message.getId());
                if (logger.isDebugEnabled()) {
                    printSummary(message, batchId, size);
                    printEntry(message.getEntries());
                }
                // put the message into the channel
                channel.putMessage(message);
            }
        } catch (InterruptedException e) {
            logger.error("Selector task has been interrupted ", e);
            running = false;
            break;
        }
    }
}
Example 10
Source File: CanalEmbedSelector.java From canal-elasticsearch with Apache License 2.0
@Override
public Message selector() throws InterruptedException {
    if (!running) {
        throw new RuntimeException("CanalEmbedSelector has not been started");
    }
    Message message = null;
    int emptyTimes = 0;
    if (batchTimeout < 0) {
        while (running) {
            message = connector.getWithoutAck(batchSize);
            if (message == null || message.getId() == -1L) {
                if (rollBack.state() == false) {
                    break;
                } else {
                    applyWait(emptyTimes++);
                }
            } else {
                break;
            }
        }
        if (!running) {
            throw new InterruptedException();
        }
    } else {
        while (running) {
            message = connector.getWithoutAck(batchSize, batchTimeout, TimeUnit.SECONDS);
            if (message == null || message.getId() == -1L) {
                continue;
            } else {
                break;
            }
        }
        if (!running) {
            throw new InterruptedException();
        }
    }
    return message;
}
Example 11
Source File: CanalByteSource.java From pulsar with Apache License 2.0
@Override
public Long getMessageId(Message message) {
    return message.getId();
}
Example 12
Source File: CanalStringSource.java From pulsar with Apache License 2.0
@Override
public Long getMessageId(Message message) {
    this.messageId = message.getId();
    return this.messageId;
}
Example 13
Source File: KafkaMessage.java From canal with Apache License 2.0
public KafkaMessage(Message message, long offset) {
    super(message.getId());
    BeanUtils.copyProperties(message, this);
    this.offset = offset;
}