Java Code Examples for io.prestosql.spi.HostAddress#fromParts()
The following examples show how to use io.prestosql.spi.HostAddress#fromParts().
They are taken from open source projects; the source file, project, and license are noted above each example.
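Before the project examples, here is a minimal, self-contained sketch of what fromParts() produces. It is an illustration only: the host name and port are placeholder values, and it assumes the usual presto-spi HostAddress accessors (getHostText(), getPort(), toString(), and the fromString() parser).

import io.prestosql.spi.HostAddress;

public class HostAddressExample
{
    public static void main(String[] args)
    {
        // Build an address from separate host and port parts (placeholder values).
        HostAddress address = HostAddress.fromParts("example-worker-1", 8080);

        // Read the parts back; getHostText() returns the host without the port.
        System.out.println(address.getHostText()); // example-worker-1
        System.out.println(address.getPort());     // 8080

        // toString() renders "host:port", and fromString() parses it back.
        HostAddress parsed = HostAddress.fromString(address.toString());
        System.out.println(parsed.equals(address)); // true
    }
}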
Example 1
Source File: TestSystemSplit.java, from presto (Apache License 2.0)
@Test
public void testSerialization()
{
    SystemSplit expected = new SystemSplit(HostAddress.fromParts("127.0.0.1", 0), TupleDomain.all());

    JsonCodec<SystemSplit> codec = jsonCodec(SystemSplit.class);
    SystemSplit actual = codec.fromJson(codec.toJson(expected));

    assertEquals(actual.getAddresses(), expected.getAddresses());
    assertEquals(actual.getConstraint(), expected.getConstraint());
}
Example 2
Source File: TestAtopSplit.java, from presto (Apache License 2.0)
@Test
public void testSerialization()
{
    JsonCodec<AtopSplit> codec = JsonCodec.jsonCodec(AtopSplit.class);
    ZonedDateTime now = ZonedDateTime.now(ZoneId.of("+01:23"));
    AtopSplit split = new AtopSplit(HostAddress.fromParts("localhost", 123), now.toEpochSecond(), now.getZone());
    AtopSplit decoded = codec.fromJson(codec.toJson(split));

    assertEquals(decoded.getHost(), split.getHost());
    assertEquals(decoded.getDate(), split.getDate());
    assertEquals(decoded.getEpochSeconds(), split.getEpochSeconds());
    assertEquals(decoded.getTimeZone(), split.getTimeZone());
}
Example 3
Source File: KafkaSplitManager.java, from presto (Apache License 2.0)
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle table,
        SplitSchedulingStrategy splitSchedulingStrategy)
{
    KafkaTableHandle kafkaTableHandle = (KafkaTableHandle) table;
    try (KafkaConsumer<byte[], byte[]> kafkaConsumer = consumerFactory.create()) {
        List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(kafkaTableHandle.getTopicName());
        List<TopicPartition> topicPartitions = partitionInfos.stream()
                .map(KafkaSplitManager::toTopicPartition)
                .collect(toImmutableList());

        Map<TopicPartition, Long> partitionBeginOffsets = kafkaConsumer.beginningOffsets(topicPartitions);
        Map<TopicPartition, Long> partitionEndOffsets = kafkaConsumer.endOffsets(topicPartitions);

        ImmutableList.Builder<KafkaSplit> splits = ImmutableList.builder();
        Optional<String> keyDataSchemaContents = kafkaTableHandle.getKeyDataSchemaLocation()
                .map(KafkaSplitManager::readSchema);
        Optional<String> messageDataSchemaContents = kafkaTableHandle.getMessageDataSchemaLocation()
                .map(KafkaSplitManager::readSchema);

        // Create one or more splits per partition, using the partition leader as the split's host address.
        for (PartitionInfo partitionInfo : partitionInfos) {
            TopicPartition topicPartition = toTopicPartition(partitionInfo);
            HostAddress leader = HostAddress.fromParts(partitionInfo.leader().host(), partitionInfo.leader().port());
            new Range(partitionBeginOffsets.get(topicPartition), partitionEndOffsets.get(topicPartition))
                    .partition(messagesPerSplit).stream()
                    .map(range -> new KafkaSplit(
                            kafkaTableHandle.getTopicName(),
                            kafkaTableHandle.getKeyDataFormat(),
                            kafkaTableHandle.getMessageDataFormat(),
                            keyDataSchemaContents,
                            messageDataSchemaContents,
                            partitionInfo.partition(),
                            range,
                            leader))
                    .forEach(splits::add);
        }
        return new FixedSplitSource(splits.build());
    }
    catch (Exception e) {
        // Catch all exceptions because the Kafka library is written in Scala and checked exceptions are not declared in the method signature.
        if (e instanceof PrestoException) {
            throw e;
        }
        throw new PrestoException(
                KAFKA_SPLIT_ERROR,
                format("Cannot list splits for table '%s' reading topic '%s'", kafkaTableHandle.getTableName(), kafkaTableHandle.getTopicName()),
                e);
    }
}
Example 4
Source File: PrestoThriftHostAddress.java, from presto (Apache License 2.0)
public HostAddress toHostAddress()
{
    return HostAddress.fromParts(getHost(), getPort());
}
Example 5
Source File: BenchmarkNodeScheduler.java, from presto (Apache License 2.0)
private static HostAddress addressForHost(int host)
{
    int rack = Integer.hashCode(host) % RACKS;
    return HostAddress.fromParts("host" + host + ".rack" + rack, 1);
}