io.airlift.json.JsonCodec Java Examples
The following examples show how to use io.airlift.json.JsonCodec. All snippets come from open-source projects (Presto and presto-kinesis); the source file and license are noted above each example.
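Before the project examples, here is a minimal round-trip sketch of the API itself: JsonCodec.jsonCodec(Class) builds a codec for a type, toJson serializes, and fromJson parses. The Person class below is hypothetical, not taken from any project on this page; Airlift's ObjectMapper is typically configured to require explicit Jackson annotations rather than field auto-detection, so the bindings are declared on the constructor and getters.

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.airlift.json.JsonCodec;

public final class JsonCodecExample
{
    // Hypothetical value class used only for this sketch
    public static class Person
    {
        private final String name;
        private final int age;

        @JsonCreator
        public Person(@JsonProperty("name") String name, @JsonProperty("age") int age)
        {
            this.name = name;
            this.age = age;
        }

        @JsonProperty
        public String getName()
        {
            return name;
        }

        @JsonProperty
        public int getAge()
        {
            return age;
        }
    }

    public static void main(String[] args)
    {
        // Build a codec once and reuse it across calls
        JsonCodec<Person> codec = JsonCodec.jsonCodec(Person.class);

        String json = codec.toJson(new Person("alice", 30)); // object -> JSON string
        Person copy = codec.fromJson(json);                  // JSON string -> object

        System.out.println(json);
        System.out.println(copy.getName() + " / " + copy.getAge());
    }
}

For generic types, the companion factory methods listJsonCodec and mapJsonCodec build codecs with full type information, which is how a codec such as JsonCodec<List<ExtraColumnMetadata>> in Example #21 is obtained.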
Example #1
Source File: TestSignature.java From presto with Apache License 2.0

@Test
public void testSerializationRoundTrip()
{
    ObjectMapperProvider objectMapperProvider = new ObjectMapperProvider();
    objectMapperProvider.setJsonDeserializers(ImmutableMap.of(
            Type.class, new TypeDeserializer(createTestMetadataManager()),
            TypeSignature.class, new TypeSignatureDeserializer()));
    JsonCodec<Signature> codec = new JsonCodecFactory(objectMapperProvider, true).jsonCodec(Signature.class);

    Signature expected = new Signature(
            "function",
            BIGINT.getTypeSignature(),
            ImmutableList.of(BOOLEAN.getTypeSignature(), DOUBLE.getTypeSignature(), VARCHAR.getTypeSignature()));

    String json = codec.toJson(expected);
    Signature actual = codec.fromJson(json);

    assertEquals(actual.getName(), expected.getName());
    assertEquals(actual.getReturnType(), expected.getReturnType());
    assertEquals(actual.getArgumentTypes(), expected.getArgumentTypes());
}
Example #2
Source File: TestResourceGroupsDao.java From presto with Apache License 2.0

@Test
public void testExactMatchSelector()
{
    H2ResourceGroupsDao dao = setup("exact_match_selector");
    dao.createExactMatchSelectorsTable();

    ResourceGroupId resourceGroupId1 = new ResourceGroupId(ImmutableList.of("global", "test", "user", "insert"));
    ResourceGroupId resourceGroupId2 = new ResourceGroupId(ImmutableList.of("global", "test", "user", "select"));
    JsonCodec<ResourceGroupId> codec = JsonCodec.jsonCodec(ResourceGroupId.class);
    dao.insertExactMatchSelector("test", "@test@test_pipeline", INSERT.name(), codec.toJson(resourceGroupId1));
    dao.insertExactMatchSelector("test", "@test@test_pipeline", SELECT.name(), codec.toJson(resourceGroupId2));

    assertNull(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", null));

    assertEquals(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", INSERT.name()), codec.toJson(resourceGroupId1));
    assertEquals(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", SELECT.name()), codec.toJson(resourceGroupId2));

    assertNull(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", DELETE.name()));
    assertNull(dao.getExactMatchResourceGroup("test", "abc", INSERT.name()));
    assertNull(dao.getExactMatchResourceGroup("prod", "@test@test_pipeline", INSERT.name()));
}
Example #3
Source File: S3TableConfigClient.java From presto with Apache License 2.0

@Inject
public S3TableConfigClient(
        KinesisConfig connectorConfig,
        KinesisClientProvider clientManager,
        JsonCodec<KinesisStreamDescription> jsonCodec)
{
    requireNonNull(connectorConfig, "connectorConfig is null");
    this.clientManager = requireNonNull(clientManager, "clientManager is null");
    this.streamDescriptionCodec = requireNonNull(jsonCodec, "jsonCodec is null");

    // If using S3 start thread that periodically looks for updates
    if (connectorConfig.getTableDescriptionLocation().startsWith("s3://")) {
        this.bucketUrl = Optional.of(connectorConfig.getTableDescriptionLocation());
    }
    else {
        this.bucketUrl = Optional.empty();
    }
}
Example #4
Source File: JsonResponse.java From presto with Apache License 2.0

private JsonResponse(int statusCode, String statusMessage, Headers headers, String responseBody, JsonCodec<T> jsonCodec)
{
    this.statusCode = statusCode;
    this.statusMessage = statusMessage;
    this.headers = requireNonNull(headers, "headers is null");
    this.responseBody = requireNonNull(responseBody, "responseBody is null");

    // Eagerly parse the body; on failure, record the exception instead of throwing
    T value = null;
    IllegalArgumentException exception = null;
    try {
        value = jsonCodec.fromJson(responseBody);
    }
    catch (IllegalArgumentException e) {
        exception = new IllegalArgumentException(format("Unable to create %s from JSON response:\n[%s]", jsonCodec.getType(), responseBody), e);
    }
    this.hasValue = (exception == null);
    this.value = value;
    this.exception = exception;
}
Example #5
Source File: ContinuousTaskStatusFetcher.java From presto with Apache License 2.0

public ContinuousTaskStatusFetcher(
        Consumer<Throwable> onFail,
        TaskStatus initialTaskStatus,
        Duration refreshMaxWait,
        JsonCodec<TaskStatus> taskStatusCodec,
        Executor executor,
        HttpClient httpClient,
        Duration maxErrorDuration,
        ScheduledExecutorService errorScheduledExecutor,
        RemoteTaskStats stats)
{
    requireNonNull(initialTaskStatus, "initialTaskStatus is null");

    this.taskId = initialTaskStatus.getTaskId();
    this.onFail = requireNonNull(onFail, "onFail is null");
    this.taskStatus = new StateMachine<>("task-" + taskId, executor, initialTaskStatus);

    this.refreshMaxWait = requireNonNull(refreshMaxWait, "refreshMaxWait is null");
    this.taskStatusCodec = requireNonNull(taskStatusCodec, "taskStatusCodec is null");

    this.executor = requireNonNull(executor, "executor is null");
    this.httpClient = requireNonNull(httpClient, "httpClient is null");

    this.errorTracker = new RequestErrorTracker(taskId, initialTaskStatus.getSelf(), maxErrorDuration, errorScheduledExecutor, "getting task status");
    this.stats = requireNonNull(stats, "stats is null");
}
Example #6
Source File: RedisTestUtils.java From presto with Apache License 2.0

public static Map.Entry<SchemaTableName, RedisTableDescription> loadTpchTableDescription(
        JsonCodec<RedisTableDescription> tableDescriptionJsonCodec,
        SchemaTableName schemaTableName,
        String dataFormat)
        throws IOException
{
    RedisTableDescription tpchTemplate;
    try (InputStream data = RedisTestUtils.class.getResourceAsStream(format("/tpch/%s/%s.json", dataFormat, schemaTableName.getTableName()))) {
        tpchTemplate = tableDescriptionJsonCodec.fromJson(ByteStreams.toByteArray(data));
    }

    RedisTableDescription tableDescription = new RedisTableDescription(
            schemaTableName.getTableName(),
            schemaTableName.getSchemaName(),
            tpchTemplate.getKey(),
            tpchTemplate.getValue());

    return new AbstractMap.SimpleImmutableEntry<>(schemaTableName, tableDescription);
}
Example #7
Source File: QueryMonitor.java From presto with Apache License 2.0

@Inject
public QueryMonitor(
        JsonCodec<StageInfo> stageInfoCodec,
        JsonCodec<OperatorStats> operatorStatsCodec,
        JsonCodec<ExecutionFailureInfo> executionFailureInfoCodec,
        JsonCodec<StatsAndCosts> statsAndCostsCodec,
        EventListenerManager eventListenerManager,
        NodeInfo nodeInfo,
        NodeVersion nodeVersion,
        SessionPropertyManager sessionPropertyManager,
        Metadata metadata,
        QueryMonitorConfig config)
{
    this.eventListenerManager = requireNonNull(eventListenerManager, "eventListenerManager is null");
    this.stageInfoCodec = requireNonNull(stageInfoCodec, "stageInfoCodec is null");
    this.operatorStatsCodec = requireNonNull(operatorStatsCodec, "operatorStatsCodec is null");
    this.statsAndCostsCodec = requireNonNull(statsAndCostsCodec, "statsAndCostsCodec is null");
    this.executionFailureInfoCodec = requireNonNull(executionFailureInfoCodec, "executionFailureInfoCodec is null");
    this.serverVersion = requireNonNull(nodeVersion, "nodeVersion is null").toString();
    this.serverAddress = requireNonNull(nodeInfo, "nodeInfo is null").getExternalAddress();
    this.environment = requireNonNull(nodeInfo, "nodeInfo is null").getEnvironment();
    this.sessionPropertyManager = requireNonNull(sessionPropertyManager, "sessionPropertyManager is null");
    this.metadata = requireNonNull(metadata, "metadata is null");
    this.maxJsonLimit = toIntExact(requireNonNull(config, "config is null").getMaxOutputStageJsonSize().toBytes());
}
Example #8
Source File: ExpressionInterpreter.java From presto with Apache License 2.0

private static Expression createFailureFunction(RuntimeException exception, Type type, Metadata metadata)
{
    requireNonNull(exception, "Exception is null");

    // Serialize the failure, then wrap it as fail(json_parse(...)) so the
    // original error is raised when the expression is actually evaluated
    String failureInfo = JsonCodec.jsonCodec(FailureInfo.class).toJson(Failures.toFailure(exception).toFailureInfo());
    FunctionCall jsonParse = new FunctionCallBuilder(metadata)
            .setName(QualifiedName.of("json_parse"))
            .addArgument(VARCHAR, new StringLiteral(failureInfo))
            .build();
    FunctionCall failureFunction = new FunctionCallBuilder(metadata)
            .setName(QualifiedName.of("fail"))
            .addArgument(JSON, jsonParse)
            .build();

    return new Cast(failureFunction, toSqlType(type));
}
Example #9
Source File: FileHiveMetastore.java From presto with Apache License 2.0

private <T> Optional<T> readFile(String type, Path path, JsonCodec<T> codec)
{
    try {
        if (!metadataFileSystem.isFile(path)) {
            return Optional.empty();
        }

        try (FSDataInputStream inputStream = metadataFileSystem.open(path)) {
            byte[] json = ByteStreams.toByteArray(inputStream);
            return Optional.of(codec.fromJson(json));
        }
    }
    catch (Exception e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, "Could not read " + type, e);
    }
}
Example #10
Source File: FileHiveMetastore.java From presto with Apache License 2.0

private <T> void writeFile(String type, Path path, JsonCodec<T> codec, T value, boolean overwrite)
{
    try {
        byte[] json = codec.toJsonBytes(value);

        if (!overwrite) {
            if (metadataFileSystem.exists(path)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, type + " file already exists");
            }
        }

        metadataFileSystem.mkdirs(path.getParent());

        // todo implement safer overwrite code
        try (OutputStream outputStream = metadataFileSystem.create(path, overwrite)) {
            outputStream.write(json);
        }
    }
    catch (Exception e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, "Could not write " + type, e);
    }
}
Example #11
Source File: TestHiveColumnHandle.java From presto with Apache License 2.0

private void testRoundTrip(HiveColumnHandle expected)
{
    ObjectMapperProvider objectMapperProvider = new ObjectMapperProvider();
    objectMapperProvider.setJsonDeserializers(ImmutableMap.of(Type.class, new HiveModule.TypeDeserializer(new InternalTypeManager(createTestMetadataManager()))));
    JsonCodec<HiveColumnHandle> codec = new JsonCodecFactory(objectMapperProvider).jsonCodec(HiveColumnHandle.class);

    String json = codec.toJson(expected);
    HiveColumnHandle actual = codec.fromJson(json);

    assertEquals(actual.getBaseColumnName(), expected.getBaseColumnName());
    assertEquals(actual.getBaseHiveColumnIndex(), expected.getBaseHiveColumnIndex());
    assertEquals(actual.getBaseType(), expected.getBaseType());
    assertEquals(actual.getBaseHiveType(), expected.getBaseHiveType());
    assertEquals(actual.getName(), expected.getName());
    assertEquals(actual.getType(), expected.getType());
    assertEquals(actual.getHiveType(), expected.getHiveType());
    assertEquals(actual.getHiveColumnProjectionInfo(), expected.getHiveColumnProjectionInfo());
    assertEquals(actual.isPartitionKey(), expected.isPartitionKey());
}
Example #12
Source File: PinotClient.java From presto with Apache License 2.0

private <T> T sendHttpGetToBrokerJson(String table, String path, JsonCodec<T> codec)
{
    return doHttpActionWithHeadersJson(
            Request.builder().prepareGet().setUri(URI.create(format("http://%s/%s", getBrokerHost(table), path))),
            Optional.empty(),
            codec);
}
Example #13
Source File: IcebergMetadataFactory.java From presto with Apache License 2.0

public IcebergMetadataFactory(
        HiveMetastore metastore,
        HdfsEnvironment hdfsEnvironment,
        TypeManager typeManager,
        JsonCodec<CommitTaskData> commitTaskCodec,
        long metastoreTransactionCacheSize)
{
    this.metastore = requireNonNull(metastore, "metastore is null");
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.commitTaskCodec = requireNonNull(commitTaskCodec, "commitTaskCodec is null");
    this.metastoreTransactionCacheSize = metastoreTransactionCacheSize;
}
Example #14
Source File: TestHivePageSink.java From presto with Apache License 2.0

private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats)
{
    ConnectorSession session = getHiveSession(config);
    HiveIdentity identity = new HiveIdentity(session);
    LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(identity, SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()),
            locationHandle,
            config.getHiveStorageFormat(),
            config.getHiveStorageFormat(),
            ImmutableList.of(),
            Optional.empty(),
            "test",
            ImmutableMap.of(),
            false);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    HivePageSinkProvider provider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, HDFS_ENVIRONMENT),
            HDFS_ENVIRONMENT,
            PAGE_SORTER,
            metastore,
            new GroupByHashPageIndexerFactory(new JoinCompiler(createTestMetadataManager())),
            TYPE_MANAGER,
            config,
            new HiveLocationService(HDFS_ENVIRONMENT),
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            stats);
    return provider.createPageSink(transaction, getHiveSession(config), handle);
}
Example #15
Source File: S3TableConfigClient.java From presto-kinesis with Apache License 2.0

@Inject
public S3TableConfigClient(KinesisConnectorConfig aConnectorConfig, KinesisClientProvider aClientManager, JsonCodec<KinesisStreamDescription> jsonCodec)
{
    this.kinesisConnectorConfig = requireNonNull(aConnectorConfig, "connector configuration object is null");
    this.clientManager = requireNonNull(aClientManager, "client manager object is null");
    this.streamDescriptionCodec = requireNonNull(jsonCodec, "JSON codec object is null");

    // If using S3 start thread that periodically looks for updates
    this.bucketUrl = this.kinesisConnectorConfig.getTableDescriptionsS3();
    if (!this.bucketUrl.isEmpty()) {
        startS3Updates();
    }
}
Example #16
Source File: TestPinotTableHandle.java From presto with Apache License 2.0

@Test
public void testJsonRoundTrip()
{
    JsonCodec<PinotTableHandle> codec = jsonCodec(PinotTableHandle.class);
    String json = codec.toJson(tableHandle);
    PinotTableHandle copy = codec.fromJson(json);
    assertEquals(copy, tableHandle);
}
Example #17
Source File: PinotClient.java From presto with Apache License 2.0

protected <T> T doHttpActionWithHeadersJson(Request.Builder requestBuilder, Optional<String> requestBody, JsonCodec<T> codec)
{
    requestBuilder.setHeader(HttpHeaders.ACCEPT, APPLICATION_JSON);
    if (requestBody.isPresent()) {
        requestBuilder.setHeader(HttpHeaders.CONTENT_TYPE, APPLICATION_JSON);
        requestBuilder.setBodyGenerator(StaticBodyGenerator.createStaticBodyGenerator(requestBody.get(), StandardCharsets.UTF_8));
    }
    Request request = requestBuilder.build();

    JsonResponseHandler<T> responseHandler = createJsonResponseHandler(codec);
    long startTime = ticker.read();
    long duration;
    T response = null;
    try {
        response = httpClient.execute(request, responseHandler);
    }
    catch (UnexpectedResponseException e) {
        // note: response is still null here, since execute() threw before assigning it
        throw new PinotException(
                PinotErrorCode.PINOT_HTTP_ERROR,
                Optional.empty(),
                format(
                        "Unexpected response status: %d for request %s to url %s, with headers %s, full response %s",
                        e.getStatusCode(),
                        requestBody.orElse(""),
                        request.getUri(),
                        request.getHeaders(),
                        response));
    }
    finally {
        duration = ticker.read() - startTime;
    }
    //metrics.monitorRequest(request, response, duration, TimeUnit.NANOSECONDS);
    return response;
}
Example #18
Source File: PinotClient.java From presto with Apache License 2.0

@Inject
public PinotClient(
        PinotConfig config,
        PinotMetrics metrics,
        @ForPinot HttpClient httpClient,
        JsonCodec<GetTables> tablesJsonCodec,
        JsonCodec<BrokersForTable> brokersForTableJsonCodec,
        JsonCodec<TimeBoundary> timeBoundaryJsonCodec,
        JsonCodec<BrokerResponseNative> brokerResponseCodec)
{
    this.brokersForTableJsonCodec = requireNonNull(brokersForTableJsonCodec, "brokers for table json codec is null");
    this.timeBoundaryJsonCodec = requireNonNull(timeBoundaryJsonCodec, "time boundary json codec is null");
    this.tablesJsonCodec = requireNonNull(tablesJsonCodec, "json codec is null");
    this.schemaJsonCodec = new JsonCodecFactory(() -> new ObjectMapper()
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)).jsonCodec(Schema.class);
    this.brokerResponseCodec = requireNonNull(brokerResponseCodec, "brokerResponseCodec is null");
    requireNonNull(config, "config is null");
    if (config.getControllerUrls() == null || config.getControllerUrls().isEmpty()) {
        throw new PinotException(PINOT_INVALID_CONFIGURATION, Optional.empty(), "No pinot controllers specified");
    }
    this.controllerUrls = config.getControllerUrls();
    this.metrics = requireNonNull(metrics, "metrics is null");
    this.httpClient = requireNonNull(httpClient, "httpClient is null");

    this.brokersForTableCache = CacheBuilder.newBuilder()
            .expireAfterWrite(config.getMetadataCacheExpiry().roundTo(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
            .build((CacheLoader.from(this::getAllBrokersForTable)));
}
Example #19
Source File: TestAtopSplit.java From presto with Apache License 2.0

@Test
public void testSerialization()
{
    JsonCodec<AtopSplit> codec = JsonCodec.jsonCodec(AtopSplit.class);
    ZonedDateTime now = ZonedDateTime.now(ZoneId.of("+01:23"));
    AtopSplit split = new AtopSplit(HostAddress.fromParts("localhost", 123), now.toEpochSecond(), now.getZone());
    AtopSplit decoded = codec.fromJson(codec.toJson(split));
    assertEquals(decoded.getHost(), split.getHost());
    assertEquals(decoded.getDate(), split.getDate());
    assertEquals(decoded.getEpochSeconds(), split.getEpochSeconds());
    assertEquals(decoded.getTimeZone(), split.getTimeZone());
}
Example #20
Source File: PinotClient.java From presto with Apache License 2.0

private <T> T sendHttpGetToControllerJson(String path, JsonCodec<T> codec)
{
    return doHttpActionWithHeadersJson(
            Request.builder().prepareGet().setUri(URI.create(format("http://%s/%s", getControllerUrl(), path))),
            Optional.empty(),
            codec);
}
Example #21
Source File: CassandraMetadata.java From presto with Apache License 2.0

@Inject
public CassandraMetadata(
        CassandraSession cassandraSession,
        CassandraPartitionManager partitionManager,
        JsonCodec<List<ExtraColumnMetadata>> extraColumnMetadataCodec,
        CassandraClientConfig config)
{
    this.partitionManager = requireNonNull(partitionManager, "partitionManager is null");
    this.cassandraSession = requireNonNull(cassandraSession, "cassandraSession is null");
    this.allowDropTable = requireNonNull(config, "config is null").getAllowDropTable();
    this.extraColumnMetadataCodec = requireNonNull(extraColumnMetadataCodec, "extraColumnMetadataCodec is null");
}
Example #22
Source File: KinesisTableDescriptionSupplier.java From presto with Apache License 2.0

@Inject
public KinesisTableDescriptionSupplier(
        KinesisConfig kinesisConfig,
        JsonCodec<KinesisStreamDescription> streamDescriptionCodec,
        S3TableConfigClient s3TableConfigClient)
{
    this.kinesisConfig = requireNonNull(kinesisConfig, "kinesisConfig is null");
    this.streamDescriptionCodec = requireNonNull(streamDescriptionCodec, "streamDescriptionCodec is null");
    this.s3TableConfigClient = requireNonNull(s3TableConfigClient, "S3 table config client is null");
}
Example #23
Source File: TestQueryProgressStats.java From presto with Apache License 2.0

@Test
public void testJson()
{
    QueryProgressStats expected = new QueryProgressStats(
            123456,
            1111,
            22222,
            3333,
            100000,
            34230492,
            1000,
            100000,
            false,
            OptionalDouble.of(33.33));
    JsonCodec<QueryProgressStats> codec = JsonCodec.jsonCodec(QueryProgressStats.class);

    String json = codec.toJson(expected);
    QueryProgressStats actual = codec.fromJson(json);

    assertEquals(actual.getElapsedTimeMillis(), 123456);
    assertEquals(actual.getQueuedTimeMillis(), 1111);
    assertEquals(actual.getCpuTimeMillis(), 22222);
    assertEquals(actual.getScheduledTimeMillis(), 3333);
    assertEquals(actual.getCurrentMemoryBytes(), 100000);
    assertEquals(actual.getPeakMemoryBytes(), 34230492);
    assertEquals(actual.getInputRows(), 1000);
    assertEquals(actual.getInputBytes(), 100000);
    assertFalse(actual.isBlocked());
    assertEquals(actual.getProgressPercentage(), OptionalDouble.of(33.33));
}
Example #24
Source File: TestLocalFileSplit.java From presto with Apache License 2.0

@Test
public void testJsonRoundTrip()
{
    JsonCodec<LocalFileSplit> codec = jsonCodec(LocalFileSplit.class);
    String json = codec.toJson(split);
    LocalFileSplit copy = codec.fromJson(json);

    assertEquals(copy.getAddress(), split.getAddress());
    assertEquals(copy.getAddresses(), ImmutableList.of(address));
    assertEquals(copy.isRemotelyAccessible(), false);
}
Example #25
Source File: IcebergPageSink.java From presto with Apache License 2.0

public IcebergPageSink(
        Schema outputSchema,
        PartitionSpec partitionSpec,
        String outputPath,
        IcebergFileWriterFactory fileWriterFactory,
        PageIndexerFactory pageIndexerFactory,
        HdfsEnvironment hdfsEnvironment,
        HdfsContext hdfsContext,
        List<IcebergColumnHandle> inputColumns,
        JsonCodec<CommitTaskData> jsonCodec,
        ConnectorSession session,
        FileFormat fileFormat)
{
    requireNonNull(inputColumns, "inputColumns is null");
    this.outputSchema = requireNonNull(outputSchema, "outputSchema is null");
    this.partitionSpec = requireNonNull(partitionSpec, "partitionSpec is null");
    this.outputPath = requireNonNull(outputPath, "outputPath is null");
    this.fileWriterFactory = requireNonNull(fileWriterFactory, "fileWriterFactory is null");
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    requireNonNull(hdfsContext, "hdfsContext is null");
    this.jobConf = toJobConf(hdfsEnvironment.getConfiguration(hdfsContext, new Path(outputPath)));
    this.jsonCodec = requireNonNull(jsonCodec, "jsonCodec is null");
    this.session = requireNonNull(session, "session is null");
    this.fileFormat = requireNonNull(fileFormat, "fileFormat is null");
    this.inputColumns = ImmutableList.copyOf(inputColumns);
    this.pagePartitioner = new PagePartitioner(pageIndexerFactory, toPartitionColumns(inputColumns, partitionSpec));
}
Example #26
Source File: TestQueryResource.java From presto with Apache License 2.0

private QueryInfo getQueryInfo(String queryId)
{
    URI uri = uriBuilderFrom(server.getBaseUrl())
            .replacePath("/v1/query")
            .appendPath(queryId)
            .addParameter("pretty", "true")
            .build();
    Request request = prepareGet()
            .setUri(uri)
            .setHeader(PRESTO_USER, "unknown")
            .build();
    JsonCodec<QueryInfo> codec = server.getInstance(Key.get(new TypeLiteral<JsonCodec<QueryInfo>>() {}));
    return client.execute(request, createJsonResponseHandler(codec));
}
Example #27
Source File: KafkaTableDescriptionSupplier.java From presto with Apache License 2.0

@Inject
KafkaTableDescriptionSupplier(KafkaConfig kafkaConfig, JsonCodec<KafkaTopicDescription> topicDescriptionCodec)
{
    this.topicDescriptionCodec = requireNonNull(topicDescriptionCodec, "topicDescriptionCodec is null");

    requireNonNull(kafkaConfig, "kafkaConfig is null");
    this.tableDescriptionDir = kafkaConfig.getTableDescriptionDir();
    this.defaultSchema = kafkaConfig.getDefaultSchema();
    this.tableNames = ImmutableSet.copyOf(kafkaConfig.getTableNames());
}
Example #28
Source File: TestResourceGroupId.java From presto with Apache License 2.0

@Test
public void testCodec()
{
    JsonCodec<ResourceGroupId> codec = JsonCodec.jsonCodec(ResourceGroupId.class);
    ResourceGroupId resourceGroupId = new ResourceGroupId(new ResourceGroupId("test.test"), "foo");
    assertEquals(codec.fromJson(codec.toJson(resourceGroupId)), resourceGroupId);
    assertEquals(codec.toJson(resourceGroupId), "[ \"test.test\", \"foo\" ]");
    assertEquals(codec.fromJson("[\"test.test\", \"foo\"]"), resourceGroupId);
}
Example #29
Source File: IcebergPageSinkProvider.java From presto with Apache License 2.0

@Inject
public IcebergPageSinkProvider(
        HdfsEnvironment hdfsEnvironment,
        JsonCodec<CommitTaskData> jsonCodec,
        IcebergFileWriterFactory fileWriterFactory,
        PageIndexerFactory pageIndexerFactory)
{
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.jsonCodec = requireNonNull(jsonCodec, "jsonCodec is null");
    this.fileWriterFactory = requireNonNull(fileWriterFactory, "fileWriterFactory is null");
    this.pageIndexerFactory = requireNonNull(pageIndexerFactory, "pageIndexerFactory is null");
}
Example #30
Source File: IcebergMetadataFactory.java From presto with Apache License 2.0

@Inject
public IcebergMetadataFactory(
        IcebergConfig config,
        HiveMetastore metastore,
        HdfsEnvironment hdfsEnvironment,
        TypeManager typeManager,
        JsonCodec<CommitTaskData> commitTaskDataJsonCodec)
{
    this(metastore, hdfsEnvironment, typeManager, commitTaskDataJsonCodec, config.getMetastoreTransactionCacheSize());
}