Java Code Examples for io.airlift.json.JsonCodec#jsonCodec()
The following examples show how to use io.airlift.json.JsonCodec#jsonCodec(). All of them are taken from the Presto project (Apache License 2.0), and each one is listed with its source file.
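Before the project-specific examples, here is a minimal, self-contained sketch of the serialize/deserialize round trip that all of them build on. It is an illustration only: Person is a hypothetical value class, not part of airlift or Presto, and the Jackson annotations reflect the assumption that JsonCodec is backed by a Jackson ObjectMapper, so deserialization needs an annotated creator.

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.airlift.json.JsonCodec;

public final class JsonCodecRoundTrip
{
    // Hypothetical value class used only for this sketch
    public static final class Person
    {
        private final String name;

        @JsonCreator
        public Person(@JsonProperty("name") String name)
        {
            this.name = name;
        }

        @JsonProperty
        public String getName()
        {
            return name;
        }
    }

    public static void main(String[] args)
    {
        // Build a codec for the class, serialize an instance, and read it back
        JsonCodec<Person> codec = JsonCodec.jsonCodec(Person.class);
        String json = codec.toJson(new Person("alice"));
        Person decoded = codec.fromJson(json);
        System.out.println(json + " -> " + decoded.getName());
    }
}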
Example 1
Source File: TestResourceGroupsDao.java From presto with Apache License 2.0
@Test
public void testExactMatchSelector()
{
    H2ResourceGroupsDao dao = setup("exact_match_selector");
    dao.createExactMatchSelectorsTable();

    ResourceGroupId resourceGroupId1 = new ResourceGroupId(ImmutableList.of("global", "test", "user", "insert"));
    ResourceGroupId resourceGroupId2 = new ResourceGroupId(ImmutableList.of("global", "test", "user", "select"));

    // Store one selector per query type, serializing the resource group IDs to JSON
    JsonCodec<ResourceGroupId> codec = JsonCodec.jsonCodec(ResourceGroupId.class);
    dao.insertExactMatchSelector("test", "@test@test_pipeline", INSERT.name(), codec.toJson(resourceGroupId1));
    dao.insertExactMatchSelector("test", "@test@test_pipeline", SELECT.name(), codec.toJson(resourceGroupId2));

    // Lookups match only when environment, source, and query type all match exactly
    assertNull(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", null));
    assertEquals(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", INSERT.name()), codec.toJson(resourceGroupId1));
    assertEquals(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", SELECT.name()), codec.toJson(resourceGroupId2));
    assertNull(dao.getExactMatchResourceGroup("test", "@test@test_pipeline", DELETE.name()));
    assertNull(dao.getExactMatchResourceGroup("test", "abc", INSERT.name()));
    assertNull(dao.getExactMatchResourceGroup("prod", "@test@test_pipeline", INSERT.name()));
}
Example 2
Source File: TestPipelineStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    JsonCodec<PipelineStats> codec = JsonCodec.jsonCodec(PipelineStats.class);

    String json = codec.toJson(EXPECTED);
    PipelineStats actual = codec.fromJson(json);

    assertExpectedPipelineStats(actual);
}
Example 3
Source File: TestOperatorStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    JsonCodec<OperatorStats> codec = JsonCodec.jsonCodec(OperatorStats.class);

    String json = codec.toJson(EXPECTED);
    OperatorStats actual = codec.fromJson(json);

    assertExpectedOperatorStats(actual);
}
Example 4
Source File: TestTaskStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    JsonCodec<TaskStats> codec = JsonCodec.jsonCodec(TaskStats.class);

    String json = codec.toJson(EXPECTED);
    TaskStats actual = codec.fromJson(json);

    assertExpectedTaskStats(actual);
}
Example 5
Source File: TestDriverStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    JsonCodec<DriverStats> codec = JsonCodec.jsonCodec(DriverStats.class);

    String json = codec.toJson(EXPECTED);
    DriverStats actual = codec.fromJson(json);

    assertExpectedDriverStats(actual);
}
Example 6
Source File: TestStageStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    JsonCodec<StageStats> codec = JsonCodec.jsonCodec(StageStats.class);

    String json = codec.toJson(EXPECTED);
    StageStats actual = codec.fromJson(json);

    assertExpectedStageStats(actual);
}
Example 7
Source File: TestQueryStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    JsonCodec<QueryStats> codec = JsonCodec.jsonCodec(QueryStats.class);

    String json = codec.toJson(EXPECTED);
    QueryStats actual = codec.fromJson(json);

    assertExpectedQueryStats(actual);
}
Example 8
Source File: TestQueryProgressStats.java From presto with Apache License 2.0
@Test
public void testJson()
{
    // Field comments follow the getters asserted below
    QueryProgressStats expected = new QueryProgressStats(
            123456,    // elapsedTimeMillis
            1111,      // queuedTimeMillis
            22222,     // cpuTimeMillis
            3333,      // scheduledTimeMillis
            100000,    // currentMemoryBytes
            34230492,  // peakMemoryBytes
            1000,      // inputRows
            100000,    // inputBytes
            false,     // blocked
            OptionalDouble.of(33.33)); // progressPercentage
    JsonCodec<QueryProgressStats> codec = JsonCodec.jsonCodec(QueryProgressStats.class);

    String json = codec.toJson(expected);
    QueryProgressStats actual = codec.fromJson(json);

    assertEquals(actual.getElapsedTimeMillis(), 123456);
    assertEquals(actual.getQueuedTimeMillis(), 1111);
    assertEquals(actual.getCpuTimeMillis(), 22222);
    assertEquals(actual.getScheduledTimeMillis(), 3333);
    assertEquals(actual.getCurrentMemoryBytes(), 100000);
    assertEquals(actual.getPeakMemoryBytes(), 34230492);
    assertEquals(actual.getInputRows(), 1000);
    assertEquals(actual.getInputBytes(), 100000);
    assertFalse(actual.isBlocked());
    assertEquals(actual.getProgressPercentage(), OptionalDouble.of(33.33));
}
Example 9
Source File: TestStatisticAggregationsDescriptor.java From presto with Apache License 2.0
@Test
public void testSerializationRoundTrip()
{
    JsonCodec<StatisticAggregationsDescriptor<Symbol>> codec = JsonCodec.jsonCodec(new TypeToken<StatisticAggregationsDescriptor<Symbol>>() {});
    assertSerializationRoundTrip(codec, StatisticAggregationsDescriptor.<Symbol>builder().build());
    assertSerializationRoundTrip(codec, createTestDescriptor());
}
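Note the difference from the other examples: jsonCodec(Class) cannot express a parameterized type such as StatisticAggregationsDescriptor<Symbol>, so this test uses the jsonCodec(TypeToken) overload. The anonymous TypeToken subclass preserves the generic type argument at runtime, which would otherwise be erased.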
Example 10
Source File: TestResourceGroupId.java From presto with Apache License 2.0
@Test
public void testCodec()
{
    JsonCodec<ResourceGroupId> codec = JsonCodec.jsonCodec(ResourceGroupId.class);
    ResourceGroupId resourceGroupId = new ResourceGroupId(new ResourceGroupId("test.test"), "foo");

    assertEquals(codec.fromJson(codec.toJson(resourceGroupId)), resourceGroupId);
    assertEquals(codec.toJson(resourceGroupId), "[ \"test.test\", \"foo\" ]");
    assertEquals(codec.fromJson("[\"test.test\", \"foo\"]"), resourceGroupId);
}
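As the assertions show, a ResourceGroupId serializes to a JSON array of path segments rather than a JSON object, and parsing is insensitive to whitespace differences in the input.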
Example 11
Source File: TestHivePageSink.java From presto with Apache License 2.0
private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats)
{
    ConnectorSession session = getHiveSession(config);
    HiveIdentity identity = new HiveIdentity(session);
    LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY);
    HiveOutputTableHandle handle = new HiveOutputTableHandle(
            SCHEMA_NAME,
            TABLE_NAME,
            getColumnHandles(),
            new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(identity, SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()),
            locationHandle,
            config.getHiveStorageFormat(),
            config.getHiveStorageFormat(),
            ImmutableList.of(),
            Optional.empty(),
            "test",
            ImmutableMap.of(),
            false);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    HivePageSinkProvider provider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, HDFS_ENVIRONMENT),
            HDFS_ENVIRONMENT,
            PAGE_SORTER,
            metastore,
            new GroupByHashPageIndexerFactory(new JoinCompiler(createTestMetadataManager())),
            TYPE_MANAGER,
            config,
            new HiveLocationService(HDFS_ENVIRONMENT),
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            stats);
    return provider.createPageSink(transaction, getHiveSession(config), handle);
}
Example 12
Source File: TestAtopSplit.java From presto with Apache License 2.0
@Test
public void testSerialization()
{
    JsonCodec<AtopSplit> codec = JsonCodec.jsonCodec(AtopSplit.class);
    ZonedDateTime now = ZonedDateTime.now(ZoneId.of("+01:23"));
    AtopSplit split = new AtopSplit(HostAddress.fromParts("localhost", 123), now.toEpochSecond(), now.getZone());
    AtopSplit decoded = codec.fromJson(codec.toJson(split));

    assertEquals(decoded.getHost(), split.getHost());
    assertEquals(decoded.getDate(), split.getDate());
    assertEquals(decoded.getEpochSeconds(), split.getEpochSeconds());
    assertEquals(decoded.getTimeZone(), split.getTimeZone());
}
Example 13
Source File: AbstractTestHiveFileSystem.java From presto with Apache License 2.0
protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration)
{
    database = databaseName;
    table = new SchemaTableName(database, "presto_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "presto_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "presto_test_external_fs_with_header_and_footer");

    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_presto_test_create_" + random);

    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);

    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy"))
            .map(HostAndPort::fromString);

    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));

    ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);

    // Metastore access goes through a Thrift client wrapped in a testing implementation
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, false)),
            executor,
            getBasePath(),
            hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);

    // The PartitionUpdate codec is shared by the metadata factory and the page sink provider below
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            config,
            metastoreClient,
            hdfsEnvironment,
            hivePartitionManager,
            newDirectExecutorService(),
            heartbeatService,
            TYPE_MANAGER,
            locationService,
            partitionUpdateCodec,
            new HiveTypeTranslator(),
            new NodeVersion("test_version"),
            (metastore) -> new SqlStandardAccessControlMetadata(metastore));
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            hivePartitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()),
            new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new HiveCoercionPolicy(TYPE_MANAGER),
            new CounterStat(),
            config.getMaxOutstandingSplits(),
            config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(),
            config.getMaxPartitionBatchSize(),
            config.getMaxInitialSplits(),
            config.getSplitLoaderConcurrency(),
            config.getMaxSplitsPerSecond(),
            config.getRecursiveDirWalkerEnabled(),
            TYPE_MANAGER);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            metastoreClient,
            new GroupByHashPageIndexerFactory(new JoinCompiler(createTestMetadataManager())),
            TYPE_MANAGER,
            config,
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(config),
            new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TYPE_MANAGER,
            config,
            hdfsEnvironment,
            getDefaultHivePageSourceFactories(hdfsEnvironment),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, config));

    onSetupComplete();
}