org.apache.hadoop.hive.metastore.api.FieldSchema Java Examples
The following examples show how to use
org.apache.hadoop.hive.metastore.api.FieldSchema.
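All of these snippets revolve around the same small API surface: a FieldSchema is a (name, type, comment) triple describing one column, and a table's columns are carried as a List<FieldSchema> on its StorageDescriptor. The minimal sketch below shows that pattern in isolation; the class, table, and column names are made up for illustration.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class FieldSchemaSketch {
  public static void main(String[] args) {
    // Each FieldSchema holds a column name, a Hive type string, and an optional comment.
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("id", "int", "primary identifier"));
    cols.add(new FieldSchema("name", "string", null)); // the comment may be null

    // Column definitions are attached to a table through its StorageDescriptor.
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(cols);

    for (FieldSchema fs : sd.getCols()) {
      System.out.println(fs.getName() + ": " + fs.getType());
    }
  }
}

Partition keys use the same FieldSchema type but are set on the Table itself (see Table.setPartitionKeys in Examples #4, #11, and #26 below).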
Example #1
Source File: DynamoDBStorageHandlerTest.java From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaMappingMissingColumnMapping() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$,"
      + "col2:dynamo_col2#,hashKey:hashKey,hashMap:hashMap");
  table.setParameters(parameters);

  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("hashMap", "map<string,string>", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("Could not find column(s) for column mapping(s): ");
  exceptionRule.expectMessage("col2:dynamo_col2#");
  exceptionRule.expectMessage("hashkey:hashKey");
  storageHandler.checkTableSchemaMapping(description, table);
}
Example #2
Source File: TestUtils.java From circus-train with Apache License 2.0
private static String expandHql(
    String database,
    String table,
    List<FieldSchema> dataColumns,
    List<FieldSchema> partitionColumns) {
  List<String> dataColumnNames = toQualifiedColumnNames(table, dataColumns);
  List<String> partitionColumnNames = partitionColumns != null
      ? toQualifiedColumnNames(table, partitionColumns)
      : ImmutableList.<String>of();
  List<String> colNames = ImmutableList
      .<String>builder()
      .addAll(dataColumnNames)
      .addAll(partitionColumnNames)
      .build();
  String cols = COMMA_JOINER.join(colNames);
  return String.format("SELECT %s FROM `%s`.`%s`", cols, database, table);
}
Example #3
Source File: HiveUtils.java From kite with Apache License 2.0
static Table createEmptyTable(String namespace, String name) {
  Table table = new Table();
  table.setDbName(namespace);
  table.setTableName(name);
  table.setPartitionKeys(new ArrayList<FieldSchema>());
  table.setParameters(new HashMap<String, String>());

  StorageDescriptor sd = new StorageDescriptor();
  sd.setSerdeInfo(new SerDeInfo());
  sd.setNumBuckets(-1);
  sd.setBucketCols(new ArrayList<String>());
  sd.setCols(new ArrayList<FieldSchema>());
  sd.setParameters(new HashMap<String, String>());
  sd.setSortCols(new ArrayList<Order>());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());

  SkewedInfo skewInfo = new SkewedInfo();
  skewInfo.setSkewedColNames(new ArrayList<String>());
  skewInfo.setSkewedColValues(new ArrayList<List<String>>());
  skewInfo.setSkewedColValueLocationMaps(new HashMap<List<String>, String>());
  sd.setSkewedInfo(skewInfo);

  table.setSd(sd);

  return table;
}
Example #4
Source File: HiveAvroToOrcConverterTest.java From incubator-gobblin with Apache License 2.0
@Test
public void dropReplacedPartitionsTest() throws Exception {
  Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
  table.setTableType("VIRTUAL_VIEW");
  table.setPartitionKeys(ImmutableList.of(
      new FieldSchema("year", "string", ""),
      new FieldSchema("month", "string", "")));

  Partition part = new Partition();
  part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));

  SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
  SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);

  QueryBasedHiveConversionEntity conversionEntity =
      new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
  List<ImmutableMap<String, String>> expected = ImmutableList.of(
      ImmutableMap.of("year", "2015", "month", "12"),
      ImmutableMap.of("year", "2016", "month", "01"));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);

  // Make sure that a partition itself is not dropped
  Partition replacedSelf = new Partition();
  replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
  replacedSelf.setValues(ImmutableList.of("2016", "02"));

  conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable,
      Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
}
Example #5
Source File: AvroHiveTableStrategy.java From data-highway with Apache License 2.0
@Override
public Table newHiveTable(
    String databaseName,
    String tableName,
    String partitionColumnName,
    String location,
    Schema schema,
    int version) {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.toString());
  table.putToParameters("EXTERNAL", "TRUE");
  addRoadAnnotations(table);

  URI schemaUri = uriResolver.resolve(schema, table.getTableName(), version);
  table.putToParameters(AVRO_SCHEMA_URL, schemaUri.toString());
  table.putToParameters(AVRO_SCHEMA_VERSION, Integer.toString(version));
  table.setPartitionKeys(Arrays.asList(new FieldSchema(partitionColumnName, "string", null)));
  table.setSd(AvroStorageDescriptorFactory.create(location));

  return table;
}
Example #6
Source File: CircusTrainParquetSchemaEvolutionIntegrationTest.java From circus-train with Apache License 2.0
private void assertTable(
    HiveMetaStoreClient client,
    Schema schema,
    String database,
    String table,
    List<String> expectedData) throws Exception {
  assertThat(client.getAllTables(database).size(), is(1));

  Table hiveTable = client.getTable(database, table);
  List<FieldSchema> cols = hiveTable.getSd().getCols();
  assertThat(cols.size(), is(schema.getFields().size()));
  assertColumnSchema(schema, cols);

  PartitionIterator partitionIterator = new PartitionIterator(client, hiveTable, (short) 1000);
  List<Partition> partitions = new ArrayList<>();
  while (partitionIterator.hasNext()) {
    Partition partition = partitionIterator.next();
    assertColumnSchema(schema, partition.getSd().getCols());
    partitions.add(partition);
  }
  assertThat(partitions.size(), is(2));

  List<String> data = shell.executeQuery("select * from " + database + "." + table);
  assertThat(data.size(), is(expectedData.size()));
  assertThat(data.containsAll(expectedData), is(true));
}
Example #7
Source File: DatePartitionHiveVersionFinder.java From incubator-gobblin with Apache License 2.0
public DatePartitionHiveVersionFinder(FileSystem fs, Config config) {
  this.pattern = ConfigUtils.getString(config, PARTITION_VALUE_DATE_TIME_PATTERN_KEY,
      DEFAULT_PARTITION_VALUE_DATE_TIME_PATTERN);

  if (config.hasPath(PARTITION_VALUE_DATE_TIME_TIMEZONE_KEY)) {
    this.formatter = DateTimeFormat.forPattern(pattern)
        .withZone(DateTimeZone.forID(config.getString(PARTITION_VALUE_DATE_TIME_TIMEZONE_KEY)));
  } else {
    this.formatter = DateTimeFormat.forPattern(pattern)
        .withZone(DateTimeZone.forID(DEFAULT_PARTITION_VALUE_DATE_TIME_TIMEZONE));
  }

  this.partitionKeyName = ConfigUtils.getString(config, PARTITION_KEY_NAME_KEY, DEFAULT_PARTITION_KEY_NAME);
  this.partitionKeyNamePredicate = new Predicate<FieldSchema>() {
    @Override
    public boolean apply(FieldSchema input) {
      return StringUtils.equalsIgnoreCase(input.getName(), DatePartitionHiveVersionFinder.this.partitionKeyName);
    }
  };
}
Example #8
Source File: HiveMetaStoreUtils.java From incubator-gobblin with Apache License 2.0
/**
 * First tries getting the {@code FieldSchema}s from the {@code HiveRegistrationUnit}'s columns, if set.
 * Else, gets the {@code FieldSchema}s from the deserializer.
 */
private static List<FieldSchema> getFieldSchemas(HiveRegistrationUnit unit) {
  List<Column> columns = unit.getColumns();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  if (columns != null && columns.size() > 0) {
    fieldSchemas = getFieldSchemas(columns);
  } else {
    Deserializer deserializer = getDeserializer(unit);
    if (deserializer != null) {
      try {
        fieldSchemas = MetaStoreUtils.getFieldsFromDeserializer(unit.getTableName(), deserializer);
      } catch (SerDeException | MetaException e) {
        LOG.warn("Encountered exception while getting fields from deserializer.", e);
      }
    }
  }
  return fieldSchemas;
}
Example #9
Source File: MockThriftMetastoreClient.java From presto with Apache License 2.0
@Override
public Table getTable(String dbName, String tableName) throws TException {
  accessCount.incrementAndGet();
  if (throwException) {
    throw new RuntimeException();
  }
  if (!dbName.equals(TEST_DATABASE) || !tableName.equals(TEST_TABLE)) {
    throw new NoSuchObjectException();
  }
  return new Table(
      TEST_TABLE,
      TEST_DATABASE,
      "",
      0,
      0,
      0,
      DEFAULT_STORAGE_DESCRIPTOR,
      ImmutableList.of(new FieldSchema("key", "string", null)),
      ImmutableMap.of(),
      "",
      "",
      TableType.MANAGED_TABLE.name());
}
Example #10
Source File: HiveTableUtil.java From flink with Apache License 2.0
/**
 * Create a Flink TableSchema from a Hive table's columns and partition keys.
 */
public static TableSchema createTableSchema(List<FieldSchema> cols, List<FieldSchema> partitionKeys) {
  List<FieldSchema> allCols = new ArrayList<>(cols);
  allCols.addAll(partitionKeys);

  String[] colNames = new String[allCols.size()];
  DataType[] colTypes = new DataType[allCols.size()];

  for (int i = 0; i < allCols.size(); i++) {
    FieldSchema fs = allCols.get(i);
    colNames[i] = fs.getName();
    colTypes[i] = HiveTypeUtil.toFlinkType(TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
  }

  return TableSchema.builder()
      .fields(colNames, colTypes)
      .build();
}
Example #11
Source File: LocalHiveMetastoreTestUtils.java From incubator-gobblin with Apache License 2.0
public Table createTestAvroTable(String dbName, String tableName, String tableSdLoc,
    Optional<String> partitionFieldName, boolean ignoreDbCreation) throws Exception {
  if (!ignoreDbCreation) {
    createTestDb(dbName);
  }

  Table tbl = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(dbName, tableName);
  tbl.getSd().setLocation(tableSdLoc);
  tbl.getSd().getSerdeInfo().setSerializationLib(AvroSerDe.class.getName());
  tbl.getSd().getSerdeInfo().setParameters(ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy"));

  if (partitionFieldName.isPresent()) {
    tbl.addToPartitionKeys(new FieldSchema(partitionFieldName.get(), "string", "some comment"));
  }

  this.localMetastoreClient.createTable(tbl);
  return tbl;
}
Example #12
Source File: HiveMetadataUtils.java From dremio-oss with Apache License 2.0
public static List<PartitionValue> getPartitionValues(Table table, Partition partition, boolean enableVarcharWidth) {
  if (partition == null) {
    return Collections.emptyList();
  }

  final List<String> partitionValues = partition.getValues();
  final List<PartitionValue> output = new ArrayList<>();
  final List<FieldSchema> partitionKeys = table.getPartitionKeys();
  for (int i = 0; i < partitionKeys.size(); i++) {
    final PartitionValue value = getPartitionValue(partitionKeys.get(i), partitionValues.get(i), enableVarcharWidth);
    if (value != null) {
      output.add(value);
    }
  }
  return output;
}
Example #13
Source File: DiffGeneratedPartitionPredicateTest.java From circus-train with Apache License 2.0
private void setupHiveTables() throws TException, IOException {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("p1"), newFieldSchema("p2"));

  File tableLocation = new File("db1", "table1");
  StorageDescriptor sd = newStorageDescriptor(tableLocation, "col0");
  table1 = newTable("table1", "db1", partitionKeys, sd);
  Partition partition1 = newPartition(table1, "value1", "value2");
  Partition partition2 = newPartition(table1, "value11", "value22");
  table1Partitions = Arrays.asList(partition1, partition2);
  table1PartitionNames = Arrays
      .asList(Warehouse.makePartName(partitionKeys, partition1.getValues()),
          Warehouse.makePartName(partitionKeys, partition2.getValues()));

  File tableLocation2 = new File("db2", "table2");
  StorageDescriptor sd2 = newStorageDescriptor(tableLocation2, "col0");
  table2 = newTable("table2", "db2", partitionKeys, sd2);
}
Example #14
Source File: PartitionLevelWatermarkerTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testDroppedPartitions() throws Exception {
  WorkUnitState previousWus = new WorkUnitState();
  previousWus.setProp(ConfigurationKeys.DATASET_URN_KEY, "db@test_dataset_urn");
  previousWus.setProp(PartitionLevelWatermarker.IS_WATERMARK_WORKUNIT_KEY, true);
  previousWus.setActualHighWatermark(
      new MultiKeyValueLongWatermark(ImmutableMap.of("2015-01", 100L, "2015-02", 101L)));

  SourceState state = new SourceState(new State(), Lists.newArrayList(previousWus));
  PartitionLevelWatermarker watermarker = new PartitionLevelWatermarker(state);

  Table table = mockTable("test_dataset_urn");
  Mockito.when(table.getPartitionKeys()).thenReturn(ImmutableList.of(new FieldSchema("year", "string", "")));

  Partition partition2015 = mockPartition(table, ImmutableList.of("2015"));

  // partition 2015 replaces 2015-01 and 2015-02
  Mockito.when(partition2015.getParameters()).thenReturn(
      ImmutableMap.of(AbstractAvroToOrcConverter.REPLACED_PARTITIONS_HIVE_METASTORE_KEY, "2015-01|2015-02"));
  watermarker.onPartitionProcessBegin(partition2015, 0L, 0L);

  Assert.assertEquals(watermarker.getExpectedHighWatermarks().get("db@test_dataset_urn"),
      ImmutableMap.of("2015", 0L));
}
Example #15
Source File: HiveMetastoreServiceJDBCImplTest.java From griffin with Apache License 2.0
@Test
public void testGetTable() throws SQLException {
  String meta = "CREATE EXTERNAL TABLE `default.session_data`( "
      + "`session_date` string COMMENT 'this is session date', "
      + "`site_id` int COMMENT '', "
      + "`guid` string COMMENT '', "
      + "`user_id` string COMMENT '')"
      + "COMMENT 'session_data for session team' "
      + "PARTITIONED BY ( `dt` string, `place` int) "
      + "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' "
      + "STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' "
      + "OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' "
      + "LOCATION 'hdfs://localhost/session/common/session_data'"
      + "TBLPROPERTIES ( 'COLUMN_STATS_ACCURATE'='false', "
      + "'avro.schema.url'='hdfs://localhost/griffin/session/avro/session-data-1.0.avsc', "
      + "'transient_lastDdlTime'='1535651637')";

  when(conn.createStatement()).thenReturn(stmt);
  when(stmt.executeQuery(anyString())).thenReturn(rs);
  when(rs.next()).thenReturn(true).thenReturn(false);
  when(rs.getString(anyInt())).thenReturn(meta);

  Table res = serviceJdbc.getTable("default", "session_data");
  assert (res.getDbName().equals("default"));
  assert (res.getTableName().equals("session_data"));
  assert (res.getSd().getLocation().equals("hdfs://localhost/session/common/session_data"));

  List<FieldSchema> fieldSchemas = res.getSd().getCols();
  for (FieldSchema fieldSchema : fieldSchemas) {
    Assert.assertEquals(fieldSchema.getName(), "session_date");
    Assert.assertEquals(fieldSchema.getType(), "string");
    Assert.assertEquals(fieldSchema.getComment(), "this is session date");
    break; // only the first column is checked
  }
}
Example #16
Source File: TestSchemaConversion.java From kite with Apache License 2.0
@Test
public void testConvertSchemaWithComplexRecord() {
  // convertSchema returns a list of FieldSchema objects rather than TypeInfo
  List<FieldSchema> fields = HiveSchemaConverter.convertSchema(COMPLEX_RECORD);

  Assert.assertEquals("Field names should match",
      Lists.newArrayList("groupName", "simpleRecords"),
      Lists.transform(fields, GET_NAMES));
  Assert.assertEquals("Field types should match",
      Lists.newArrayList(
          STRING_TYPE_INFO.toString(),
          TypeInfoFactory.getListTypeInfo(
              TypeInfoFactory.getStructTypeInfo(
                  Lists.newArrayList("id", "name"),
                  Lists.newArrayList(
                      INT_TYPE_INFO,
                      STRING_TYPE_INFO))).toString()),
      Lists.transform(fields, GET_TYPE_STRINGS));
}
Example #17
Source File: DynamoDBStorageHandlerTest.java From emr-dynamodb-connector with Apache License 2.0
@Test
public void testCheckTableSchemaMappingMissingColumn() throws MetaException {
  TableDescription description = getHashRangeTable();

  Table table = new Table();
  Map<String, String> parameters = Maps.newHashMap();
  parameters.put(DynamoDBConstants.DYNAMODB_COLUMN_MAPPING, "col1:dynamo_col1$,hashMap:hashMap");
  table.setParameters(parameters);

  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = Lists.newArrayList();
  cols.add(new FieldSchema("col1", "string", ""));
  cols.add(new FieldSchema("col2", "tinyint", ""));
  cols.add(new FieldSchema("col3", "string", ""));
  cols.add(new FieldSchema("hashMap", "map<string,string>", ""));
  sd.setCols(cols);
  table.setSd(sd);

  exceptionRule.expect(MetaException.class);
  exceptionRule.expectMessage("Could not find column mapping for column: col2");
  storageHandler.checkTableSchemaMapping(description, table);
}
Example #18
Source File: HiveClientWrapper.java From pxf with Apache License 2.0
/**
 * Populates the given metadata object with the given table's fields and partitions.
 * The partition fields are added at the end of the table schema.
 * Throws an exception if the table contains unsupported field types.
 * Supported HCatalog types: TINYINT, SMALLINT, INT, BIGINT, BOOLEAN, FLOAT, DOUBLE,
 * STRING, BINARY, TIMESTAMP, DATE, DECIMAL, VARCHAR, CHAR.
 *
 * @param tbl Hive table
 * @param metadata schema of given table
 */
public void getSchema(Table tbl, Metadata metadata) {
  int hiveColumnsSize = tbl.getSd().getColsSize();
  int hivePartitionsSize = tbl.getPartitionKeysSize();
  LOG.debug("Hive table: {} fields. {} partitions.", hiveColumnsSize, hivePartitionsSize);

  // check hive fields
  try {
    List<FieldSchema> hiveColumns = tbl.getSd().getCols();
    for (FieldSchema hiveCol : hiveColumns) {
      metadata.addField(HiveUtilities.mapHiveType(hiveCol));
    }
    // check partition fields
    List<FieldSchema> hivePartitions = tbl.getPartitionKeys();
    for (FieldSchema hivePart : hivePartitions) {
      metadata.addField(HiveUtilities.mapHiveType(hivePart));
    }
  } catch (UnsupportedTypeException e) {
    String errorMsg = "Failed to retrieve metadata for table " + metadata.getItem() + ". "
        + e.getMessage();
    throw new UnsupportedTypeException(errorMsg);
  }
}
Example #19
Source File: HiveTableUtil.java From flink with Apache License 2.0
/**
 * Create properties info to initialize a SerDe.
 *
 * @param storageDescriptor the storage descriptor of the Hive table or partition
 * @return the properties used to initialize the SerDe
 */
public static Properties createPropertiesFromStorageDescriptor(StorageDescriptor storageDescriptor) {
  SerDeInfo serDeInfo = storageDescriptor.getSerdeInfo();
  Map<String, String> parameters = serDeInfo.getParameters();
  Properties properties = new Properties();
  properties.setProperty(
      serdeConstants.SERIALIZATION_FORMAT,
      parameters.get(serdeConstants.SERIALIZATION_FORMAT));

  List<String> colTypes = new ArrayList<>();
  List<String> colNames = new ArrayList<>();
  List<FieldSchema> cols = storageDescriptor.getCols();
  for (FieldSchema col : cols) {
    colTypes.add(col.getType());
    colNames.add(col.getName());
  }
  properties.setProperty(serdeConstants.LIST_COLUMNS,
      StringUtils.join(colNames, String.valueOf(SerDeUtils.COMMA)));
  // Note: serdeConstants.COLUMN_NAME_DELIMITER is not defined in previous Hive. We use a literal to save on shim
  properties.setProperty("column.name.delimiter", String.valueOf(SerDeUtils.COMMA));
  properties.setProperty(serdeConstants.LIST_COLUMN_TYPES,
      StringUtils.join(colTypes, DEFAULT_LIST_COLUMN_TYPES_SEPARATOR));
  properties.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, "NULL");
  properties.putAll(parameters);
  return properties;
}
Example #20
Source File: HiveUtilitiesTest.java From pxf with Apache License 2.0
@Test
public void mapHiveTypeUnsupported() throws Exception {
  hiveColumn = new FieldSchema("complex", "someTypeWeDontSupport", null);
  try {
    HiveUtilities.mapHiveType(hiveColumn);
    fail("unsupported type");
  } catch (UnsupportedTypeException e) {
    assertEquals("Unable to map Hive's type: " + hiveColumn.getType() + " to GPDB's type",
        e.getMessage());
  }
}
Example #21
Source File: FederatedHMSHandler.java From waggle-dance with Apache License 2.0
@Override
@Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME)
public List<FieldSchema> get_fields(String db_name, String table_name)
    throws MetaException, UnknownTableException, UnknownDBException, TException {
  DatabaseMapping mapping = databaseMappingService.databaseMapping(db_name);
  return mapping.getClient().get_fields(mapping.transformInboundDatabaseName(db_name), table_name);
}
Example #22
Source File: HiveMetadataFetcherTest.java From pxf with Apache License 2.0
@Test
public void getTableMetadata() throws Exception {
  fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);
  String tableName = "cause";

  // mock hive table returned from hive client
  List<FieldSchema> fields = new ArrayList<>();
  fields.add(new FieldSchema("field1", "string", null));
  fields.add(new FieldSchema("field2", "int", null));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(fields);
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  Table hiveTable = new Table();
  hiveTable.setTableType("MANAGED_TABLE");
  hiveTable.setSd(sd);
  hiveTable.setPartitionKeys(new ArrayList<>());
  when(mockHiveClient.getTable("default", tableName)).thenReturn(hiveTable);

  // Get metadata
  metadataList = fetcher.getMetadata(tableName);
  Metadata metadata = metadataList.get(0);

  assertEquals("default.cause", metadata.getItem().toString());

  List<Metadata.Field> resultFields = metadata.getFields();
  assertNotNull(resultFields);
  assertEquals(2, resultFields.size());
  Metadata.Field field = resultFields.get(0);
  assertEquals("field1", field.getName());
  assertEquals("text", field.getType().getTypeName()); // converted type
  field = resultFields.get(1);
  assertEquals("field2", field.getName());
  assertEquals("int4", field.getType().getTypeName());
}
Example #23
Source File: PartitionsAndStatisticsTest.java From circus-train with Apache License 2.0
@Test
public void emptyListOfPartitions() throws Exception {
  List<FieldSchema> partitionKeys = Lists.newArrayList(newFieldSchema("a"));
  List<Partition> partitions = Lists.newArrayList();

  PartitionsAndStatistics partitionsAndStatistics =
      new PartitionsAndStatistics(partitionKeys, partitions, statisticsPerPartitionName);

  assertThat(partitionsAndStatistics.getPartitionNames(), is(empty()));
  assertThat(partitionsAndStatistics.getPartitions(), is(empty()));
  assertThat(partitionsAndStatistics.getPartitionKeys(), is(partitionKeys));
}
Example #24
Source File: TableAndMetadataComparatorTest.java From circus-train with Apache License 2.0
@Test
public void sdColsShortCircuit() {
  left.getTable().getSd().setCols(ImmutableList.of(new FieldSchema("left", "type", "comment")));

  List<Diff<Object, Object>> diffs = newTableAndMetadataComparator(SHORT_CIRCUIT).compare(left, right);

  assertThat(diffs, is(notNullValue()));
  assertThat(diffs.size(), is(1));
  assertThat(diffs.get(0), is(newDiff(
      "Collection table.sd.cols of class com.google.common.collect.SingletonImmutableList has different size: left.size()=1 and right.size()=2",
      left.getTable().getSd().getCols(),
      right.getTable().getSd().getCols())));
}
Example #25
Source File: TestSchemaConversion.java From kite with Apache License 2.0
@Test
public void testConvertSchemaWithSimpleRecord() {
  // convertSchema returns a list of FieldSchema objects rather than TypeInfo
  List<FieldSchema> fields = HiveSchemaConverter.convertSchema(SIMPLE_RECORD);

  Assert.assertEquals("Field names should match",
      Lists.newArrayList("id", "name"),
      Lists.transform(fields, GET_NAMES));
  Assert.assertEquals("Field types should match",
      Lists.newArrayList(
          INT_TYPE_INFO.toString(),
          STRING_TYPE_INFO.toString()),
      Lists.transform(fields, GET_TYPE_STRINGS));
}
Example #26
Source File: ExpressionHelperTest.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
@Before
public void setUp() {
  List<FieldSchema> partitionKeys = ImmutableList.of(
      makeFieldSchema("name", "string"),
      makeFieldSchema("birthday", "date"),
      makeFieldSchema("age", "int"));
  table = mock(org.apache.hadoop.hive.metastore.api.Table.class);
  when(table.getPartitionKeys()).thenReturn(partitionKeys);
}
Example #27
Source File: HiveTableUtil.java From flink with Apache License 2.0
/**
 * Create Hive columns from a Flink TableSchema.
 */
public static List<FieldSchema> createHiveColumns(TableSchema schema) {
  String[] fieldNames = schema.getFieldNames();
  DataType[] fieldTypes = schema.getFieldDataTypes();

  List<FieldSchema> columns = new ArrayList<>(fieldNames.length);
  for (int i = 0; i < fieldNames.length; i++) {
    columns.add(new FieldSchema(
        fieldNames[i],
        HiveTypeUtil.toHiveTypeInfo(fieldTypes[i], true).getTypeName(),
        null));
  }
  return columns;
}
Example #28
Source File: HiveEndpoint.java From circus-train with Apache License 2.0
private List<String> getColumnNames(Table table) {
  List<FieldSchema> fields = table.getSd().getCols();
  List<String> columnNames = new ArrayList<>(fields.size());
  for (FieldSchema field : fields) {
    columnNames.add(field.getName());
  }
  return columnNames;
}
Example #29
Source File: HiveTablePartition.java From pxf with Apache License 2.0
HiveTablePartition(StorageDescriptor storageDesc, Properties properties, Partition partition,
    List<FieldSchema> partitionKeys, String tableName) {
  this.storageDesc = storageDesc;
  this.properties = properties;
  this.partition = partition;
  this.partitionKeys = partitionKeys;
  this.tableName = tableName;
}
Example #30
Source File: FederatedHMSHandler.java From waggle-dance with Apache License 2.0
@Override
@Loggable(value = Loggable.DEBUG, skipResult = true, name = INVOCATION_LOG_NAME)
public List<FieldSchema> get_fields_with_environment_context(
    String db_name,
    String table_name,
    EnvironmentContext environment_context)
    throws MetaException, UnknownTableException, UnknownDBException, TException {
  DatabaseMapping mapping = databaseMappingService.databaseMapping(db_name);
  return mapping
      .getClient()
      .get_fields_with_environment_context(mapping.transformInboundDatabaseName(db_name), table_name,
          environment_context);
}