org.apache.hadoop.hive.conf.HiveConf Java Examples
The following examples show how to use org.apache.hadoop.hive.conf.HiveConf.
The source file, project, and license are noted above each example.
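As a quick orientation before the examples, here is a minimal sketch of the pattern most of them share: construct a HiveConf, override settings through typed ConfVars accessors or raw string keys, and read values back. The class name HiveConfSketch and the thrift://localhost:9083 URI are illustrative placeholders, not values taken from the examples below.

import org.apache.hadoop.hive.conf.HiveConf;

public class HiveConfSketch {
  public static void main(String[] args) {
    // Constructing a HiveConf picks up hive-site.xml from the classpath if present.
    HiveConf conf = new HiveConf();

    // Typed access through ConfVars avoids typos in property names.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083"); // placeholder URI
    conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false);

    // Raw string keys still work, e.g. for properties without a ConfVars entry.
    conf.set("hive.metastore.schema.verification", "false");

    // Values can be read back the same two ways.
    System.out.println(conf.getVar(HiveConf.ConfVars.METASTOREURIS));
  }
}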
Example #1
Source File: TestHiveUtils.java From kite with Apache License 2.0
@Test
public void testRoundTripDescriptorNoCompressionProperty() throws Exception {
  String namespace = "test_ns";
  String name = "test_table";
  DatasetDescriptor original = new DatasetDescriptor.Builder()
      .schemaUri("resource:schema/user.avsc")
      .location("file:/tmp/data/test_table")
      .build();
  boolean external = true;
  Table table = HiveUtils.tableForDescriptor(namespace, name, original, external);
  assertEquals("snappy", table.getParameters().get("kite.compression.type"));
  table.getParameters().remove("kite.compression.type");

  Configuration conf = new HiveConf();
  DatasetDescriptor result = HiveUtils.descriptorForTable(conf, table);
  assertEquals(original, result);
}
Example #2
Source File: RangerHiveAuthorizerBase.java From ranger with Apache License 2.0
@Override
public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException {
  LOG.debug("RangerHiveAuthorizerBase.applyAuthorizationConfigPolicy()");

  // from SQLStdHiveAccessController.applyAuthorizationConfigPolicy()
  if (mSessionContext != null && mSessionContext.getClientType() == CLIENT_TYPE.HIVESERVER2) {
    // Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries
    String hooks = hiveConf.getVar(ConfVars.PREEXECHOOKS).trim();
    if (hooks.isEmpty()) {
      hooks = DisallowTransformHook.class.getName();
    } else {
      hooks = hooks + "," + DisallowTransformHook.class.getName();
    }
    hiveConf.setVar(ConfVars.PREEXECHOOKS, hooks);

    SettableConfigUpdater.setHiveConfWhiteList(hiveConf);
  }
}
Example #3
Source File: SentryAuthorizerFactory.java From incubator-sentry with Apache License 2.0
/**
 * Gets an instance of SentryHiveAccessController from the configuration.
 * Returns DefaultSentryAccessController by default.
 *
 * @param conf
 * @param authzConf
 * @param authenticator
 * @param ctx
 * @throws HiveAuthzPluginException
 */
public static SentryHiveAccessController getAccessController(HiveConf conf,
    HiveAuthzConf authzConf, HiveAuthenticationProvider authenticator,
    HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
  Class<? extends SentryHiveAccessController> clazz =
      conf.getClass(HIVE_SENTRY_ACCESS_CONTROLLER, DefaultSentryAccessController.class,
          SentryHiveAccessController.class);

  if (clazz == null) {
    // should not happen, as a default value is set
    throw new HiveAuthzPluginException("Configuration value " + HIVE_SENTRY_ACCESS_CONTROLLER
        + " is not set to valid SentryAccessController subclass");
  }

  try {
    // note: the default implementation is instantiated here regardless of the configured class
    return new DefaultSentryAccessController(conf, authzConf, authenticator, ctx);
  } catch (Exception e) {
    throw new HiveAuthzPluginException(e);
  }
}
Example #4
Source File: CommonBeans.java From circus-train with Apache License 2.0
private Supplier<CloseableMetaStoreClient> metaStoreClientSupplier(
    HiveConf hiveConf,
    String name,
    MetastoreTunnel metastoreTunnel,
    MetaStoreClientFactory metaStoreClientFactory) {
  if (metastoreTunnel != null) {
    return new TunnellingMetaStoreClientSupplierBuilder()
        .withName(name)
        .withRoute(metastoreTunnel.getRoute())
        .withKnownHosts(metastoreTunnel.getKnownHosts())
        .withLocalHost(metastoreTunnel.getLocalhost())
        .withPort(metastoreTunnel.getPort())
        .withPrivateKeys(metastoreTunnel.getPrivateKeys())
        .withTimeout(metastoreTunnel.getTimeout())
        .withStrictHostKeyChecking(metastoreTunnel.getStrictHostKeyChecking())
        .build(hiveConf, metaStoreClientFactory);
  } else {
    return new HiveMetaStoreClientSupplier(metaStoreClientFactory, hiveConf, name);
  }
}
Example #5
Source File: HiveMetaStoreClientFactoryTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testCreate() throws TException {
  HiveConf hiveConf = new HiveConf();
  HiveMetaStoreClientFactory factory = new HiveMetaStoreClientFactory(hiveConf);

  // A hive-site.xml on the classpath sets these values, so null them out here for the test to
  // proceed. Without a local hive-site, they would default to empty strings anyway.
  hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "");
  hiveConf.set(HIVE_METASTORE_TOKEN_SIGNATURE, "");
  IMetaStoreClient msc = factory.create();

  String dbName = "test_db";
  String description = "test database";
  String location = "file:/tmp/" + dbName;
  Database db = new Database(dbName, description, location, null);
  msc.dropDatabase(dbName, true, true);
  msc.createDatabase(db);
  db = msc.getDatabase(dbName);
  Assert.assertEquals(db.getName(), dbName);
  Assert.assertEquals(db.getDescription(), description);
  Assert.assertEquals(db.getLocationUri(), location);
}
Example #6
Source File: FlinkStandaloneHiveServerContext.java From flink with Apache License 2.0
private void configureMetaStore(HiveConf conf) {
  String jdbcDriver = org.apache.derby.jdbc.EmbeddedDriver.class.getName();
  try {
    Class.forName(jdbcDriver);
  } catch (ClassNotFoundException e) {
    throw new RuntimeException(e);
  }

  // Set the Hive Metastore DB driver
  // (hiveConf below is a field of the enclosing context, distinct from the conf parameter)
  hiveConf.set("datanucleus.schema.autoCreateAll", "true");
  hiveConf.set("hive.metastore.schema.verification", "false");
  hiveConf.set("hive.metastore.uris", toHmsURI());
  // No pooling needed. This will save us a lot of threads
  hiveConf.set("datanucleus.connectionPoolingType", "None");

  conf.setBoolVar(METASTORE_VALIDATE_CONSTRAINTS, true);
  conf.setBoolVar(METASTORE_VALIDATE_COLUMNS, true);
  conf.setBoolVar(METASTORE_VALIDATE_TABLES, true);

  // disable authorization to avoid NPE
  conf.set(HIVE_AUTHORIZATION_MANAGER.varname,
      "org.apache.hive.hcatalog.storagehandler.DummyHCatAuthProvider");
}
Example #7
Source File: HiveConfFactoryTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testHiveConfFactory() throws Exception {
  HiveConf hiveConf = HiveConfFactory.get(Optional.absent(), SharedResourcesBrokerFactory.getImplicitBroker());
  HiveConf hiveConf1 = HiveConfFactory.get(Optional.absent(), SharedResourcesBrokerFactory.getImplicitBroker());
  Assert.assertEquals(hiveConf, hiveConf1);

  // When there's no hcatURI specified, the default hive-site should be loaded.
  Assert.assertTrue(hiveConf.getVar(METASTOREURIS).equals("file:///test"));
  Assert.assertTrue(hiveConf.get(HIVE_METASTORE_TOKEN_SIGNATURE).equals("file:///test"));

  HiveConf hiveConf2 = HiveConfFactory.get(Optional.of("hcat1"), SharedResourcesBrokerFactory.getImplicitBroker());
  HiveConf hiveConf3 = HiveConfFactory.get(Optional.of("hcat1"), SharedResourcesBrokerFactory.getImplicitBroker());
  Assert.assertEquals(hiveConf2, hiveConf3);
  HiveConf hiveConf4 = HiveConfFactory.get(Optional.of("hcat11"), SharedResourcesBrokerFactory.getImplicitBroker());
  Assert.assertNotEquals(hiveConf3, hiveConf4);
  Assert.assertNotEquals(hiveConf4, hiveConf);

  // The URIs should be correctly set.
  Assert.assertEquals(hiveConf3.getVar(METASTOREURIS), "hcat1");
  Assert.assertEquals(hiveConf3.get(HIVE_METASTORE_TOKEN_SIGNATURE), "hcat1");
  Assert.assertEquals(hiveConf4.getVar(METASTOREURIS), "hcat11");
  Assert.assertEquals(hiveConf4.get(HIVE_METASTORE_TOKEN_SIGNATURE), "hcat11");
}
Example #8
Source File: BaseTestHiveImpersonation.java From dremio-oss with Apache License 2.0
protected static void prepHiveConfAndData() throws Exception {
  hiveConf = new HiveConf();

  // Configure metastore persistence db location on local filesystem
  final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true", getTempDir("metastore_db"));
  hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);

  hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
  hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));
  hiveConf.set(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");
  hiveConf.set(ConfVars.METASTORE_AUTO_CREATE_ALL.varname, "true");
  hiveConf.set(ConfVars.HIVE_CBO_ENABLED.varname, "false");

  // Set MiniDFS conf in HiveConf
  hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

  whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
  FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

  studentData = getPhysicalFileFromResource("student.txt");
  voterData = getPhysicalFileFromResource("voter.txt");
}
Example #9
Source File: HiveMetadataService.java From streamline with Apache License 2.0
/**
 * Creates a secure {@link HiveMetadataService}, which delegates to a {@link HiveMetaStoreClient}
 * instantiated with the {@link HiveConf} provided as the first parameter.
 */
public static HiveMetadataService newInstance(HiveConf hiveConf, SecurityContext securityContext,
    Subject subject, Component hiveMetastore, Collection<ComponentProcess> hiveMetastoreProcesses)
      throws MetaException, IOException, EntityNotFoundException, PrivilegedActionException {

  if (SecurityUtil.isKerberosAuthenticated(securityContext)) {
    UserGroupInformation.setConfiguration(hiveConf);  // Sets Kerberos rules
    UserGroupInformation.getUGIFromSubject(subject);  // Adds User principal to this subject

    return new HiveMetadataService(
        SecurityUtil.execute(() -> new HiveMetaStoreClient(hiveConf), securityContext, subject),
        hiveConf, securityContext, subject, hiveMetastore, hiveMetastoreProcesses);
  } else {
    return new HiveMetadataService(new HiveMetaStoreClient(hiveConf),
        hiveConf, securityContext, subject, hiveMetastore, hiveMetastoreProcesses);
  }
}
Example #10
Source File: HiveSyncTool.java From hudi with Apache License 2.0
public HiveSyncTool(HiveSyncConfig cfg, HiveConf configuration, FileSystem fs) {
  this.hoodieHiveClient = new HoodieHiveClient(cfg, configuration, fs);
  this.cfg = cfg;
  // Set partitionFields to empty when the NonPartitionedExtractor is used
  if (NonPartitionedExtractor.class.getName().equals(cfg.partitionValueExtractorClass)) {
    LOG.warn("Set partitionFields to empty, since the NonPartitionedExtractor is used");
    cfg.partitionFields = new ArrayList<>();
  }
  switch (hoodieHiveClient.getTableType()) {
    case COPY_ON_WRITE:
      this.snapshotTableName = cfg.tableName;
      this.roTableTableName = Option.empty();
      break;
    case MERGE_ON_READ:
      this.snapshotTableName = cfg.tableName + SUFFIX_SNAPSHOT_TABLE;
      this.roTableTableName = cfg.skipROSuffix
          ? Option.of(cfg.tableName)
          : Option.of(cfg.tableName + SUFFIX_READ_OPTIMIZED_TABLE);
      break;
    default:
      LOG.error("Unknown table type " + hoodieHiveClient.getTableType());
      throw new InvalidTableException(hoodieHiveClient.getBasePath());
  }
}
Example #11
Source File: HiveTestDataGenerator.java From dremio-oss with Apache License 2.0
private HiveConf newHiveConf() {
  HiveConf conf = new HiveConf(SessionState.class);

  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
  // Metastore warehouse needs to be set, and WITH the deprecated key :(
  // Otherwise, it will default to /user/hive/warehouse when trying to create a new database
  // (the database location is now sent by the client to the server...)
  HiveConf.setVar(conf, ConfVars.METASTOREWAREHOUSE, whDir);
  conf.set("mapred.job.tracker", "local");
  HiveConf.setVar(conf, ConfVars.SCRATCHDIR, getTempDir("scratch_dir"));
  HiveConf.setVar(conf, ConfVars.LOCALSCRATCHDIR, getTempDir("local_scratch_dir"));
  HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  HiveConf.setBoolVar(conf, ConfVars.HIVE_CBO_ENABLED, false);

  return conf;
}
Example #12
Source File: GlueMetastoreClientDelegateTest.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
@Before
public void setup() throws Exception {
  conf = new HiveConf();
  glueClient = mock(AWSGlue.class);
  wh = mock(Warehouse.class);
  metastoreClientDelegate = new GlueMetastoreClientDelegate(conf,
      new DefaultAWSGlueMetastore(conf, glueClient), wh);

  // Create a client delegate with CatalogId
  hiveConfCatalogId = new HiveConf();
  hiveConfCatalogId.set(GlueMetastoreClientDelegate.CATALOG_ID_CONF, CATALOG_ID);
  metastoreClientDelegateCatalogId = new GlueMetastoreClientDelegate(hiveConfCatalogId,
      new DefaultAWSGlueMetastore(hiveConfCatalogId, glueClient), wh);

  testDb = getTestDatabase();
  testTbl = getTestTable(testDb.getName());
  setupMockWarehouseForPath(new Path(testTbl.getStorageDescriptor().getLocation().toString()), false, true);
}
Example #13
Source File: OrcFlowFileWriter.java From nifi with Apache License 2.0
StringTreeWriter(int columnId,
                 ObjectInspector inspector,
                 StreamFactory writer,
                 boolean nullable) throws IOException {
  super(columnId, inspector, writer, nullable);
  this.isDirectV2 = isNewWriteFormat(writer);
  stringOutput = writer.createStream(id, OrcProto.Stream.Kind.DICTIONARY_DATA);
  lengthOutput = createIntegerWriter(writer.createStream(id, OrcProto.Stream.Kind.LENGTH), false, isDirectV2, writer);
  rowOutput = createIntegerWriter(writer.createStream(id, OrcProto.Stream.Kind.DATA), false, isDirectV2, writer);
  recordPosition(rowIndexPosition);
  rowIndexValueCount.add(0L);
  buildIndex = writer.buildIndex();
  directStreamOutput = writer.createStream(id, OrcProto.Stream.Kind.DATA);
  directLengthOutput = createIntegerWriter(writer.createStream(id, OrcProto.Stream.Kind.LENGTH), false, isDirectV2, writer);
  dictionaryKeySizeThreshold = writer.getConfiguration().getFloat(
      HiveConf.ConfVars.HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD.varname,
      HiveConf.ConfVars.HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD.defaultFloatVal);
  strideDictionaryCheck = writer.getConfiguration().getBoolean(
      HiveConf.ConfVars.HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK.varname,
      HiveConf.ConfVars.HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK.defaultBoolVal);
  doneDictionaryCheck = false;
}
Example #14
Source File: TableEnvHiveConnectorITCase.java From flink with Apache License 2.0
@Test
public void testDefaultPartitionName() throws Exception {
  TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
  tableEnv.executeSql("create database db1");
  tableEnv.executeSql("create table db1.src (x int, y int)");
  tableEnv.executeSql("create table db1.part (x int) partitioned by (y int)");
  HiveTestUtils.createTextTableInserter(hiveShell, "db1", "src")
      .addRow(new Object[]{1, 1})
      .addRow(new Object[]{2, null})
      .commit();

  // test generating partitions with default name
  TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "insert into db1.part select * from db1.src");
  HiveConf hiveConf = hiveShell.getHiveConf();
  String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
  Table hiveTable = hmsClient.getTable("db1", "part");
  Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName);
  FileSystem fs = defaultPartPath.getFileSystem(hiveConf);
  assertTrue(fs.exists(defaultPartPath));

  TableImpl flinkTable = (TableImpl) tableEnv.sqlQuery("select y, x from db1.part order by x");
  List<Row> rows = Lists.newArrayList(flinkTable.execute().collect());
  assertEquals(Arrays.toString(new String[]{"1,1", "null,2"}), rows.toString());

  tableEnv.executeSql("drop database db1 cascade");
}
Example #15
Source File: HdfsSnapshotLocationManager.java From circus-train with Apache License 2.0
HdfsSnapshotLocationManager(
    HiveConf sourceHiveConf,
    String eventId,
    Table sourceTable,
    List<Partition> sourcePartitions,
    boolean snapshotsDisabled,
    String tableBasePath,
    FileSystemFactory fileSystemFactory,
    SourceCatalogListener sourceCatalogListener) throws IOException {
  this.sourceHiveConf = sourceHiveConf;
  this.eventId = eventId;
  this.sourceTable = sourceTable;
  this.snapshotsDisabled = snapshotsDisabled;
  this.sourceCatalogListener = sourceCatalogListener;
  this.fileSystemFactory = fileSystemFactory;
  String sourceDataLocation;
  if (StringUtils.isNotBlank(tableBasePath)) {
    sourceDataLocation = tableBasePath;
  } else {
    sourceDataLocation = sourceTable.getSd().getLocation();
  }
  sourceDataPath = new Path(sourceDataLocation);
  copyBasePath = createSnapshot();
  String copyBaseLocation = copyBasePath.toString();
  subPaths = calculateSubPaths(sourcePartitions, sourceDataLocation, copyBaseLocation);
}
Example #16
Source File: RelaxedSQLStdHiveAccessController.java From beeju with Apache License 2.0
public RelaxedSQLStdHiveAccessController(
    HiveMetastoreClientFactory metastoreClientFactory,
    HiveConf conf,
    HiveAuthenticationProvider authenticator,
    HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
  super(metastoreClientFactory, conf, authenticator, ctx);
}
Example #17
Source File: HiveLocalMetaStoreTest.java From hadoop-mini-clusters with Apache License 2.0
public static HiveConf buildHiveConf() {
  HiveConf hiveConf = new HiveConf();
  hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
  hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
  hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
  hiveConf.set("hive.root.logger", "DEBUG,console");
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  return hiveConf;
}
Example #18
Source File: HCatalogIOTestUtils.java From beam with Apache License 2.0
/** Returns config params for the test datastore as a Map. */
public static Map<String, String> getConfigPropertiesAsMap(HiveConf hiveConf) {
  Map<String, String> map = new HashMap<>();
  for (Entry<String, String> kv : hiveConf) {
    map.put(kv.getKey(), kv.getValue());
  }
  return map;
}
Example #19
Source File: ScanWithDremioReader.java From dremio-oss with Apache License 2.0
static Iterator<RecordReader> createReaders(
    final HiveConf hiveConf,
    final BaseHiveStoragePlugin hiveStoragePlugin,
    final FragmentExecutionContext fragmentExecContext,
    final OperatorContext context,
    final HiveProxyingSubScan config,
    final HiveTableXattr tableXattr,
    final CompositeReaderConfig compositeReader,
    final UserGroupInformation readerUGI,
    List<SplitAndPartitionInfo> splits) {

  try (ContextClassLoaderSwapper ccls = ContextClassLoaderSwapper.newInstance()) {
    if (splits.isEmpty()) {
      return Collections.emptyIterator();
    }

    final JobConf jobConf = new JobConf(hiveConf);

    final List<HiveParquetSplit> sortedSplits = Lists.newArrayList();
    for (SplitAndPartitionInfo split : splits) {
      sortedSplits.add(new HiveParquetSplit(split));
    }
    Collections.sort(sortedSplits);

    return new HiveParquetSplitReaderIterator(
        jobConf, context, config, sortedSplits, readerUGI, compositeReader, hiveStoragePlugin, tableXattr);
  } catch (final Exception e) {
    throw Throwables.propagate(e);
  }
}
Example #20
Source File: HiveTableUtil.java From flink with Apache License 2.0
public static Table alterTableViaCatalogBaseTable(
    ObjectPath tablePath, CatalogBaseTable baseTable, Table oldHiveTable, HiveConf hiveConf) {
  Table newHiveTable = instantiateHiveTable(tablePath, baseTable, hiveConf);
  // client.alter_table() requires a valid location,
  // so if the new table doesn't have one, it reuses the location of the old table
  if (!newHiveTable.getSd().isSetLocation()) {
    newHiveTable.getSd().setLocation(oldHiveTable.getSd().getLocation());
  }
  return newHiveTable;
}
Example #21
Source File: HiveJdbcCommon.java From nifi with Apache License 2.0
public static Configuration getConfigurationFromFiles(final String configFiles) {
  final Configuration hiveConfig = new HiveConf();
  if (StringUtils.isNotBlank(configFiles)) {
    for (final String configFile : configFiles.split(",")) {
      hiveConfig.addResource(new Path(configFile.trim()));
    }
  }
  return hiveConfig;
}
Example #22
Source File: SentryMetastorePostEventListener.java From incubator-sentry with Apache License 2.0
public SentryMetastorePostEventListener(Configuration config) {
  super(config);

  if (!(config instanceof HiveConf)) {
    String error = "Could not initialize Plugin - Configuration is not an instanceof HiveConf";
    LOGGER.error(error);
    throw new RuntimeException(error);
  }

  authzConf = HiveAuthzConf.getAuthzConf((HiveConf) config);
  server = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME.getVar()));
  Iterable<String> pluginClasses = ConfUtilties.CLASS_SPLITTER
      .split(config.get(ServerConfig.SENTRY_METASTORE_PLUGINS,
          ServerConfig.SENTRY_METASTORE_PLUGINS_DEFAULT).trim());

  try {
    for (String pluginClassStr : pluginClasses) {
      Class<?> clazz = config.getClassByName(pluginClassStr);
      if (!SentryMetastoreListenerPlugin.class.isAssignableFrom(clazz)) {
        throw new IllegalArgumentException("Class [" + pluginClassStr + "] is not a "
            + SentryMetastoreListenerPlugin.class.getName());
      }
      SentryMetastoreListenerPlugin plugin = (SentryMetastoreListenerPlugin) clazz
          .getConstructor(Configuration.class, Configuration.class)
          .newInstance(config, authzConf);
      sentryPlugins.add(plugin);
    }
  } catch (Exception e) {
    LOGGER.error("Could not initialize Plugin !!", e);
    throw new RuntimeException(e);
  }
}
Example #23
Source File: HiveMetadataUtils.java From dremio-oss with Apache License 2.0
/**
 * Applies Hive configuration if ORC file ids are not supported by the table's underlying filesystem.
 *
 * @param storageCapabilities The storageCapabilities.
 * @param tableOrPartitionProperties Properties of the table or partition which may be altered.
 */
public static void injectOrcIncludeFileIdInSplitsConf(final HiveStorageCapabilities storageCapabilities,
                                                      final Properties tableOrPartitionProperties) {
  if (!storageCapabilities.supportsOrcSplitFileIds()) {
    tableOrPartitionProperties.put(HiveConf.ConfVars.HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS.varname, "false");
  }
}
Example #24
Source File: HiveMetadataUtils.java From dremio-oss with Apache License 2.0
public static BatchSchema getBatchSchema(Table table, final HiveConf hiveConf, boolean includeComplexParquetCols) {
  InputFormat<?, ?> format = getInputFormat(table, hiveConf);
  final List<Field> fields = new ArrayList<>();
  final List<String> partitionColumns = new ArrayList<>();
  HiveMetadataUtils.populateFieldsAndPartitionColumns(table, fields, partitionColumns, format, includeComplexParquetCols);
  return BatchSchema.newBuilder().addFields(fields).build();
}
Example #25
Source File: HiveTestDataGenerator.java From dremio-oss with Apache License 2.0
public void executeDDL(String query, Map<String, String> confOverrides) throws IOException {
  final HiveConf conf = newHiveConf();
  for (Map.Entry<String, String> entry : confOverrides.entrySet()) {
    conf.set(entry.getKey(), entry.getValue());
  }
  runDDL(query, conf);
}
Example #26
Source File: AWSGlueMetastoreCacheDecorator.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public AWSGlueMetastoreCacheDecorator(HiveConf conf, AWSGlueMetastore awsGlueMetastore) {
  super(awsGlueMetastore);

  checkNotNull(conf, "conf can not be null");
  this.conf = conf;

  databaseCacheEnabled = conf.getBoolean(AWS_GLUE_DB_CACHE_ENABLE, false);
  if (databaseCacheEnabled) {
    int dbCacheSize = conf.getInt(AWS_GLUE_DB_CACHE_SIZE, 0);
    int dbCacheTtlMins = conf.getInt(AWS_GLUE_DB_CACHE_TTL_MINS, 0);

    // validate config values for size and ttl
    validateConfigValueIsGreaterThanZero(AWS_GLUE_DB_CACHE_SIZE, dbCacheSize);
    validateConfigValueIsGreaterThanZero(AWS_GLUE_DB_CACHE_TTL_MINS, dbCacheTtlMins);

    // initialize database cache
    databaseCache = CacheBuilder.newBuilder().maximumSize(dbCacheSize)
        .expireAfterWrite(dbCacheTtlMins, TimeUnit.MINUTES).build();
  } else {
    databaseCache = null;
  }

  tableCacheEnabled = conf.getBoolean(AWS_GLUE_TABLE_CACHE_ENABLE, false);
  if (tableCacheEnabled) {
    int tableCacheSize = conf.getInt(AWS_GLUE_TABLE_CACHE_SIZE, 0);
    int tableCacheTtlMins = conf.getInt(AWS_GLUE_TABLE_CACHE_TTL_MINS, 0);

    // validate config values for size and ttl
    validateConfigValueIsGreaterThanZero(AWS_GLUE_TABLE_CACHE_SIZE, tableCacheSize);
    validateConfigValueIsGreaterThanZero(AWS_GLUE_TABLE_CACHE_TTL_MINS, tableCacheTtlMins);

    // initialize table cache
    tableCache = CacheBuilder.newBuilder().maximumSize(tableCacheSize)
        .expireAfterWrite(tableCacheTtlMins, TimeUnit.MINUTES).build();
  } else {
    tableCache = null;
  }

  logger.info("Constructed");
}
Example #27
Source File: HiveConfFactory.java From incubator-gobblin with Apache License 2.0
@Override
public SharedResourceFactoryResponse<HiveConf> createResource(SharedResourcesBroker<S> broker,
    ScopedConfigView<S, SharedHiveConfKey> config) throws NotConfiguredException {
  SharedHiveConfKey sharedHiveConfKey = config.getKey();
  HiveConf rawConf = new HiveConf();
  if (!sharedHiveConfKey.hiveConfUri.equals(SharedHiveConfKey.INSTANCE.toConfigurationKey())
      && StringUtils.isNotEmpty(sharedHiveConfKey.hiveConfUri)) {
    rawConf.setVar(HiveConf.ConfVars.METASTOREURIS, sharedHiveConfKey.hiveConfUri);
    rawConf.set(HIVE_METASTORE_TOKEN_SIGNATURE, sharedHiveConfKey.hiveConfUri);
  }

  return new ResourceInstance<>(rawConf);
}
Example #28
Source File: HiveDialectITCase.java From flink with Apache License 2.0
@Before
public void setup() {
  hiveCatalog = HiveTestUtils.createHiveCatalog();
  hiveCatalog.getHiveConf().setBoolVar(
      HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false);
  hiveCatalog.open();
  warehouse = hiveCatalog.getHiveConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
  tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode();
  tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
  tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
  tableEnv.useCatalog(hiveCatalog.getName());
}
Example #29
Source File: ComparisonTool.java From circus-train with Apache License 2.0
@Bean
HiveEndpoint replica(
    ReplicaCatalog replicaCatalog,
    HiveConf replicaHiveConf,
    Supplier<CloseableMetaStoreClient> replicaMetaStoreClientSupplier) {
  return new ReplicaHiveEndpoint(replicaCatalog.getName(), replicaHiveConf, replicaMetaStoreClientSupplier);
}
Example #30
Source File: ReplicaTableFactoryProvider.java From circus-train with Apache License 2.0
@Autowired
public ReplicaTableFactoryProvider(
    @Value("#{sourceHiveConf}") HiveConf sourceHiveConf,
    @Value("#{checksumFunction}") Function<Path, String> checksumFunction,
    TableTransformation tableTransformation,
    PartitionTransformation partitionTransformation,
    ColumnStatisticsTransformation columnStatisticsTransformation) {
  this.sourceHiveConf = sourceHiveConf;
  this.checksumFunction = checksumFunction;
  this.tableTransformation = tableTransformation;
  this.partitionTransformation = partitionTransformation;
  this.columnStatisticsTransformation = columnStatisticsTransformation;
}