Java Code Examples for org.apache.hadoop.hive.conf.HiveConf#set()
The following examples show how to use org.apache.hadoop.hive.conf.HiveConf#set(). Each example is taken from an open-source project; the source file and license are noted above the code.
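HiveConf extends Hadoop's Configuration, so set(String key, String value) stores a plain string property; HiveConf additionally exposes typed helpers such as setVar, setBoolVar, and setIntVar that take a HiveConf.ConfVars constant and validate the value against its definition. The examples below use both styles. As a minimal, self-contained sketch of the pattern (the property values here are illustrative only, not recommended settings):

import org.apache.hadoop.hive.conf.HiveConf;

public class HiveConfSetSketch {
    public static void main(String[] args) {
        HiveConf conf = new HiveConf();

        // Plain string key/value pair, inherited from Hadoop's Configuration
        conf.set("hive.metastore.uris", "thrift://localhost:9083");

        // The same call, using the canonical key carried by a ConfVars constant
        conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "/tmp/warehouse");

        // Typed setters validate the value against the ConfVars definition
        conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
        conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);

        // Reads go through the same keys
        System.out.println(conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname)); // /tmp/warehouse
    }
}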
Example 1
Source File: BaseTestHiveImpersonation.java From dremio-oss with Apache License 2.0
protected static void prepHiveConfAndData() throws Exception {
    hiveConf = new HiveConf();

    // Configure metastore persistence db location on local filesystem
    final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true", getTempDir("metastore_db"));
    hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);

    hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
    hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));
    hiveConf.set(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");
    hiveConf.set(ConfVars.METASTORE_AUTO_CREATE_ALL.varname, "true");
    hiveConf.set(ConfVars.HIVE_CBO_ENABLED.varname, "false");

    // Set MiniDFS conf in HiveConf
    hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

    whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
    FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

    studentData = getPhysicalFileFromResource("student.txt");
    voterData = getPhysicalFileFromResource("voter.txt");
}
Example 2
Source File: HiveMetaStore.java From streamx with Apache License 2.0
public HiveMetaStore(Configuration conf, HdfsSinkConnectorConfig connectorConfig) throws HiveMetaStoreException {
    HiveConf hiveConf = new HiveConf(conf, HiveConf.class);

    String hiveConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
    String hiveMetaStoreURIs = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_METASTORE_URIS_CONFIG);
    if (hiveMetaStoreURIs.isEmpty()) {
        log.warn("hive.metastore.uris empty, an embedded Hive metastore will be "
                 + "created in the directory the connector is started. "
                 + "You need to start Hive in that specific directory to query the data.");
    }
    if (!hiveConfDir.equals("")) {
        String hiveSitePath = hiveConfDir + "/hive-site.xml";
        File hiveSite = new File(hiveSitePath);
        if (!hiveSite.exists()) {
            // log the directory path, not the HiveConf object
            log.warn("hive-site.xml does not exist in provided Hive configuration directory {}.", hiveConfDir);
        }
        hiveConf.addResource(new Path(hiveSitePath));
    }
    hiveConf.set("hive.metastore.uris", hiveMetaStoreURIs);

    try {
        client = HCatUtil.getHiveMetastoreClient(hiveConf);
    } catch (IOException | MetaException e) {
        throw new HiveMetaStoreException(e);
    }
}
Example 3
Source File: FlinkStandaloneHiveServerContext.java From flink with Apache License 2.0
private void configureMetaStore(HiveConf conf) {
    String jdbcDriver = org.apache.derby.jdbc.EmbeddedDriver.class.getName();
    try {
        Class.forName(jdbcDriver);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
    }

    // Set the Hive Metastore DB driver
    conf.set("datanucleus.schema.autoCreateAll", "true");
    conf.set("hive.metastore.schema.verification", "false");
    conf.set("hive.metastore.uris", toHmsURI());
    // No pooling needed. This will save us a lot of threads
    conf.set("datanucleus.connectionPoolingType", "None");

    conf.setBoolVar(METASTORE_VALIDATE_CONSTRAINTS, true);
    conf.setBoolVar(METASTORE_VALIDATE_COLUMNS, true);
    conf.setBoolVar(METASTORE_VALIDATE_TABLES, true);

    // disable authorization to avoid NPE
    conf.set(HIVE_AUTHORIZATION_MANAGER.varname,
        "org.apache.hive.hcatalog.storagehandler.DummyHCatAuthProvider");
}
Example 4
Source File: TestHiveCatalogStore.java From tajo with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
    formatFactory = new StorageFormatFactory();
    Path testPath = CommonTestingUtil.getTestDir();
    warehousePath = new Path(testPath, "warehouse");

    // create local hiveMeta
    HiveConf conf = new HiveConf();
    String jdbcUri = "jdbc:derby:;databaseName=" + testPath.toUri().getPath() + "metastore_db;create=true";
    conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousePath.toUri().toString());
    conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, jdbcUri);
    conf.set(TajoConf.ConfVars.WAREHOUSE_DIR.varname, warehousePath.toUri().toString());
    conf.setBoolean("datanucleus.schema.autoCreateAll", true);

    // create local HiveCatalogStore.
    TajoConf tajoConf = new TajoConf(conf);
    store = new HiveCatalogStore(tajoConf);
    store.createDatabase(DB_NAME, null);
}
Example 5
Source File: HiveClientTest.java From garmadon with Apache License 2.0
@Before
public void setup() throws IOException {
    hdfsTemp = Files.createTempDirectory("hdfs");
    derbyDBPath = Files.createTempDirectory("derbyDB");

    HiveConf hiveConf = new HiveConf();
    hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
        "jdbc:derby:;databaseName=" + derbyDBPath.toString() + "/derbyDB" + ";create=true");

    ServerSocket s = new ServerSocket(0);
    port = String.valueOf(s.getLocalPort());
    hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, port);
    // Required to avoid NoSuchMethodError org.apache.hive.service.cli.operation.LogDivertAppender.setWriter
    hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED.varname, "false");

    hiveServer2 = new HiveServer2();
    hiveServer2.init(hiveConf);
    s.close();
    hiveServer2.start();
}
Example 6
Source File: AbstractMetastoreTestWithStaticConfiguration.java From incubator-sentry with Apache License 2.0
public void execHiveSQLwithOverlay(final String sqlStmt, final String userName,
    Map<String, String> overLay) throws Exception {
    final HiveConf hiveConf = new HiveConf();
    for (Map.Entry<String, String> entry : overLay.entrySet()) {
        hiveConf.set(entry.getKey(), entry.getValue());
    }
    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser(userName);
    clientUgi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Void run() throws Exception {
            Driver driver = new Driver(hiveConf, userName);
            SessionState.start(new CliSessionState(hiveConf));
            CommandProcessorResponse cpr = driver.run(sqlStmt);
            if (cpr.getResponseCode() != 0) {
                throw new IOException("Failed to execute \"" + sqlStmt + "\". Driver returned "
                    + cpr.getResponseCode() + " Error: " + cpr.getErrorMessage());
            }
            driver.close();
            SessionState.get().close();
            return null;
        }
    });
}
Example 7
Source File: HiveLocalServer2IntegrationTest.java From hadoop-mini-clusters with Apache License 2.0
public static HiveConf buildHiveConf() {
    // Handle Windows
    WindowsLibsUtils.setHadoopHome();

    HiveConf hiveConf = new HiveConf();
    hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
    hiveConf.set("hive.root.logger", "DEBUG,console");
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    return hiveConf;
}
Example 8
Source File: HiveTestDataGenerator.java From dremio-oss with Apache License 2.0
private HiveConf newHiveConf() {
    HiveConf conf = new HiveConf(SessionState.class);

    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    // Metastore needs to be set, and WITH the deprecated key :(
    // Otherwise, will default to /user/hive/warehouse when trying to create a new database
    // (database location is now sent by the client to the server...)
    HiveConf.setVar(conf, ConfVars.METASTOREWAREHOUSE, whDir);
    conf.set("mapred.job.tracker", "local");
    HiveConf.setVar(conf, ConfVars.SCRATCHDIR, getTempDir("scratch_dir"));
    HiveConf.setVar(conf, ConfVars.LOCALSCRATCHDIR, getTempDir("local_scratch_dir"));
    HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
    HiveConf.setBoolVar(conf, ConfVars.HIVE_CBO_ENABLED, false);

    return conf;
}
Example 9
Source File: HiveLocalServer2Test.java From hadoop-mini-clusters with Apache License 2.0
public static HiveConf buildHiveConf() {
    HiveConf hiveConf = new HiveConf();
    hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
    hiveConf.set("hive.root.logger", "DEBUG,console");
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    return hiveConf;
}
Example 10
Source File: HiveTestDataGenerator.java From dremio-oss with Apache License 2.0
public void executeDDL(String query, Map<String, String> confOverrides) throws IOException {
    final HiveConf conf = newHiveConf();
    for (Map.Entry<String, String> entry : confOverrides.entrySet()) {
        conf.set(entry.getKey(), entry.getValue());
    }
    runDDL(query, conf);
}
Example 11
Source File: EmbeddedMetastoreService.java From beam with Apache License 2.0
public EmbeddedMetastoreService(String baseDirPath) throws IOException {
    FileUtils.forceDeleteOnExit(new File(baseDirPath));

    String hiveDirPath = makePathASafeFileName(baseDirPath + "/hive");
    String testDataDirPath = makePathASafeFileName(hiveDirPath + "/data/"
        + EmbeddedMetastoreService.class.getCanonicalName() + System.currentTimeMillis());
    String testWarehouseDirPath = makePathASafeFileName(testDataDirPath + "/warehouse");

    hiveConf = new HiveConf(getClass());
    hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, "");
    hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, "");
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, testWarehouseDirPath);
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true);
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
        "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    hiveConf.set("test.tmp.dir", hiveDirPath);

    System.setProperty("derby.stream.error.file", "/dev/null");

    driver = new Driver(hiveConf);
    sessionState = SessionState.start(new SessionState(hiveConf));
}
Example 12
Source File: HiveAuthzBindingSessionHookV2.java From incubator-sentry with Apache License 2.0
/**
 * The session hook for sentry authorization that sets the required session level configuration:
 * 1. Setup the sentry hooks - semantic, exec and filter hooks
 * 2. Set additional config properties required for auth
 *    set HIVE_EXTENDED_ENTITY_CAPTURE = true
 *    set SCRATCHDIRPERMISSION = 700
 * 3. Add sensitive config parameters to the config restrict list so that they can't be
 *    overridden by users
 */
@Override
public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException {
    // Add sentry hooks to the session configuration
    HiveConf sessionConf = sessionHookContext.getSessionConf();

    appendConfVar(sessionConf, ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SEMANTIC_HOOK);
    // enable sentry authorization V2
    sessionConf.setBoolean(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED.varname, true);
    sessionConf.setBoolean(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, false);
    sessionConf.set(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname,
        "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator");

    // grant all privileges for table to its owner
    sessionConf.setVar(ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS, "");

    // Enable compiler to capture transform URI referred in the query
    sessionConf.setBoolVar(ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY, true);

    // set security command list
    HiveAuthzConf authzConf = HiveAuthzBindingHook.loadAuthzConf(sessionConf);
    String commandWhitelist = authzConf.get(HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST,
        HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST_DEFAULT);
    sessionConf.setVar(ConfVars.HIVE_SECURITY_COMMAND_WHITELIST, commandWhitelist);

    // set additional configuration properties required for auth
    sessionConf.setVar(ConfVars.SCRATCHDIRPERMISSION, SCRATCH_DIR_PERMISSIONS);

    // setup restrict list
    sessionConf.addToRestrictList(ACCESS_RESTRICT_LIST);

    // set user name
    sessionConf.set(HiveAuthzConf.HIVE_ACCESS_SUBJECT_NAME, sessionHookContext.getSessionUser());
    sessionConf.set(HiveAuthzConf.HIVE_SENTRY_SUBJECT_NAME, sessionHookContext.getSessionUser());

    // Set MR ACLs to session user
    appendConfVar(sessionConf, JobContext.JOB_ACL_VIEW_JOB, sessionHookContext.getSessionUser());
    appendConfVar(sessionConf, JobContext.JOB_ACL_MODIFY_JOB, sessionHookContext.getSessionUser());
}
Example 13
Source File: HiveLanguageParser.java From circus-train with Apache License 2.0
public HiveLanguageParser(HiveConf hiveConfiguration) {
    hiveConf = new HiveConf(hiveConfiguration);
    if (hiveConf.get(HDFS_SESSION_PATH_KEY) == null) {
        hiveConf.set(HDFS_SESSION_PATH_KEY, hdfsTemporaryDirectory(hiveConf));
    }
    if (hiveConf.get(LOCAL_SESSION_PATH_KEY) == null) {
        hiveConf.set(LOCAL_SESSION_PATH_KEY, localTemporaryDirectory());
    }
}
Example 14
Source File: HiveLocalMetaStoreTest.java From hadoop-mini-clusters with Apache License 2.0
public static HiveConf buildHiveConf() {
    HiveConf hiveConf = new HiveConf();
    hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON.varname, "true");
    hiveConf.set(HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS.varname, "5");
    hiveConf.set("hive.root.logger", "DEBUG,console");
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
    return hiveConf;
}
Example 15
Source File: HiveAuthzBindingSessionHookV2.java From incubator-sentry with Apache License 2.0
private void appendConfVar(HiveConf sessionConf, String confVar, String sentryConfVal) {
    String currentValue = sessionConf.get(confVar, "").trim();
    if (currentValue.isEmpty()) {
        currentValue = sentryConfVal;
    } else {
        currentValue = sentryConfVal + "," + currentValue;
    }
    sessionConf.set(confVar, currentValue);
}
Example 16
Source File: HiveEmbeddedServer2.java From elasticsearch-hadoop with Apache License 2.0
private HiveConf configure() throws Exception {
    String scratchDir = NTFSLocalFileSystem.SCRATCH_DIR;

    File scratchDirFile = new File(scratchDir);
    TestUtils.delete(scratchDirFile);

    Configuration cfg = new Configuration();
    HiveConf conf = new HiveConf(cfg, HiveConf.class);
    conf.addToRestrictList("columns.comments");
    refreshConfig(conf);

    HdpBootstrap.hackHadoopStagingOnWin();

    // work-around for NTFS FS
    // set permissive permissions since otherwise, on some OS it fails
    if (TestUtils.isWindows()) {
        conf.set("fs.file.impl", NTFSLocalFileSystem.class.getName());
        conf.set("hive.scratch.dir.permission", "650");
        conf.setVar(ConfVars.SCRATCHDIRPERMISSION, "650");
        conf.set("hive.server2.enable.doAs", "false");
        conf.set("hive.execution.engine", "mr");
        //conf.set("hadoop.bin.path", getClass().getClassLoader().getResource("hadoop.cmd").getPath());
        System.setProperty("path.separator", ";");
        conf.setVar(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, DummyHiveAuthenticationProvider.class.getName());
    } else {
        conf.set("hive.scratch.dir.permission", "777");
        conf.setVar(ConfVars.SCRATCHDIRPERMISSION, "777");
        scratchDirFile.mkdirs();
        // also set the permissions manually since Hive doesn't do it...
        scratchDirFile.setWritable(true, false);
    }

    int random = new Random().nextInt();

    conf.set("hive.metastore.warehouse.dir", scratchDir + "/warehouse" + random);
    conf.set("hive.metastore.metadb.dir", scratchDir + "/metastore_db" + random);
    conf.set("hive.exec.scratchdir", scratchDir);
    conf.set("fs.permissions.umask-mode", "022");
    conf.set("javax.jdo.option.ConnectionURL",
        "jdbc:derby:;databaseName=" + scratchDir + "/metastore_db" + random + ";create=true");
    conf.set("hive.metastore.local", "true");
    conf.set("hive.aux.jars.path", "");
    conf.set("hive.added.jars.path", "");
    conf.set("hive.added.files.path", "");
    conf.set("hive.added.archives.path", "");
    conf.set("fs.default.name", "file:///");

    // clear mapred.job.tracker - Hadoop defaults to 'local' if not defined. Hive however expects
    // this to be set to 'local' - if it's not, it does a remote execution (i.e. no child JVM)
    Field field = Configuration.class.getDeclaredField("properties");
    field.setAccessible(true);
    Properties props = (Properties) field.get(conf);
    props.remove("mapred.job.tracker");
    props.remove("mapreduce.framework.name");
    props.setProperty("fs.default.name", "file:///");

    // intercept SessionState to clean the threadlocal
    Field tss = SessionState.class.getDeclaredField("tss");
    tss.setAccessible(true);
    //tss.set(null, new InterceptingThreadLocal());

    return new HiveConf(conf);
}
Example 17
Source File: BlurSerDeTest.java From incubator-retired-blur with Apache License 2.0
private int runLoad(boolean disableMrUpdate)
    throws IOException, InterruptedException, ClassNotFoundException, SQLException {
    Configuration configuration = miniCluster.getMRConfiguration();
    writeSiteFiles(configuration);
    HiveConf hiveConf = new HiveConf(configuration, getClass());
    hiveConf.set("hive.server2.thrift.port", "0");
    HiveServer2 hiveServer2 = new HiveServer2();
    hiveServer2.init(hiveConf);
    hiveServer2.start();
    int port = waitForStartupAndGetPort(hiveServer2);

    Class.forName(HiveDriver.class.getName());
    String userName = UserGroupInformation.getCurrentUser().getShortUserName();
    Connection connection = DriverManager.getConnection("jdbc:hive2://localhost:" + port, userName, "");

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    run(connection, "set blur.user.name=" + currentUser.getUserName());
    run(connection, "set blur.mr.update.disabled=" + disableMrUpdate);
    run(connection, "set hive.metastore.warehouse.dir=" + WAREHOUSE.toURI().toString());

    run(connection, "create database if not exists testdb");
    run(connection, "use testdb");

    run(connection, "CREATE TABLE if not exists testtable ROW FORMAT SERDE 'org.apache.blur.hive.BlurSerDe' "
        + "WITH SERDEPROPERTIES ( 'blur.zookeeper.connection'='" + miniCluster.getZkConnectionString() + "', "
        + "'blur.table'='" + TEST + "', 'blur.family'='" + FAM + "' ) "
        + "STORED BY 'org.apache.blur.hive.BlurHiveStorageHandler'");

    run(connection, "desc testtable");

    String createLoadTable = buildCreateLoadTable(connection);
    run(connection, createLoadTable);
    File dbDir = new File(WAREHOUSE, "testdb.db");
    File tableDir = new File(dbDir, "loadtable");
    int totalRecords = 100;
    generateData(tableDir, totalRecords);

    run(connection, "select * from loadtable");
    run(connection, "set " + BlurSerDe.BLUR_BLOCKING_APPLY + "=true");
    run(connection, "insert into table testtable select * from loadtable");

    connection.close();
    hiveServer2.stop();
    return totalRecords;
}
Example 18
Source File: FlinkStandaloneHiveServerContext.java From flink with Apache License 2.0
private void createAndSetFolderProperty(String key, String folder, HiveConf conf, TemporaryFolder basedir) {
    conf.set(key, newFolder(basedir, folder).getAbsolutePath());
}
Example 19
Source File: HIVERangerAuthorizerTest.java From ranger with Apache License 2.0
@org.junit.BeforeClass
public static void setup() throws Exception {
    // Get a random port
    ServerSocket serverSocket = new ServerSocket(0);
    port = serverSocket.getLocalPort();
    serverSocket.close();

    HiveConf conf = new HiveConf();

    // Warehouse
    File warehouseDir = new File("./target/hdfs/warehouse").getAbsoluteFile();
    conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehouseDir.getPath());

    // Scratchdir
    File scratchDir = new File("./target/hdfs/scratchdir").getAbsoluteFile();
    conf.set("hive.exec.scratchdir", scratchDir.getPath());

    // REPL DUMP target folder
    File replRootDir = new File("./target/user/hive").getAbsoluteFile();
    conf.set("hive.repl.rootdir", replRootDir.getPath());

    // Create a temporary directory for the Hive metastore
    File metastoreDir = new File("./metastore_db/").getAbsoluteFile();
    conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
        String.format("jdbc:derby:;databaseName=%s;create=true", metastoreDir.getPath()));

    conf.set(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.varname, "true");
    conf.set(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT.varname, "" + port);
    conf.set(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
    conf.set(HiveConf.ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, "0");
    conf.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "mr");

    hiveServer = new HiveServer2();
    hiveServer.init(conf);
    hiveServer.start();
    Class.forName("org.apache.hive.jdbc.HiveDriver");

    // Create database
    String initialUrl = "jdbc:hive2://localhost:" + port;
    Connection connection = DriverManager.getConnection(initialUrl, "admin", "admin");
    Statement statement = connection.createStatement();

    statement.execute("CREATE DATABASE IF NOT EXISTS rangerauthz with dbproperties ('repl.source.for'='1,2,3')");
    statement.execute("CREATE DATABASE IF NOT EXISTS demo");

    statement.close();
    connection.close();

    // Load data into HIVE
    String url = "jdbc:hive2://localhost:" + port + "/rangerauthz";
    connection = DriverManager.getConnection(url, "admin", "admin");
    statement = connection.createStatement();
    // statement.execute("CREATE TABLE WORDS (word STRING, count INT)");
    statement.execute("create table if not exists words (word STRING, count INT) "
        + "row format delimited fields terminated by '\t' stored as textfile");

    // Copy "wordcount.txt" to "target" to avoid overwriting it during load
    File inputFile = new File(HIVERangerAuthorizerTest.class.getResource("../../../../../wordcount.txt").toURI());
    Path outputPath = Paths.get(inputFile.toPath().getParent().getParent().toString()
        + File.separator + "wordcountout.txt");
    Files.copy(inputFile.toPath(), outputPath);

    statement.execute("LOAD DATA INPATH '" + outputPath + "' OVERWRITE INTO TABLE words");

    // Just test to make sure it's working
    ResultSet resultSet = statement.executeQuery("SELECT * FROM words where count == '100'");
    if (resultSet.next()) {
        Assert.assertEquals("Mr.", resultSet.getString(1));
    } else {
        Assert.fail("No ResultSet found");
    }

    statement.close();
    connection.close();

    // Enable ranger authorization after the initial db setup and table creating is done.
    conf.set(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED.varname, "true");
    conf.set(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
    conf.set(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER.varname,
        "org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory");
}
Example 20
Source File: HiveTarget.java From datacollector with Apache License 2.0
@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();
    errorRecordHandler = new DefaultErrorRecordHandler(getContext());

    partitionsToFields = new HashMap<>();
    columnsToFields = new HashMap<>();

    hiveConf = new HiveConf();

    // either the hiveConfDir should be set and valid, or the metastore URL must be set. (it's possible both are true!)
    if (null != hiveConfDir && !hiveConfDir.isEmpty()) {
        initHiveConfDir(issues);
    } else if (hiveThriftUrl == null || hiveThriftUrl.isEmpty()) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveThriftUrl", Errors.HIVE_13));
    }

    // Specified URL overrides what's in the Hive Conf iff it's present
    if (hiveThriftUrl != null && !hiveThriftUrl.isEmpty()) {
        hiveConf.set(HIVE_METASTORE_URI, hiveThriftUrl);
    }

    if (!validateHiveThriftUrl(issues)) {
        return issues;
    }

    // Add any additional hive conf overrides
    for (Map.Entry<String, String> entry : additionalHiveProperties.entrySet()) {
        hiveConf.set(entry.getKey(), entry.getValue());
    }

    captureLoginUGI(issues);
    initHiveMetaStoreClient(issues);
    applyCustomMappings(issues);

    dataGeneratorFactory = createDataGeneratorFactory();

    // Note that cleanup is done synchronously by default while servicing .get
    hiveConnectionPool = CacheBuilder.newBuilder()
        .maximumSize(10)
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .removalListener(new HiveConnectionRemovalListener())
        .build(new HiveConnectionLoader());
    hiveConnectionCacheCleaner = new CacheCleaner(hiveConnectionPool, "HiveTarget connection pool", 10 * 60 * 1000);

    recordWriterPool = CacheBuilder.newBuilder()
        .maximumSize(10)
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .build(new HiveRecordWriterLoader());
    recordWriterCacheCleaner = new CacheCleaner(recordWriterPool, "HiveTarget record writer pool", 10 * 60 * 1000);

    LineageEvent event = getContext().createLineageEvent(LineageEventType.ENTITY_WRITTEN);
    if (hiveThriftUrl != null && !hiveThriftUrl.isEmpty()) {
        event.setSpecificAttribute(LineageSpecificAttribute.DESCRIPTION, hiveThriftUrl);
    } else {
        event.setSpecificAttribute(LineageSpecificAttribute.DESCRIPTION, hiveConfDir);
    }
    event.setSpecificAttribute(LineageSpecificAttribute.ENDPOINT_TYPE, EndPointType.HIVE.name());
    event.setSpecificAttribute(LineageSpecificAttribute.ENTITY_NAME, schema + " " + tableName);
    getContext().publishLineageEvent(event);

    LOG.debug("Total issues: {}", issues.size());
    return issues;
}