org.apache.hadoop.hive.metastore.IHMSHandler Java Examples
The following examples show how to use
org.apache.hadoop.hive.metastore.IHMSHandler.
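Most of the examples below share one pattern: construct a HiveMetaStore.HMSHandler, wrap it in a retrying proxy via RetryingHMSHandler.getProxy, and expose the resulting IHMSHandler through a Thrift processor. The sketch below distills that pattern; the class name, handler name, ephemeral port, and thread defaults are illustrative choices, not values taken from any of the projects.

// A minimal sketch of the pattern shared by the examples below: wrap a
// HiveMetaStore.HMSHandler in a retrying proxy and serve it over Thrift.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportFactory;

public class EmbeddedMetastoreSketch {

  public static TServer newServer(HiveConf conf) throws Exception {
    // Base handler backed by whatever metastore database conf points at.
    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("sketch metastore", conf);
    // Retrying proxy that re-invokes the base handler on transient failures.
    IHMSHandler handler = RetryingHMSHandler.getProxy(conf, baseHandler, false);

    // Bind an ephemeral port and expose the handler through a Thrift processor.
    TServerSocket socket = new TServerSocket(0);
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
        .processor(new TSetIpAddressProcessor<>(handler))
        .transportFactory(new TTransportFactory())
        .protocolFactory(new TBinaryProtocol.Factory());
    return new TThreadPoolServer(args);
  }
}

Several of the examples additionally wrap the base handler before handing it to the processor, for instance for exception translation (Examples #2 and #15–#16) or UGI propagation (Examples #9 and #12).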
Example #1
Source File: HiveTableBaseTest.java From iceberg with Apache License 2.0 | 6 votes |
private TServer thriftServer() throws IOException, TTransportException, MetaException,
    InvocationTargetException, NoSuchMethodException, IllegalAccessException, NoSuchFieldException {
  final TServerSocketKeepAlive socket = new TServerSocketKeepAlive(new TServerSocket(0));
  this.hiveConf = hiveConf(new Configuration(), socket.getServerSocket().getLocalPort());
  HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", hiveConf);
  IHMSHandler handler = RetryingHMSHandler.getProxy(hiveConf, baseHandler, true);
  final TTransportFactory transportFactory = new TTransportFactory();
  final TSetIpAddressProcessor<IHMSHandler> processor = new TSetIpAddressProcessor<>(handler);

  TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
      .processor(processor)
      .transportFactory(transportFactory)
      .protocolFactory(new TBinaryProtocol.Factory())
      .minWorkerThreads(3)
      .maxWorkerThreads(5);

  return new TThreadPoolServer(args);
}
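The TServer returned by thriftServer() still has to be started. A hedged sketch of how the surrounding test class might serve it on a background thread and point a HiveMetaStoreClient at it follows; the client-side steps and method name are assumptions for illustration, not part of the iceberg test.

// Hypothetical follow-up inside the same test class: serve the embedded
// metastore on a daemon thread and query it with a HiveMetaStoreClient
// (org.apache.hadoop.hive.metastore.HiveMetaStoreClient).
void startAndQueryMetastore() throws Exception {
  TServer server = thriftServer();
  Thread serverThread = new Thread(server::serve, "hms-thrift-server");
  serverThread.setDaemon(true);
  serverThread.start();

  // Assumes hiveConf(...) pointed hive.metastore.uris at the bound port,
  // so a client built from this.hiveConf reaches the embedded metastore.
  HiveMetaStoreClient client = new HiveMetaStoreClient(hiveConf);
  try {
    System.out.println(client.getAllDatabases());
  } finally {
    client.close();
    server.stop();
  }
}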
Example #2
Source File: TSetIpAddressProcessorFactory.java From waggle-dance with Apache License 2.0 | 6 votes |
@Override
public TProcessor getProcessor(TTransport transport) {
  try {
    if (transport instanceof TSocket) {
      Socket socket = ((TSocket) transport).getSocket();
      log.debug("Received a connection from ip: {}", socket.getInetAddress().getHostAddress());
    }
    CloseableIHMSHandler baseHandler = federatedHMSHandlerFactory.create();
    IHMSHandler handler = newRetryingHMSHandler(ExceptionWrappingHMSHandler.newProxyInstance(baseHandler), hiveConf, false);
    transportMonitor.monitor(transport, baseHandler);
    return new TSetIpAddressProcessor<>(handler);
  } catch (MetaException | ReflectiveOperationException | RuntimeException e) {
    throw new RuntimeException("Error creating TProcessor", e);
  }
}
Example #3
Source File: TestHiveMetastore.java From iceberg with Apache License 2.0 | 5 votes |
private TServer newThriftServer(TServerSocket socket, HiveConf conf) throws Exception {
  HiveConf serverConf = new HiveConf(conf);
  serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
      "jdbc:derby:" + getDerbyPath() + ";create=true");

  HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", serverConf);
  IHMSHandler handler = RetryingHMSHandler.getProxy(serverConf, baseHandler, false);

  TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
      .processor(new TSetIpAddressProcessor<>(handler))
      .transportFactory(new TTransportFactory())
      .protocolFactory(new TBinaryProtocol.Factory())
      .minWorkerThreads(3)
      .maxWorkerThreads(5);

  return new TThreadPoolServer(args);
}
Example #4
Source File: MetastoreCacheInitializer.java From incubator-sentry with Apache License 2.0 | 5 votes |
MetastoreCacheInitializer(IHMSHandler hmsHandler, Configuration conf) {
  this.hmsHandler = hmsHandler;
  this.maxPartitionsPerCall = conf.getInt(
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC,
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC_DEFAULT);
  this.maxTablesPerCall = conf.getInt(
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC,
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC_DEFAULT);
  threadPool = Executors.newFixedThreadPool(conf.getInt(
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS,
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS_DEFAULT));
  maxRetries = conf.getInt(
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM,
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM_DEFAULT);
  waitDurationMillis = conf.getInt(
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_WAIT_DURAION_IN_MILLIS,
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_WAIT_DURAION_IN_MILLIS_DEFAULT);
  failOnRetry = conf.getBoolean(
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_FAIL_ON_PARTIAL_UPDATE,
      ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_FAIL_ON_PARTIAL_UPDATE_DEFAULT);
}
Example #5
Source File: ExceptionWrappingHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void get_databaseWaggleDanceServerException() throws Exception {
  expectedException.expect(MetaException.class);
  IHMSHandler handler = ExceptionWrappingHMSHandler.newProxyInstance(baseHandler);
  when(baseHandler.get_database("bdp")).thenThrow(new WaggleDanceServerException("waggle waggle!"));
  handler.get_database("bdp");
}
Example #6
Source File: ExceptionWrappingHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void get_databasNotAllowedException() throws Exception {
  expectedException.expect(MetaException.class);
  IHMSHandler handler = ExceptionWrappingHMSHandler.newProxyInstance(baseHandler);
  when(baseHandler.get_database("bdp")).thenThrow(new NotAllowedException("waggle waggle!"));
  handler.get_database("bdp");
}
Example #7
Source File: ExceptionWrappingHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void get_databaseRunTimeExceptionIsNotWrapped() throws Exception {
  expectedException.expect(RuntimeException.class);
  expectedException.expectMessage("generic non waggle dance exception");
  IHMSHandler handler = ExceptionWrappingHMSHandler.newProxyInstance(baseHandler);
  when(baseHandler.get_database("bdp")).thenThrow(new RuntimeException("generic non waggle dance exception"));
  handler.get_database("bdp");
}
Example #8
Source File: ExceptionWrappingHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void get_databaseCheckedExceptionIsNotWrapped() throws Exception {
  expectedException.expect(NoSuchObjectException.class);
  expectedException.expectMessage("Does not exist!");
  IHMSHandler handler = ExceptionWrappingHMSHandler.newProxyInstance(baseHandler);
  when(baseHandler.get_database("bdp")).thenThrow(new NoSuchObjectException("Does not exist!"));
  handler.get_database("bdp");
}
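Examples #5 through #8 (and Example #13 further down) rely on a baseHandler mock and an expectedException rule that the snippets themselves do not declare. A plausible reconstruction of that fixture, assuming JUnit 4 and Mockito rather than quoting the actual waggle-dance test class:

// Assumed fixture for the ExceptionWrappingHMSHandlerTest snippets above;
// the original class members are not shown here, so this is a sketch.
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.junit.Rule;
import org.junit.rules.ExpectedException;
import org.mockito.Mockito;

public class ExceptionWrappingHMSHandlerTestFixtureSketch {

  @Rule
  public ExpectedException expectedException = ExpectedException.none();

  // Mocked delegate whose behaviour the tests stub with when(...).thenThrow(...).
  // The test methods additionally assume static imports of Mockito.when and Mockito.verify.
  private final IHMSHandler baseHandler = Mockito.mock(IHMSHandler.class);
}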
Example #9
Source File: HiveService.java From kite with Apache License 2.0 | 4 votes |
public TServer startMetaStore(String forceBindIP, int port, HiveConf conf) throws IOException {
  try {
    // Server will create new threads up to max as necessary. After an idle
    // period, it will destroy threads to keep the number of threads in the
    // pool to min.
    int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
    int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
    boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
    boolean useFramedTransport = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);

    // don't support SASL yet
    //boolean useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);

    TServerTransport serverTransport;
    if (forceBindIP != null) {
      InetSocketAddress address = new InetSocketAddress(forceBindIP, port);
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(address) : new TServerSocket(address);
    } else {
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(port) : new TServerSocket(port);
    }

    TProcessor processor;
    TTransportFactory transFactory;

    IHMSHandler handler = (IHMSHandler) HiveMetaStore
        .newRetryingHMSHandler("new db based metaserver", conf, true);

    if (conf.getBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI)) {
      transFactory = useFramedTransport
          ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory())
          : new TUGIContainingTransport.Factory();
      processor = new TUGIBasedProcessor<IHMSHandler>(handler);
      LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
    } else {
      transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
      processor = new TSetIpAddressProcessor<IHMSHandler>(handler);
      LOG.info("Starting DB backed MetaStore Server");
    }

    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .transportFactory(transFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(minWorkerThreads)
        .maxWorkerThreads(maxWorkerThreads);

    final TServer tServer = new TThreadPoolServer(args);
    executorService.submit(new Runnable() {
      @Override
      public void run() {
        tServer.serve();
      }
    });
    return tServer;
  } catch (Throwable x) {
    throw new IOException(x);
  }
}
Example #10
Source File: TestMetastoreCacheInitializer.java From incubator-sentry with Apache License 2.0 | 4 votes |
@Test
public void testExceptionInTask() throws Exception {
  // Set up mocks: db1.tb1, with tb1 returning a wrong dbname (db2)
  Database db1 = Mockito.mock(Database.class);
  Mockito.when(db1.getName()).thenReturn("db1");
  Mockito.when(db1.getLocationUri()).thenReturn("hdfs:///db1");

  Table tab1 = Mockito.mock(Table.class);
  // Return a wrong db name, so that this triggers an exception
  Mockito.when(tab1.getDbName()).thenReturn("db2");
  Mockito.when(tab1.getTableName()).thenReturn("tab1");

  IHMSHandler hmsHandler = Mockito.mock(IHMSHandler.class);
  Mockito.when(hmsHandler.get_all_databases()).thenReturn(Lists.newArrayList("db1"));
  Mockito.when(hmsHandler.get_database("db1")).thenReturn(db1);
  Mockito.when(hmsHandler.get_table_objects_by_name("db1", Lists.newArrayList("tab1")))
      .thenReturn(Lists.newArrayList(tab1));
  Mockito.when(hmsHandler.get_all_tables("db1")).thenReturn(Lists.newArrayList("tab1"));

  Configuration conf = new Configuration();
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC, 1);
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC, 1);
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS, 1);
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_RETRY_MAX_NUM, 2);

  try {
    MetastoreCacheInitializer cacheInitializer = new MetastoreCacheInitializer(hmsHandler, conf);
    cacheInitializer.createInitialUpdate();
    Assert.fail("Expected cacheInitializer to fail");
  } catch (Exception e) {
    Assert.assertTrue(e instanceof RuntimeException);
  }
}
Example #11
Source File: TestMetastoreCacheInitializer.java From incubator-sentry with Apache License 2.0 | 4 votes |
@Test
public void testInitializer() throws Exception {
  Database db1 = Mockito.mock(Database.class);
  Mockito.when(db1.getName()).thenReturn("db1");
  Mockito.when(db1.getLocationUri()).thenReturn("hdfs:///db1");
  Database db2 = Mockito.mock(Database.class);
  Mockito.when(db2.getName()).thenReturn("db2");
  Mockito.when(db2.getLocationUri()).thenReturn("hdfs:///db2");
  Database db3 = Mockito.mock(Database.class);
  Mockito.when(db3.getName()).thenReturn("db3");
  Mockito.when(db3.getLocationUri()).thenReturn("hdfs:///db3");

  Table tab21 = Mockito.mock(Table.class);
  Mockito.when(tab21.getDbName()).thenReturn("db2");
  Mockito.when(tab21.getTableName()).thenReturn("tab21");
  StorageDescriptor sd21 = Mockito.mock(StorageDescriptor.class);
  Mockito.when(sd21.getLocation()).thenReturn("hdfs:///db2/tab21");
  Mockito.when(tab21.getSd()).thenReturn(sd21);

  Table tab31 = Mockito.mock(Table.class);
  Mockito.when(tab31.getDbName()).thenReturn("db3");
  Mockito.when(tab31.getTableName()).thenReturn("tab31");
  StorageDescriptor sd31 = Mockito.mock(StorageDescriptor.class);
  Mockito.when(sd31.getLocation()).thenReturn("hdfs:///db3/tab31");
  Mockito.when(tab31.getSd()).thenReturn(sd31);

  Partition part311 = Mockito.mock(Partition.class);
  StorageDescriptor sd311 = Mockito.mock(StorageDescriptor.class);
  Mockito.when(sd311.getLocation()).thenReturn("hdfs:///db3/tab31/part311");
  Mockito.when(part311.getSd()).thenReturn(sd311);
  Partition part312 = Mockito.mock(Partition.class);
  StorageDescriptor sd312 = Mockito.mock(StorageDescriptor.class);
  Mockito.when(sd312.getLocation()).thenReturn("hdfs:///db3/tab31/part312");
  Mockito.when(part312.getSd()).thenReturn(sd312);

  IHMSHandler hmsHandler = Mockito.mock(IHMSHandler.class);
  Mockito.when(hmsHandler.get_all_databases()).thenReturn(Lists.newArrayList("db1", "db2", "db3"));
  Mockito.when(hmsHandler.get_database("db1")).thenReturn(db1);
  Mockito.when(hmsHandler.get_all_tables("db1")).thenReturn(new ArrayList<String>());

  Mockito.when(hmsHandler.get_database("db2")).thenReturn(db2);
  Mockito.when(hmsHandler.get_all_tables("db2")).thenReturn(Lists.newArrayList("tab21"));
  Mockito.when(hmsHandler.get_table_objects_by_name("db2", Lists.newArrayList("tab21")))
      .thenReturn(Lists.newArrayList(tab21));
  Mockito.when(hmsHandler.get_partition_names("db2", "tab21", (short) -1))
      .thenReturn(new ArrayList<String>());

  Mockito.when(hmsHandler.get_database("db3")).thenReturn(db3);
  Mockito.when(hmsHandler.get_all_tables("db3")).thenReturn(Lists.newArrayList("tab31"));
  Mockito.when(hmsHandler.get_table_objects_by_name("db3", Lists.newArrayList("tab31")))
      .thenReturn(Lists.newArrayList(tab31));
  Mockito.when(hmsHandler.get_partition_names("db3", "tab31", (short) -1))
      .thenReturn(Lists.newArrayList("part311", "part312"));
  Mockito.when(hmsHandler.get_partitions_by_names("db3", "tab31", Lists.newArrayList("part311")))
      .thenReturn(Lists.newArrayList(part311));
  Mockito.when(hmsHandler.get_partitions_by_names("db3", "tab31", Lists.newArrayList("part312")))
      .thenReturn(Lists.newArrayList(part312));

  Configuration conf = new Configuration();
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_PART_PER_RPC, 1);
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_MAX_TABLES_PER_RPC, 1);
  conf.setInt(ServiceConstants.ServerConfig.SENTRY_HDFS_SYNC_METASTORE_CACHE_INIT_THREADS, 1);

  MetastoreCacheInitializer cacheInitializer = new MetastoreCacheInitializer(hmsHandler, conf);
  UpdateableAuthzPaths update = cacheInitializer.createInitialUpdate();

  Assert.assertEquals(new HashSet<String>(Arrays.asList("db1")),
      update.findAuthzObjectExactMatches(new String[]{"db1"}));
  Assert.assertEquals(new HashSet<String>(Arrays.asList("db2")),
      update.findAuthzObjectExactMatches(new String[]{"db2"}));
  Assert.assertEquals(new HashSet<String>(Arrays.asList("db2.tab21")),
      update.findAuthzObjectExactMatches(new String[]{"db2", "tab21"}));
  Assert.assertEquals(new HashSet<String>(Arrays.asList("db3")),
      update.findAuthzObjectExactMatches(new String[]{"db3"}));
  Assert.assertEquals(new HashSet<String>(Arrays.asList("db3.tab31")),
      update.findAuthzObjectExactMatches(new String[]{"db3", "tab31"}));
  Assert.assertEquals(new HashSet<String>(Arrays.asList("db3.tab31")),
      update.findAuthzObjectExactMatches(new String[]{"db3", "tab31", "part311"}));
  Assert.assertEquals(new HashSet<String>(Arrays.asList("db3.tab31")),
      update.findAuthzObjectExactMatches(new String[]{"db3", "tab31", "part312"}));

  cacheInitializer.close();
}
Example #12
Source File: HiveTestService.java From hudi with Apache License 2.0 | 4 votes |
public TServer startMetaStore(String forceBindIP, int port, HiveConf conf) throws IOException {
  try {
    // Server will create new threads up to max as necessary. After an idle
    // period, it will destroy threads to keep the number of threads in the
    // pool to min.
    int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
    int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
    boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
    boolean useFramedTransport = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);

    // don't support SASL yet
    // boolean useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);

    TServerTransport serverTransport;
    if (forceBindIP != null) {
      InetSocketAddress address = new InetSocketAddress(forceBindIP, port);
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(address) : new TServerSocket(address);
    } else {
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(port) : new TServerSocket(port);
    }

    TProcessor processor;
    TTransportFactory transFactory;

    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false);
    IHMSHandler handler = RetryingHMSHandler.getProxy(conf, baseHandler, true);

    if (conf.getBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI)) {
      transFactory = useFramedTransport
          ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory())
          : new TUGIContainingTransport.Factory();
      processor = new TUGIBasedProcessor<>(handler);
      LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
    } else {
      transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
      processor = new TSetIpAddressProcessor<>(handler);
      LOG.info("Starting DB backed MetaStore Server");
    }

    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .transportFactory(transFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(minWorkerThreads)
        .maxWorkerThreads(maxWorkerThreads);

    final TServer tServer = new TThreadPoolServer(args);
    executorService.submit(tServer::serve);
    return tServer;
  } catch (Throwable x) {
    throw new IOException(x);
  }
}
Example #13
Source File: ExceptionWrappingHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 4 votes |
@Test
public void get_databaseNoExceptions() throws Exception {
  IHMSHandler handler = ExceptionWrappingHMSHandler.newProxyInstance(baseHandler);
  handler.get_database("bdp");
  verify(baseHandler).get_database("bdp");
}
Example #14
Source File: TSetIpAddressProcessorFactory.java From waggle-dance with Apache License 2.0 | 4 votes |
private IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf, boolean local)
    throws MetaException {
  return RetryingHMSHandler.getProxy(hiveConf, baseHandler, local);
}
Example #15
Source File: ExceptionWrappingHMSHandler.java From waggle-dance with Apache License 2.0 | 4 votes |
public ExceptionWrappingHMSHandler(IHMSHandler baseHandler) {
  this.baseHandler = baseHandler;
}
Example #16
Source File: ExceptionWrappingHMSHandler.java From waggle-dance with Apache License 2.0 | 4 votes |
public static IHMSHandler newProxyInstance(IHMSHandler baseHandler) {
  return (IHMSHandler) Proxy.newProxyInstance(
      ExceptionWrappingHMSHandler.class.getClassLoader(),
      new Class[] { IHMSHandler.class },
      new ExceptionWrappingHMSHandler(baseHandler));
}
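Taken together with the tests in Examples #5 through #8, the proxy returned by newProxyInstance rethrows waggle-dance runtime exceptions as MetaException while letting other runtime and checked exceptions pass through unchanged. A usage sketch of that observed contract; the surrounding method is hypothetical and only illustrates how a caller might rely on it:

// Hypothetical caller of ExceptionWrappingHMSHandler.newProxyInstance().
void lookupDatabase(IHMSHandler baseHandler) throws TException {
  IHMSHandler wrapped = ExceptionWrappingHMSHandler.newProxyInstance(baseHandler);
  try {
    wrapped.get_database("bdp");
  } catch (MetaException e) {
    // Per the tests above, WaggleDanceServerException and NotAllowedException
    // thrown by the delegate surface here as MetaException.
  } catch (NoSuchObjectException e) {
    // Checked metastore exceptions are not wrapped and arrive unchanged.
  }
  // Generic RuntimeExceptions from the delegate also propagate unwrapped.
}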
Example #17
Source File: AtlasHiveHookContext.java From atlas with Apache License 2.0 | 4 votes |
public IHMSHandler getMetastoreHandler() {
  return metastoreHandler;
}