org.apache.hadoop.hive.metastore.HiveMetaStore Java Examples
The following examples show how to use
org.apache.hadoop.hive.metastore.HiveMetaStore.
Each example notes the project and source file it was taken from.
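Before the individual examples, here is a minimal sketch of the pattern they all build on: constructing a HiveConf and starting an embedded metastore with HiveMetaStore.startMetaStore. This sketch is illustrative and not taken from any of the projects below; the class name and port value are assumptions, and the HadoopThriftAuthBridge is obtained via ShimLoader as in Example #5 (other examples construct it directly, which varies by Hive version).

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.shims.ShimLoader;

// Hypothetical example class, not part of any project shown below.
public class EmbeddedMetastoreSketch {
  public static void main(String[] args) throws Throwable {
    HiveConf conf = new HiveConf();
    int port = 9083; // conventional metastore Thrift port; any free port works for tests

    // startMetaStore serves requests on the calling thread, which is why the test
    // harnesses in Examples #3-#5 run it on an executor or background thread instead.
    HiveMetaStore.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), conf);
  }
}

The examples that follow show how real projects wrap this call (or build the Thrift server by hand from an HMSHandler) for embedded and test deployments.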
Example #1
Source File: HiveTableBaseTest.java From iceberg with Apache License 2.0
private TServer thriftServer() throws IOException, TTransportException, MetaException,
    InvocationTargetException, NoSuchMethodException, IllegalAccessException, NoSuchFieldException {
  final TServerSocketKeepAlive socket = new TServerSocketKeepAlive(new TServerSocket(0));
  this.hiveConf = hiveConf(new Configuration(), socket.getServerSocket().getLocalPort());
  HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", hiveConf);
  IHMSHandler handler = RetryingHMSHandler.getProxy(hiveConf, baseHandler, true);
  final TTransportFactory transportFactory = new TTransportFactory();
  final TSetIpAddressProcessor<IHMSHandler> processor = new TSetIpAddressProcessor<>(handler);

  TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
      .processor(processor)
      .transportFactory(transportFactory)
      .protocolFactory(new TBinaryProtocol.Factory())
      .minWorkerThreads(3)
      .maxWorkerThreads(5);

  return new TThreadPoolServer(args);
}
Example #2
Source File: TestHiveMetastore.java From iceberg with Apache License 2.0
private TServer newThriftServer(TServerSocket socket, HiveConf conf) throws Exception {
  HiveConf serverConf = new HiveConf(conf);
  serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname,
      "jdbc:derby:" + getDerbyPath() + ";create=true");
  HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", serverConf);
  IHMSHandler handler = RetryingHMSHandler.getProxy(serverConf, baseHandler, false);

  TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
      .processor(new TSetIpAddressProcessor<>(handler))
      .transportFactory(new TTransportFactory())
      .protocolFactory(new TBinaryProtocol.Factory())
      .minWorkerThreads(3)
      .maxWorkerThreads(5);

  return new TThreadPoolServer(args);
}
Example #3
Source File: ThriftHiveMetaStoreCore.java From beeju with Apache License 2.0
public void initialise() throws Exception {
  thriftPort = -1;
  final Lock startLock = new ReentrantLock();
  final Condition startCondition = startLock.newCondition();
  final AtomicBoolean startedServing = new AtomicBoolean();
  try (ServerSocket socket = new ServerSocket(0)) {
    thriftPort = socket.getLocalPort();
  }
  beejuCore.setHiveVar(HiveConf.ConfVars.METASTOREURIS, getThriftConnectionUri());
  final HiveConf hiveConf = new HiveConf(beejuCore.conf(), HiveMetaStoreClient.class);

  thriftServer.execute(new Runnable() {
    @Override
    public void run() {
      try {
        HadoopThriftAuthBridge bridge = new HadoopThriftAuthBridge23();
        HiveMetaStore.startMetaStore(thriftPort, bridge, hiveConf, startLock, startCondition, startedServing);
      } catch (Throwable e) {
        LOG.error("Unable to start a Thrift server for Hive Metastore", e);
      }
    }
  });

  int i = 0;
  while (i++ < 3) {
    startLock.lock();
    try {
      if (startCondition.await(1, TimeUnit.MINUTES)) {
        break;
      }
    } finally {
      startLock.unlock();
    }
    if (i == 3) {
      throw new RuntimeException("Maximum number of tries reached whilst waiting for Thrift server to be ready");
    }
  }
}
Example #4
Source File: HiveLocalMetaStore.java From hadoop-mini-clusters with Apache License 2.0
@Override
public void run() {
  try {
    HiveMetaStore.startMetaStore(hiveMetastorePort, new HadoopThriftAuthBridge(), hiveConf);
  } catch (Throwable t) {
    t.printStackTrace();
  }
}
Example #5
Source File: InternalMetastoreServer.java From incubator-sentry with Apache License 2.0
private void startMetastore() throws Exception {
  Callable<Void> metastoreService = new Callable<Void>() {
    public Void call() throws Exception {
      try {
        HiveMetaStore.startMetaStore(getMetastorePort(conf),
            ShimLoader.getHadoopThriftAuthBridge(), conf);
      } catch (Throwable e) {
        throw new Exception("Error starting metastore", e);
      }
      return null;
    }
  };
  metaStoreExecutor.submit(metastoreService);
}
Example #6
Source File: EmbeddedHiveServer.java From incubator-sentry with Apache License 2.0
@Override
public void start() {
  // Fix for ACCESS-148. Resets a static field
  // so the default database is created even
  // though it has been created before in this JVM
  Reflection.staticField("createDefaultDB")
      .ofType(boolean.class)
      .in(HiveMetaStore.HMSHandler.class)
      .set(false);
}
Example #7
Source File: HiveTestService.java From hudi with Apache License 2.0
public TServer startMetaStore(String forceBindIP, int port, HiveConf conf) throws IOException {
  try {
    // Server will create new threads up to max as necessary. After an idle
    // period, it will destroy threads to keep the number of threads in the
    // pool to min.
    int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
    int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
    boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
    boolean useFramedTransport = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);

    // don't support SASL yet
    // boolean useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);

    TServerTransport serverTransport;
    if (forceBindIP != null) {
      InetSocketAddress address = new InetSocketAddress(forceBindIP, port);
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(address) : new TServerSocket(address);
    } else {
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(port) : new TServerSocket(port);
    }

    TProcessor processor;
    TTransportFactory transFactory;

    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, false);
    IHMSHandler handler = RetryingHMSHandler.getProxy(conf, baseHandler, true);

    if (conf.getBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI)) {
      transFactory = useFramedTransport
          ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory())
          : new TUGIContainingTransport.Factory();
      processor = new TUGIBasedProcessor<>(handler);
      LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
    } else {
      transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
      processor = new TSetIpAddressProcessor<>(handler);
      LOG.info("Starting DB backed MetaStore Server");
    }

    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .transportFactory(transFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(minWorkerThreads)
        .maxWorkerThreads(maxWorkerThreads);

    final TServer tServer = new TThreadPoolServer(args);
    executorService.submit(tServer::serve);
    return tServer;
  } catch (Throwable x) {
    throw new IOException(x);
  }
}
Example #8
Source File: HiveService.java From kite with Apache License 2.0
public TServer startMetaStore(String forceBindIP, int port, HiveConf conf) throws IOException {
  try {
    // Server will create new threads up to max as necessary. After an idle
    // period, it will destroy threads to keep the number of threads in the
    // pool to min.
    int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
    int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
    boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE);
    boolean useFramedTransport = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT);

    // don't support SASL yet
    // boolean useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL);

    TServerTransport serverTransport;
    if (forceBindIP != null) {
      InetSocketAddress address = new InetSocketAddress(forceBindIP, port);
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(address) : new TServerSocket(address);
    } else {
      serverTransport = tcpKeepAlive ? new TServerSocketKeepAlive(port) : new TServerSocket(port);
    }

    TProcessor processor;
    TTransportFactory transFactory;

    IHMSHandler handler = (IHMSHandler) HiveMetaStore
        .newRetryingHMSHandler("new db based metaserver", conf, true);

    if (conf.getBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI)) {
      transFactory = useFramedTransport
          ? new ChainedTTransportFactory(new TFramedTransport.Factory(), new TUGIContainingTransport.Factory())
          : new TUGIContainingTransport.Factory();
      processor = new TUGIBasedProcessor<IHMSHandler>(handler);
      LOG.info("Starting DB backed MetaStore Server with SetUGI enabled");
    } else {
      transFactory = useFramedTransport ? new TFramedTransport.Factory() : new TTransportFactory();
      processor = new TSetIpAddressProcessor<IHMSHandler>(handler);
      LOG.info("Starting DB backed MetaStore Server");
    }

    TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport)
        .processor(processor)
        .transportFactory(transFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(minWorkerThreads)
        .maxWorkerThreads(maxWorkerThreads);

    final TServer tServer = new TThreadPoolServer(args);
    executorService.submit(new Runnable() {
      @Override
      public void run() {
        tServer.serve();
      }
    });
    return tServer;
  } catch (Throwable x) {
    throw new IOException(x);
  }
}
Example #9
Source File: TransformTask.java From kite with Apache License 2.0
public PipelineResult run() throws IOException {
  boolean isLocal = (isLocal(from.getDataset()) || isLocal(to.getDataset()));
  if (isLocal) {
    // copy to avoid making changes to the caller's configuration
    Configuration conf = new Configuration(getConf());
    conf.set("mapreduce.framework.name", "local");
    setConf(conf);
  }

  if (isHive(from) || isHive(to)) {
    setConf(addHiveDelegationToken(getConf()));

    // add jars needed for metastore interaction to the classpath
    if (!isLocal) {
      Class<?> fb303Class, thriftClass;
      try {
        // attempt to use libfb303 and libthrift 0.9.2 when async was added
        fb303Class = Class.forName(
            "com.facebook.fb303.FacebookService.AsyncProcessor");
        thriftClass = Class.forName(
            "org.apache.thrift.TBaseAsyncProcessor");
      } catch (ClassNotFoundException e) {
        try {
          // fallback to 0.9.0 or earlier
          fb303Class = Class.forName(
              "com.facebook.fb303.FacebookBase");
          thriftClass = Class.forName(
              "org.apache.thrift.TBase");
        } catch (ClassNotFoundException real) {
          throw new DatasetOperationException(
              "Cannot find thrift dependencies", real);
        }
      }

      TaskUtil.configure(getConf())
          .addJarForClass(Encoder.class)               // commons-codec
          .addJarForClass(Log.class)                   // commons-logging
          .addJarForClass(CompressorInputStream.class) // commons-compress
          .addJarForClass(ApiAdapter.class)            // datanucleus-core
          .addJarForClass(JDOAdapter.class)            // datanucleus-api-jdo
          .addJarForClass(SQLQuery.class)              // datanucleus-rdbms
          .addJarForClass(JDOHelper.class)             // jdo-api
          .addJarForClass(Transaction.class)           // jta
          .addJarForClass(fb303Class)                  // libfb303
          .addJarForClass(thriftClass)                 // libthrift
          .addJarForClass(HiveMetaStore.class)         // hive-metastore
          .addJarForClass(HiveConf.class);             // hive-exec
    }
  }

  PType<T> toPType = ptype(to);
  MapFn<T, T> validate = new CheckEntityClass<T>(to.getType());

  Pipeline pipeline = new MRPipeline(getClass(), getConf());

  PCollection<T> collection = pipeline.read(CrunchDatasets.asSource(from))
      .parallelDo(transform, toPType).parallelDo(validate, toPType);

  if (compact) {
    // the transform must be run before partitioning
    collection = CrunchDatasets.partition(collection, to, numWriters, numPartitionWriters);
  }

  pipeline.write(collection, CrunchDatasets.asTarget(to), mode);

  PipelineResult result = pipeline.done();

  StageResult sr = Iterables.getFirst(result.getStageResults(), null);
  if (sr != null && MAP_INPUT_RECORDS != null) {
    this.count = sr.getCounterValue(MAP_INPUT_RECORDS);
  }

  return result;
}
Example #10
Source File: MetacatHMSHandler.java From metacat with Apache License 2.0
/**
 * Constructor.
 *
 * @param name client name
 * @throws MetaException exception
 */
public MetacatHMSHandler(final String name) throws MetaException {
  this(name, new HiveConf(HiveMetaStore.HMSHandler.class));
}