org.apache.hadoop.hive.shims.ShimLoader Java Examples
The following examples show how to use
org.apache.hadoop.hive.shims.ShimLoader.
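Every example below goes through one of two static entry points on ShimLoader: getHadoopShims(), which returns the HadoopShims implementation matching the Hadoop version found on the classpath, and getHadoopThriftAuthBridge(), which supplies the authentication bridge that Thrift services such as the metastore use under Hadoop security. Here is a minimal sketch of both calls, assuming only that Hive's shims module is on the classpath; the class and variable names are illustrative, and the HadoopThriftAuthBridge package shown matches Hive 1.x/2.x (it moved in later releases).

import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;

public class ShimLoaderSketch {
    public static void main(String[] args) {
        // Resolves and caches the shim implementation matching the
        // Hadoop version detected on the classpath.
        HadoopShims shims = ShimLoader.getHadoopShims();

        // Auth bridge handed to Thrift servers (e.g. the metastore) so
        // they can speak SASL/Kerberos when Hadoop security is enabled.
        HadoopThriftAuthBridge bridge = ShimLoader.getHadoopThriftAuthBridge();

        // The selected shim's name doubles as a coarse version indicator.
        System.out.println("Shim version: " + ShimLoader.getMajorVersion());
    }
}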
Example #1
Source File: MetaStoreProxyServer.java From waggle-dance with Apache License 2.0
private TServerSocket createServerSocket(boolean useSSL, int port) throws IOException, TTransportException {
  TServerSocket serverSocket = null;
  // Enable SSL support for HMS
  List<String> sslVersionBlacklist = new ArrayList<>(
      Arrays.asList(hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")));
  if (!useSSL) {
    serverSocket = HiveAuthUtils.getServerSocket(null, port);
  } else {
    String keyStorePath = hiveConf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
    if (keyStorePath.isEmpty()) {
      throw new IllegalArgumentException(
          ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname + " not configured for SSL connection");
    }
    String keyStorePassword = ShimLoader
        .getHadoopShims()
        .getPassword(hiveConf, HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
    serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword,
        sslVersionBlacklist);
  }
  return serverSocket;
}
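Routing the password lookup through the shim, rather than reading it straight from the HiveConf, is what allows the keystore password to live in a Hadoop credential provider instead of plain text in hive-site.xml: on Hadoop 2.6+ the shim delegates to Configuration.getPassword(), which consults any configured CredentialProviders before falling back to the configuration value. A minimal sketch of that lookup in isolation (the class and method names here are illustrative):

import java.io.IOException;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.shims.ShimLoader;

public class KeystorePasswordLookup {
    public static String keystorePassword(HiveConf conf) throws IOException {
        // Resolves the password via Configuration.getPassword() where
        // available, so a jceks credential store is checked before any
        // clear-text value in the configuration file.
        return ShimLoader.getHadoopShims().getPassword(conf,
            HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
    }
}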
Example #2
Source File: SpliceOrcUtils.java From spliceengine with GNU Affero General Public License v3.0
public static List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException {
  List<OrcSplit> splits = OrcInputFormat.generateSplitsInfo(
      ShimLoader.getHadoopShims().getConfiguration(jobContext));
  List<InputSplit> result = new ArrayList<InputSplit>(splits.size());
  // Wrap each ORC split in the new-API OrcNewSplit
  for (OrcSplit split : splits) {
    result.add(new OrcNewSplit(split));
  }
  return result;
}
Example #3
Source File: SqoopHCatUtilities.java From aliyun-maxcompute-data-collectors with Apache License 2.0
public void invokeOutputCommitterForLocalMode(Job job) throws IOException {
  if (ConfigurationHelper.isLocalJobTracker(job.getConfiguration()) && isHadoop1()) {
    // HCatalog 0.11 and earlier had a dedicated HCatHadoopShims class; it was
    // merged into the Hive shim layer in 0.12+. The shim call below finds the
    // correct implementation via reflection.
    HadoopShims shims = ShimLoader.getHadoopShims();
    HCatHadoopShims hcatShims = shims.getHCatShim();
    try {
      hcatShims.commitJob(new HCatOutputFormat(), job);
    } catch (Exception e) {
      throw new RuntimeException("Can't explicitly commit job", e);
    }
  }
}
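The isHadoop1() guard above comes from Sqoop's own helpers, but the shim layer can answer the same question: ShimLoader.getMajorVersion() returns the name of the selected shim, which in Hive 1.x/2.x is "0.20S" for Hadoop 1 and "0.23" for Hadoop 2 and later. A hedged sketch of such a guard (the prefix check mirrors those shim names and may need adjusting for other Hive releases):

import org.apache.hadoop.hive.shims.ShimLoader;

public class HadoopVersionGuard {
    // Hive maps Hadoop 1.x to the "0.20S" shim and Hadoop 2.x/3.x to the
    // "0.23" shim, so the shim name works as a coarse version check.
    public static boolean isHadoop1() {
        return ShimLoader.getMajorVersion().startsWith("0.20");
    }
}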
Example #4
Source File: MetaStoreProxyServer.java From waggle-dance with Apache License 2.0
@Override
public void run(ApplicationArguments args) throws Exception {
  if (isRunning()) {
    throw new RuntimeException("Can't run more than one instance");
  }
  final boolean isCliVerbose = waggleDanceConfiguration.isVerbose();
  try {
    String msg = "Starting WaggleDance on port " + waggleDanceConfiguration.getPort();
    LOG.info(msg);
    if (waggleDanceConfiguration.isVerbose()) {
      System.err.println(msg);
    }
    // Add shutdown hook.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      String shutdownMsg = "Shutting down WaggleDance.";
      LOG.info(shutdownMsg);
      if (isCliVerbose) {
        System.err.println(shutdownMsg);
      }
    }));
    AtomicBoolean startedServing = new AtomicBoolean();
    startWaggleDance(ShimLoader.getHadoopThriftAuthBridge(), startLock, startCondition, startedServing);
  } catch (Throwable t) {
    // Catch the exception, log it and rethrow it.
    LOG.error("WaggleDance Thrift Server threw an exception...", t);
    throw new Exception(t);
  }
}
Example #5
Source File: HoodieCombineHiveInputFormat.java From hudi with Apache License 2.0
/**
 * Create a generic Hive RecordReader that can iterate over all chunks in a CombinedFileSplit.
 */
@Override
public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
  if (!(split instanceof CombineHiveInputSplit)) {
    return super.getRecordReader(split, job, reporter);
  }
  CombineHiveInputSplit hsplit = (CombineHiveInputSplit) split;
  String inputFormatClassName = null;
  Class<?> inputFormatClass;
  try {
    inputFormatClassName = hsplit.inputFormatClassName();
    inputFormatClass = job.getClassByName(inputFormatClassName);
  } catch (Exception e) {
    throw new IOException("cannot find class " + inputFormatClassName);
  }
  pushProjectionsAndFilters(job, inputFormatClass, hsplit.getPath(0));
  if (inputFormatClass.getName().equals(HoodieParquetInputFormat.class.getName())) {
    return ShimLoader.getHadoopShims().getCombineFileInputFormat()
        .getRecordReader(job, (CombineFileSplit) split, reporter, CombineHiveRecordReader.class);
  } else if (inputFormatClass.getName().equals(HoodieParquetRealtimeInputFormat.class.getName())) {
    HoodieCombineFileInputFormatShim shims = new HoodieCombineFileInputFormatShim();
    IOContextMap.get(job).setInputPath(((CombineHiveInputSplit) split).getPath(0));
    return shims.getRecordReader(job, ((CombineHiveInputSplit) split).getInputSplitShim(), reporter,
        CombineHiveRecordReader.class);
  } else {
    throw new HoodieException("Unexpected input format : " + inputFormatClassName);
  }
}
Example #6
Source File: InternalMetastoreServer.java From incubator-sentry with Apache License 2.0
private void startMetastore() throws Exception {
  Callable<Void> metastoreService = new Callable<Void>() {
    public Void call() throws Exception {
      try {
        HiveMetaStore.startMetaStore(getMetastorePort(conf), ShimLoader.getHadoopThriftAuthBridge(), conf);
      } catch (Throwable e) {
        throw new Exception("Error starting metastore", e);
      }
      return null;
    }
  };
  metaStoreExecutor.submit(metastoreService);
}
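Note that HiveMetaStore.startMetaStore blocks for the lifetime of the server, which is why this example hands it to an executor rather than calling it inline (Example #12 below does the same from a test setup method). The auth bridge argument is harmless in unsecured test setups: it only switches the Thrift transport to SASL/Kerberos when the configuration enables Hadoop security.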
Example #7
Source File: SpliceOrcUtils.java From spliceengine with GNU Affero General Public License v3.0
public static List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException {
  Configuration conf = ShimLoader.getHadoopShims().getConfiguration(jobContext);
  List<OrcSplit> splits = OrcInputFormat.generateSplitsInfo(conf, createContext(conf, -1));
  List<InputSplit> result = new ArrayList<InputSplit>(splits.size());
  for (OrcSplit split : splits) {
    result.add(new OrcNewSplit(split));
  }
  return result;
}
Example #8
Source File: HoodieCombineHiveInputFormat.java From hudi with Apache License 2.0
public CombineHiveInputSplit() throws IOException {
  this(ShimLoader.getHadoopShims().getCombineFileInputFormat().getInputSplitShim());
}
Example #9
Source File: HoodieCombineRealtimeHiveSplit.java From hudi with Apache License 2.0
public HoodieCombineRealtimeHiveSplit() throws IOException {
  super(ShimLoader.getHadoopShims().getCombineFileInputFormat().getInputSplitShim());
}
Example #10
Source File: Base64TextInputFormat.java From bigdata-tutorial with Apache License 2.0
public void validateInput(JobConf job) throws IOException {
  ShimLoader.getHadoopShims().inputFormatValidateInput(format, job);
}
Example #11
Source File: MapredParquetOutputFormat.java From parquet-mr with Apache License 2.0
@Override
public void checkOutputSpecs(final FileSystem ignored, final JobConf job) throws IOException {
  realOutputFormat.checkOutputSpecs(
      ShimLoader.getHadoopShims().getHCatShim().createJobContext(job, null));
}
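The detour through getHCatShim().createJobContext(job, null) exists because a mapreduce JobContext cannot be constructed portably by hand: in Hadoop 1, org.apache.hadoop.mapreduce.JobContext is a concrete class, while in Hadoop 2 it became an interface backed by JobContextImpl, so direct construction compiles against only one of the two. The HCat shim hides that difference behind a single factory call.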
Example #12
Source File: BaseTestHiveImpersonation.java From dremio-oss with Apache License 2.0
protected static void startHiveMetaStore() throws Exception {
  final int port = MetaStoreUtils.findFreePort();
  hiveConf.set(METASTOREURIS.varname, "thrift://localhost:" + port);
  MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
}