org.apache.hadoop.util.VersionUtil Java Examples
The following examples show how to use
org.apache.hadoop.util.VersionUtil.
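As a quick reference before the examples: VersionUtil.compareVersions(v1, v2) returns a negative number if v1 is older than v2, zero if the two are equivalent, and a positive number if v1 is newer. The sketch below is a hypothetical standalone snippet (not taken from any of the projects listed here) illustrating the typical version-guard pattern the examples rely on; the minimum-version constant is an assumption chosen only for illustration.

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;

public class VersionUtilDemo {

  // Hypothetical minimum version, used only for illustration.
  private static final String MINIMUM_SUPPORTED_VERSION = "2.7.0";

  public static void main(String[] args) {
    // compareVersions behaves like Comparable#compareTo: < 0, 0, or > 0.
    System.out.println(VersionUtil.compareVersions("2.6.5", "2.7.0")); // negative: 2.6.5 is older
    System.out.println(VersionUtil.compareVersions("3.0.0", "3.0.0")); // zero: same version
    System.out.println(VersionUtil.compareVersions("3.1.1", "3.0.0")); // positive: 3.1.1 is newer

    // Typical guard: refuse to run against a Hadoop build older than some minimum.
    String runtimeVersion = VersionInfo.getVersion();
    if (VersionUtil.compareVersions(runtimeVersion, MINIMUM_SUPPORTED_VERSION) < 0) {
      throw new IllegalStateException(
          "Hadoop " + runtimeVersion + " is older than the required " + MINIMUM_SUPPORTED_VERSION);
    }
  }
}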
Example #1
Source File: BPServiceActor.java From hadoop with Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }

  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
Example #2
Source File: BPServiceActor.java From big-c with Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
  // build and layout versions should match
  String nnVersion = nsInfo.getSoftwareVersion();
  String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
  if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
    IncorrectVersionException ive = new IncorrectVersionException(
        minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
    LOG.warn(ive.getMessage());
    throw ive;
  }

  String dnVersion = VersionInfo.getVersion();
  if (!nnVersion.equals(dnVersion)) {
    LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
        "DataNode version '" + dnVersion + "' but is within acceptable " +
        "limits. Note: This is normal during a rolling upgrade.");
  }
}
Example #3
Source File: HBaseTestingClusterAutoStarter.java From flink with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  // HBase 1.4 does not work with Hadoop 3
  // because it uses Guava 12.0.1, Hadoop 3 uses Guava 27.0-jre.
  // There is no Guava version in between that works with both.
  Assume.assumeTrue("This test is skipped for Hadoop versions above 3",
      VersionUtil.compareVersions(System.getProperty("hadoop.version"), "3.0.0") < 0);

  LOG.info("HBase minicluster: Starting");

  TEST_UTIL.startMiniCluster(1);

  // https://issues.apache.org/jira/browse/HBASE-11711
  TEST_UTIL.getConfiguration().setInt("hbase.master.info.port", -1);

  // Make sure the zookeeper quorum value contains the right port number (varies per run).
  LOG.info("Hbase minicluster client port: " + TEST_UTIL.getZkCluster().getClientPort());
  TEST_UTIL.getConfiguration().set("hbase.zookeeper.quorum",
      "localhost:" + TEST_UTIL.getZkCluster().getClientPort());

  conf = initialize(TEST_UTIL.getConfiguration());
  LOG.info("HBase minicluster: Running");
}
Example #4
Source File: YarnFileStageTestS3ITCase.java From flink with Apache License 2.0
@Test
@RetryOnFailure(times = 3)
public void testRecursiveUploadForYarnS3n() throws Exception {
  // skip test on Hadoop 3: https://issues.apache.org/jira/browse/HADOOP-14738
  Assume.assumeTrue("This test is skipped for Hadoop versions above 3",
      VersionUtil.compareVersions(System.getProperty("hadoop.version"), "3.0.0") < 0);

  try {
    Class.forName("org.apache.hadoop.fs.s3native.NativeS3FileSystem");
  } catch (ClassNotFoundException e) {
    // not in the classpath, cannot run this test
    String msg = "Skipping test because NativeS3FileSystem is not in the class path";
    log.info(msg);
    assumeNoException(msg, e);
  }

  testRecursiveUploadForYarn("s3n", "testYarn-s3n");
}
Example #5
Source File: SparkShims.java From zeppelin with Apache License 2.0
/**
 * This is a temporary patch to support old versions of YARN that have not adopted YARN-6615.
 *
 * @return true if YARN-6615 is patched, false otherwise
 */
protected boolean supportYarn6615(String version) {
  return (VersionUtil.compareVersions(HADOOP_VERSION_2_6_6, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_2_7_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_2_7_4, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_2_8_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_2_8_2, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_2_9_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_2_9_0, version) <= 0
          && VersionUtil.compareVersions(HADOOP_VERSION_3_0_0, version) > 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_3_0_0_ALPHA4, version) <= 0)
      || (VersionUtil.compareVersions(HADOOP_VERSION_3_0_0, version) <= 0);
}
Example #6
Source File: MiniAccumuloClusterFactory.java From geowave with Apache License 2.0
protected static boolean isYarn() {
  return VersionUtil.compareVersions(VersionInfo.getVersion(), "2.2.0") >= 0;
}
Example #7
Source File: AccumuloMiniCluster.java From geowave with Apache License 2.0
protected static boolean isYarn() {
  return VersionUtil.compareVersions(VersionInfo.getVersion(), "2.2.0") >= 0;
}
Example #8
Source File: TestUtils.java From geowave with Apache License 2.0
public static boolean isYarn() {
  return VersionUtil.compareVersions(VersionInfo.getVersion(), "2.2.0") >= 0;
}
Example #9
Source File: ParquetReaderUtility.java From Bats with Apache License 2.0
/**
 * If binary metadata was stored prior to Drill version {@link #ALLOWED_DRILL_VERSION_FOR_BINARY},
 * it might have incorrectly defined min / max values.
 * If the given version is null, we assume it is prior to {@link #ALLOWED_DRILL_VERSION_FOR_BINARY}.
 * In that case we allow reading such metadata only if {@link ParquetReaderConfig#enableStringsSignedMinMax()} is true.
 *
 * @param drillVersion drill version used to create metadata file
 * @param readerConfig parquet reader configuration
 * @return true if reading binary min / max values is allowed, false otherwise
 */
private static boolean allowBinaryMetadata(String drillVersion, ParquetReaderConfig readerConfig) {
  return readerConfig.enableStringsSignedMinMax() ||
      (drillVersion != null && VersionUtil.compareVersions(ALLOWED_DRILL_VERSION_FOR_BINARY, drillVersion) <= 0);
}