Java Code Examples for org.apache.hadoop.util.VersionInfo#getVersion()
The following examples show how to use org.apache.hadoop.util.VersionInfo#getVersion(). Each snippet is taken from an open-source project; the source file and license are noted above each example.
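Before the project examples, a minimal standalone sketch of the API may help. VersionInfo exposes static getters describing the Hadoop build found on the classpath; getVersion() returns the bare version string (for example "2.7.3"). The class name ShowHadoopVersion below is illustrative only; the only assumption is that hadoop-common is on the classpath.

import org.apache.hadoop.util.VersionInfo;

public class ShowHadoopVersion {
    public static void main(String[] args) {
        // Version string of the Hadoop build on the classpath, e.g. "2.7.3".
        System.out.println("Hadoop version: " + VersionInfo.getVersion());
        // Companion getters used by several of the examples below.
        System.out.println("Build version:  " + VersionInfo.getBuildVersion());
        System.out.println("Built on:       " + VersionInfo.getDate());
    }
}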
Example 1
Source File: HadoopRecoverableFsDataOutputStream.java From flink with Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
    if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
        throw new IllegalStateException("Truncation is not available in hadoop version < 2.7 , You are on Hadoop " + VersionInfo.getVersion());
    }

    if (truncateHandle != null) {
        try {
            return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
        } catch (InvocationTargetException e) {
            ExceptionUtils.rethrowIOException(e.getTargetException());
        } catch (Throwable t) {
            throw new IOException(
                "Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
                "This is most likely a dependency conflict or class loading problem.");
        }
    } else {
        throw new IllegalStateException("Truncation handle has not been initialized");
    }
    return false;
}
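Flink goes through truncateHandle (a java.lang.reflect.Method resolved elsewhere in the class) rather than calling truncate directly, because FileSystem#truncate only exists from Hadoop 2.7 onward and a direct call would not link against older client jars. The final return false is unreachable in practice, since ExceptionUtils.rethrowIOException always throws, but the compiler cannot prove that and requires it.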
Example 2
Source File: NodeInfo.java From big-c with Apache License 2.0
public NodeInfo(final Context context, final ResourceView resourceView) {
    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.vmemCheckEnabled = resourceView.isVmemCheckEnabled();
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.pmemCheckEnabled = resourceView.isPmemCheckEnabled();
    this.totalVCoresAllocatedContainers = resourceView
        .getVCoresAllocatedForContainers();
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();
    this.healthReport = context.getNodeHealthStatus().getHealthReport();
    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
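NodeInfo is the bean behind the NodeManager's web UI and REST info endpoint. Note the symmetry: each YarnVersionInfo getter (version, build version, build date) describing the NodeManager is paired with the corresponding common VersionInfo getter describing the underlying Hadoop libraries. Example 5 below does the same for the ResourceManager.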
Example 3
Source File: ShimLoader.java From dremio-oss with Apache License 2.0
/**
 * Return the "major" version of Hadoop currently on the classpath.
 * Releases in the 1.x and 2.x series are mapped to the appropriate
 * 0.x release series, e.g. 1.x is mapped to "0.20S" and 2.x
 * is mapped to "0.23".
 */
public static String getMajorVersion() {
    String vers = VersionInfo.getVersion();

    String[] parts = vers.split("\\.");
    if (parts.length < 2) {
        throw new RuntimeException("Illegal Hadoop Version: " + vers +
            " (expected A.B.* format)");
    }

    switch (Integer.parseInt(parts[0])) {
        case 2:
        case 3:
            return HADOOP23VERSIONNAME;
        default:
            throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers);
    }
}
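Note that the javadoc is stale relative to the body: the switch accepts only major versions 2 and 3, mapping both to HADOOP23VERSIONNAME, so a 1.x version string falls through to the default branch and throws instead of mapping to "0.20S".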
Example 4
Source File: NNThroughputBenchmark.java From big-c with Apache License 2.0
void register() throws IOException {
    // get versions from the namenode
    nsInfo = nameNodeProto.versionRequest();
    dnRegistration = new DatanodeRegistration(
        new DatanodeID(DNS.getDefaultIP("default"),
            DNS.getDefaultHost("default", "default"),
            DataNode.generateUuid(), getNodePort(dnIdx),
            DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
            DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
            DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
    // register datanode
    dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
    // first block reports
    storage = new DatanodeStorage(DatanodeStorage.generateUuid());
    final StorageBlockReport[] reports = {
        new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
    };
    nameNodeProto.blockReport(dnRegistration,
        nameNode.getNamesystem().getBlockPoolId(), reports,
        new BlockReportContext(1, 0, System.nanoTime()));
}
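The benchmark fabricates datanode registrations without running real DataNodes, but the NameNode validates the software version reported by each registering datanode against its configured minimum. Passing VersionInfo.getVersion() makes the synthetic datanode report the actual version of the Hadoop jars the benchmark runs against, so registration succeeds. Example 6 is the same method from the upstream hadoop tree.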
Example 5
Source File: ClusterInfo.java From hadoop with Apache License 2.0
public ClusterInfo(ResourceManager rm) {
    long ts = ResourceManager.getClusterTimeStamp();

    this.id = ts;
    this.state = rm.getServiceState();
    this.haState = rm.getRMContext().getHAServiceState();
    this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
        .getName();
    this.startedOn = ts;
    this.resourceManagerVersion = YarnVersionInfo.getVersion();
    this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
}
Example 6
Source File: NNThroughputBenchmark.java From hadoop with Apache License 2.0
void register() throws IOException {
    // get versions from the namenode
    nsInfo = nameNodeProto.versionRequest();
    dnRegistration = new DatanodeRegistration(
        new DatanodeID(DNS.getDefaultIP("default"),
            DNS.getDefaultHost("default", "default"),
            DataNode.generateUuid(), getNodePort(dnIdx),
            DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
            DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
            DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
        new DataStorage(nsInfo),
        new ExportedBlockKeys(), VersionInfo.getVersion());
    // register datanode
    dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
    // first block reports
    storage = new DatanodeStorage(DatanodeStorage.generateUuid());
    final StorageBlockReport[] reports = {
        new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
    };
    nameNodeProto.blockReport(dnRegistration,
        nameNode.getNamesystem().getBlockPoolId(), reports,
        new BlockReportContext(1, 0, System.nanoTime()));
}
Example 7
Source File: BPServiceActor.java From hadoop with Apache License 2.0
private void checkNNVersion(NamespaceInfo nsInfo)
    throws IncorrectVersionException {
    // build and layout versions should match
    String nnVersion = nsInfo.getSoftwareVersion();
    String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
    if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
        IncorrectVersionException ive = new IncorrectVersionException(
            minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
        LOG.warn(ive.getMessage());
        throw ive;
    }
    String dnVersion = VersionInfo.getVersion();
    if (!nnVersion.equals(dnVersion)) {
        LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
            "DataNode version '" + dnVersion + "' but is within acceptable " +
            "limits. Note: This is normal during a rolling upgrade.");
    }
}
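Unlike the string-splitting helpers elsewhere on this page, VersionUtil.compareVersions performs a component-wise comparison of the two version strings. The asymmetry is deliberate: a NameNode older than the DataNode's configured minimum is a hard failure, while any other mismatch between the two versions is merely logged so that rolling upgrades can proceed.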
Example 8
Source File: CommonHadoopShim.java From pentaho-hadoop-shims with Apache License 2.0
@Override
public String getHadoopVersion() {
    ClassLoader originalClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        return VersionInfo.getVersion();
    } finally {
        Thread.currentThread().setContextClassLoader(originalClassLoader);
    }
}
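The shim swaps in its own classloader as the thread context classloader before calling getVersion(), restoring the original in a finally block. This is presumably because VersionInfo resolves its version-properties resource through the context classloader, so the swap ensures the version reported is that of the shim's bundled Hadoop client rather than of some other Hadoop copy visible to the calling thread.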
Example 9
Source File: MiniCluster.java From incubator-retired-blur with Apache License 2.0
public boolean useYarn() {
    String version = VersionInfo.getVersion();
    if (version.startsWith("0.20.") || version.startsWith("1.")) {
        return false;
    }
    // Check for mr1 hadoop2
    if (isMr1Hadoop2()) {
        return false;
    }
    return true;
}
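The prefixes "0.20." and "1." identify pre-YARN release lines, so the mini cluster falls back to the classic MapReduce runtime for them. The isMr1Hadoop2() check (defined elsewhere in the class) additionally catches Hadoop 2 distributions that were repackaged with the MR1 runtime, as some vendor builds were.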
Example 10
Source File: HadoopUtils.java From flink with Apache License 2.0
private static Tuple2<Integer, Integer> getMajorMinorBundledHadoopVersion() {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");

    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }

    int maj = Integer.parseInt(versionParts[0]);
    int min = Integer.parseInt(versionParts[1]);
    return Tuple2.of(maj, min);
}
Example 11
Source File: HadoopLocalFileSystemBehaviorTest.java From flink with Apache License 2.0
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
    final String versionString = VersionInfo.getVersion();
    final String prefix = versionString.substring(0, 3);
    final float version = Float.parseFloat(prefix);
    Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

    super.testMkdirsFailsForExistingFile();
}
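The parsing here is fragile: substring(0, 3) plus Float.parseFloat only works for single-digit major.minor pairs. On Hadoop "2.10.0" the prefix is "2.1", which parses as 2.1 < 2.8 and wrongly skips the test. The integer-based isMinHadoopVersion helper shown in Examples 12 and 14 avoids this; the same fragile test recurs verbatim as Example 13.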
Example 12
Source File: HadoopUtils.java From flink with Apache License 2.0
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");

    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }

    int maj = Integer.parseInt(versionParts[0]);
    int min = Integer.parseInt(versionParts[1]);

    return maj > major || (maj == major && min >= minor);
}
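As a worked check: on Hadoop "2.7.3" the split yields maj = 2 and min = 7, so isMinHadoopVersion(2, 7) is true and isMinHadoopVersion(2, 8) is false, and unlike the float parsing in Example 11, "2.10.0" correctly satisfies isMinHadoopVersion(2, 8). Example 1 above uses this helper to gate the reflective truncate call.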
Example 13
Source File: HadoopLocalFileSystemBehaviorTest.java From Flink-CEPplus with Apache License 2.0
/**
 * This test needs to be skipped for earlier Hadoop versions because those
 * have a bug.
 */
@Override
public void testMkdirsFailsForExistingFile() throws Exception {
    final String versionString = VersionInfo.getVersion();
    final String prefix = versionString.substring(0, 3);
    final float version = Float.parseFloat(prefix);
    Assume.assumeTrue("Cannot execute this test on Hadoop prior to 2.8", version >= 2.8f);

    super.testMkdirsFailsForExistingFile();
}
Example 14
Source File: HadoopUtils.java From Flink-CEPplus with Apache License 2.0
/**
 * Checks if the Hadoop dependency is at least of the given version.
 */
public static boolean isMinHadoopVersion(int major, int minor) throws FlinkRuntimeException {
    String versionString = VersionInfo.getVersion();
    String[] versionParts = versionString.split("\\.");

    if (versionParts.length < 2) {
        throw new FlinkRuntimeException(
            "Cannot determine version of Hadoop, unexpected version string: " + versionString);
    }

    int maj = Integer.parseInt(versionParts[0]);
    int min = Integer.parseInt(versionParts[1]);

    return maj > major || (maj == major && min >= minor);
}
Example 15
Source File: DataNode.java From RDFS with Apache License 2.0
@Override // DataNodeMXBean
public String getVersion() {
    return VersionInfo.getVersion();
}
Example 16
Source File: SecondaryNameNode.java From hadoop with Apache License 2.0
@Override // VersionInfoMXBean
public String getSoftwareVersion() {
    return VersionInfo.getVersion();
}
Example 17
Source File: DataNode.java From hadoop with Apache License 2.0
@Override // DataNodeMXBean
public String getVersion() {
    return VersionInfo.getVersion();
}
Example 18
Source File: DataNode.java From hadoop with Apache License 2.0
@Override // ClientDatanodeProtocol
public DatanodeLocalInfo getDatanodeInfo() {
    long uptime = ManagementFactory.getRuntimeMXBean().getUptime() / 1000;
    return new DatanodeLocalInfo(VersionInfo.getVersion(), confVersion, uptime);
}
Example 19
Source File: DFSTestUtil.java From hadoop with Apache License 2.0
public static DatanodeRegistration getLocalDatanodeRegistration() {
    return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(
        NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
}
Example 20
Source File: SecondaryNameNode.java From big-c with Apache License 2.0
@Override // VersionInfoMXBean
public String getSoftwareVersion() {
    return VersionInfo.getVersion();
}