Java Code Examples for org.apache.hadoop.util.StringUtils#hexStringToByte()
The following examples show how to use org.apache.hadoop.util.StringUtils#hexStringToByte().
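In short, hexStringToByte() parses a string of hexadecimal digits into the byte array it encodes, two hex digits per byte; StringUtils.byteToHexString() performs the reverse conversion. A minimal sketch (the class name and sample value are illustrative only):

import org.apache.hadoop.util.StringUtils;

public class HexStringToByteSketch {
  public static void main(String[] args) {
    // "68727063" encodes the four ASCII bytes of "hrpc" (the Hadoop RPC header
    // that also appears in the TestIPC example below).
    byte[] bytes = StringUtils.hexStringToByte("68727063");
    System.out.println(bytes.length);                        // 4
    System.out.println(new String(bytes));                   // hrpc
    System.out.println(StringUtils.byteToHexString(bytes));  // 68727063
  }
}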
Example 1
Source File: DistributedFileSystemMetadata.java, from hdfs-metadata (GNU General Public License v3.0)
/**
 * Returns a disk id (0-based) index from the Hdfs VolumeId object. There is
 * currently no public API to get at the volume id. We'll have to get it by
 * accessing the internals.
 */
public static int getDiskId(VolumeId hdfsVolumeId) {
  // Initialize the diskId as -1 to indicate it is unknown
  int diskId = -1;

  if (hdfsVolumeId != null) {
    String volumeIdString = hdfsVolumeId.toString();

    byte[] volumeIdBytes = StringUtils.hexStringToByte(volumeIdString);
    if (volumeIdBytes != null && volumeIdBytes.length == 4) {
      diskId = Utils.toInt(volumeIdBytes);
    } else if (volumeIdBytes != null && volumeIdBytes.length == 1) {
      diskId = (int) volumeIdBytes[0]; // support hadoop-2.0.2
    }
  }

  return diskId;
}
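As a rough illustration of the decoding above: the volume id string below is a made-up placeholder, and Utils.toInt (a project-specific helper) is assumed to read the four bytes as a big-endian int, approximated here with ByteBuffer.

import java.nio.ByteBuffer;
import org.apache.hadoop.util.StringUtils;

public class DiskIdSketch {
  public static void main(String[] args) {
    // Hypothetical value; real strings come from VolumeId#toString().
    String volumeIdString = "00000002";
    byte[] volumeIdBytes = StringUtils.hexStringToByte(volumeIdString);
    // Four bytes interpreted as a big-endian int -> disk id 2.
    int diskId = ByteBuffer.wrap(volumeIdBytes).getInt();
    System.out.println(diskId); // 2
  }
}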
Example 2
Source File: HttpFSFileSystem.java, from hadoop (Apache License 2.0)
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
  Map<String, String> params = new HashMap<String, String>();
  params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
  HttpURLConnection conn =
      getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
  HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  final JSONObject json = (JSONObject) ((JSONObject)
      HttpFSUtils.jsonParse(conn)).get(FILE_CHECKSUM_JSON);
  return new FileChecksum() {
    @Override
    public String getAlgorithmName() {
      return (String) json.get(CHECKSUM_ALGORITHM_JSON);
    }

    @Override
    public int getLength() {
      return ((Long) json.get(CHECKSUM_LENGTH_JSON)).intValue();
    }

    @Override
    public byte[] getBytes() {
      return StringUtils.hexStringToByte((String) json.get(CHECKSUM_BYTES_JSON));
    }

    @Override
    public void write(DataOutput out) throws IOException {
      throw new UnsupportedOperationException();
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      throw new UnsupportedOperationException();
    }
  };
}
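Hypothetical usage of the method above, assuming fs is a FileSystem backed by this implementation (for instance an HttpFSFileSystem instance) and the path is a placeholder; getBytes() hands back the raw checksum that the server shipped as a hex string.

import java.io.IOException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;

public class ChecksumSketch {
  static void printChecksum(FileSystem fs) throws IOException {
    FileChecksum checksum = fs.getFileChecksum(new Path("/tmp/data.bin")); // hypothetical path
    System.out.println(checksum.getAlgorithmName());
    System.out.println(checksum.getLength());
    // The raw bytes were transported as a hex string and decoded by hexStringToByte above.
    System.out.println(StringUtils.byteToHexString(checksum.getBytes()));
  }
}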
Example 3
Source File: TestIPC.java, from hadoop (Apache License 2.0)
/**
 * Convert a string of lines that look like:
 *   "68 72 70 63 02 00 00 00 82 00 1d 6f 72 67 2e 61 hrpc.... ...org.a"
 * .. into an array of bytes.
 */
private static byte[] hexDumpToBytes(String hexdump) {
  final int LAST_HEX_COL = 3 * 16;

  StringBuilder hexString = new StringBuilder();
  for (String line : StringUtils.toUpperCase(hexdump).split("\n")) {
    hexString.append(line.substring(0, LAST_HEX_COL).replace(" ", ""));
  }
  return StringUtils.hexStringToByte(hexString.toString());
}
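A standalone sketch of the same decoding applied to the single sample line from the Javadoc above: only the first 3 * 16 characters are hex columns, and the trailing ASCII rendering is dropped before hexStringToByte sees the string.

import org.apache.hadoop.util.StringUtils;

public class HexDumpSketch {
  public static void main(String[] args) {
    String line = "68 72 70 63 02 00 00 00 82 00 1d 6f 72 67 2e 61 hrpc.... ...org.a";
    final int LAST_HEX_COL = 3 * 16;
    // Keep the 16 "XX " hex columns, strip the spaces, then decode.
    String hex = StringUtils.toUpperCase(line).substring(0, LAST_HEX_COL).replace(" ", "");
    byte[] bytes = StringUtils.hexStringToByte(hex);
    System.out.println(bytes.length); // 16
  }
}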
Example 4
Source File: HttpFSFileSystem.java, from big-c (Apache License 2.0)
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
  Map<String, String> params = new HashMap<String, String>();
  params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
  HttpURLConnection conn =
      getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
  HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
  final JSONObject json = (JSONObject) ((JSONObject)
      HttpFSUtils.jsonParse(conn)).get(FILE_CHECKSUM_JSON);
  return new FileChecksum() {
    @Override
    public String getAlgorithmName() {
      return (String) json.get(CHECKSUM_ALGORITHM_JSON);
    }

    @Override
    public int getLength() {
      return ((Long) json.get(CHECKSUM_LENGTH_JSON)).intValue();
    }

    @Override
    public byte[] getBytes() {
      return StringUtils.hexStringToByte((String) json.get(CHECKSUM_BYTES_JSON));
    }

    @Override
    public void write(DataOutput out) throws IOException {
      throw new UnsupportedOperationException();
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      throw new UnsupportedOperationException();
    }
  };
}
Example 5
Source File: TestIPC.java, from big-c (Apache License 2.0)
/**
 * Convert a string of lines that look like:
 *   "68 72 70 63 02 00 00 00 82 00 1d 6f 72 67 2e 61 hrpc.... ...org.a"
 * .. into an array of bytes.
 */
private static byte[] hexDumpToBytes(String hexdump) {
  final int LAST_HEX_COL = 3 * 16;

  StringBuilder hexString = new StringBuilder();
  for (String line : StringUtils.toUpperCase(hexdump).split("\n")) {
    hexString.append(line.substring(0, LAST_HEX_COL).replace(" ", ""));
  }
  return StringUtils.hexStringToByte(hexString.toString());
}
Example 6
Source File: JsonUtil.java, from hadoop (Apache License 2.0)
/** Convert a Json map to a MD5MD5CRC32FileChecksum. */
public static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum(
    final Map<?, ?> json) throws IOException {
  if (json == null) {
    return null;
  }

  final Map<?, ?> m = (Map<?, ?>) json.get(FileChecksum.class.getSimpleName());
  final String algorithm = (String) m.get("algorithm");
  final int length = ((Number) m.get("length")).intValue();
  final byte[] bytes = StringUtils.hexStringToByte((String) m.get("bytes"));

  final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
  final DataChecksum.Type crcType =
      MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
  final MD5MD5CRC32FileChecksum checksum;

  // Recreate what DFSClient would have returned.
  switch (crcType) {
    case CRC32:
      checksum = new MD5MD5CRC32GzipFileChecksum();
      break;
    case CRC32C:
      checksum = new MD5MD5CRC32CastagnoliFileChecksum();
      break;
    default:
      throw new IOException("Unknown algorithm: " + algorithm);
  }
  checksum.readFields(in);

  // check algorithm name
  if (!checksum.getAlgorithmName().equals(algorithm)) {
    throw new IOException("Algorithm not matched. Expected " + algorithm
        + ", Received " + checksum.getAlgorithmName());
  }
  // check length
  if (length != checksum.getLength()) {
    throw new IOException("Length not matched: length=" + length
        + ", checksum.getLength()=" + checksum.getLength());
  }

  return checksum;
}
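For orientation, a rough sketch of the map shape this method expects. The key names come from the code above; the values shown are hypothetical placeholders, since in practice the map is produced by parsing a WebHDFS GETFILECHECKSUM JSON response.

import java.util.HashMap;
import java.util.Map;

public class ChecksumJsonShape {
  static Map<String, Object> sample() {
    Map<String, Object> m = new HashMap<>();
    m.put("algorithm", "MD5-of-0MD5-of-512CRC32C"); // placeholder algorithm name
    m.put("length", 28L);                           // placeholder length
    m.put("bytes", "aaabbbcc...");                  // placeholder; a real response carries the full hex string
    Map<String, Object> json = new HashMap<>();
    json.put("FileChecksum", m);                    // key is FileChecksum.class.getSimpleName()
    return json;
  }
}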
Example 7
Source File: TestDFSUpgradeFromImage.java, from hadoop (Apache License 2.0)
/**
 * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
 * NN, verifying that the correct error message is thrown.
 */
@Test
public void testFailOnPreUpgradeImage() throws IOException {
  Configuration conf = new HdfsConfiguration();

  File namenodeStorage = new File(TEST_ROOT_DIR, "nnimage-0.3.0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());

  // Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
  FileUtil.fullyDelete(namenodeStorage);
  assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
  File imageDir = new File(namenodeStorage, "image");
  assertTrue("Make " + imageDir, imageDir.mkdirs());

  // Hex dump of a formatted image from Hadoop 0.3.0
  File imageFile = new File(imageDir, "fsimage");
  byte[] imageBytes = StringUtils.hexStringToByte(
      "fffffffee17c0d2700000000");
  FileOutputStream fos = new FileOutputStream(imageFile);
  try {
    fos.write(imageBytes);
  } finally {
    fos.close();
  }

  // Now try to start an NN from it
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(false)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fail("Was able to start NN from 0.3.0 image");
  } catch (IOException ioe) {
    if (!ioe.toString().contains("Old layout version is 'too old'")) {
      throw ioe;
    }
  } finally {
    // We expect startup to fail, but just in case it didn't, shutdown now.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
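A quick sketch of what that hard-coded hex string decodes to; reading the leading int as the ancient layout version is an assumption based on the test's intent.

import java.nio.ByteBuffer;
import org.apache.hadoop.util.StringUtils;

public class FsImageHeaderSketch {
  public static void main(String[] args) {
    byte[] imageBytes = StringUtils.hexStringToByte("fffffffee17c0d2700000000");
    System.out.println(imageBytes.length);                     // 12
    // First four bytes read as a big-endian int: 0xfffffffe == -2,
    // presumably the pre-upgrade layout version the NameNode rejects as 'too old'.
    System.out.println(ByteBuffer.wrap(imageBytes).getInt());  // -2
  }
}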
Example 8
Source File: JsonUtil.java, from big-c (Apache License 2.0)
/** Convert a Json map to a MD5MD5CRC32FileChecksum. */
public static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum(
    final Map<?, ?> json) throws IOException {
  if (json == null) {
    return null;
  }

  final Map<?, ?> m = (Map<?, ?>) json.get(FileChecksum.class.getSimpleName());
  final String algorithm = (String) m.get("algorithm");
  final int length = ((Number) m.get("length")).intValue();
  final byte[] bytes = StringUtils.hexStringToByte((String) m.get("bytes"));

  final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
  final DataChecksum.Type crcType =
      MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
  final MD5MD5CRC32FileChecksum checksum;

  // Recreate what DFSClient would have returned.
  switch (crcType) {
    case CRC32:
      checksum = new MD5MD5CRC32GzipFileChecksum();
      break;
    case CRC32C:
      checksum = new MD5MD5CRC32CastagnoliFileChecksum();
      break;
    default:
      throw new IOException("Unknown algorithm: " + algorithm);
  }
  checksum.readFields(in);

  // check algorithm name
  if (!checksum.getAlgorithmName().equals(algorithm)) {
    throw new IOException("Algorithm not matched. Expected " + algorithm
        + ", Received " + checksum.getAlgorithmName());
  }
  // check length
  if (length != checksum.getLength()) {
    throw new IOException("Length not matched: length=" + length
        + ", checksum.getLength()=" + checksum.getLength());
  }

  return checksum;
}
Example 9
Source File: TestDFSUpgradeFromImage.java, from big-c (Apache License 2.0)
/**
 * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
 * NN, verifying that the correct error message is thrown.
 */
@Test
public void testFailOnPreUpgradeImage() throws IOException {
  Configuration conf = new HdfsConfiguration();

  File namenodeStorage = new File(TEST_ROOT_DIR, "nnimage-0.3.0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());

  // Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
  FileUtil.fullyDelete(namenodeStorage);
  assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
  File imageDir = new File(namenodeStorage, "image");
  assertTrue("Make " + imageDir, imageDir.mkdirs());

  // Hex dump of a formatted image from Hadoop 0.3.0
  File imageFile = new File(imageDir, "fsimage");
  byte[] imageBytes = StringUtils.hexStringToByte(
      "fffffffee17c0d2700000000");
  FileOutputStream fos = new FileOutputStream(imageFile);
  try {
    fos.write(imageBytes);
  } finally {
    fos.close();
  }

  // Now try to start an NN from it
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(false)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fail("Was able to start NN from 0.3.0 image");
  } catch (IOException ioe) {
    if (!ioe.toString().contains("Old layout version is 'too old'")) {
      throw ioe;
    }
  } finally {
    // We expect startup to fail, but just in case it didn't, shutdown now.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}