org.apache.hadoop.hdfs.protocol.CorruptFileBlocks Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.protocol.CorruptFileBlocks.
The original project, source file, and license for each example are noted above its code.
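Before the individual examples, here is the common client-side pattern in one place: CorruptFileBlocks is a paged response, so a caller feeds the cookie from each reply into the next request until an empty file list comes back. This is an illustrative sketch, not code from any project below; the class and helper names and the assumption of an already-initialized DFSClient are ours.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;

public class ListCorruptFilesSketch {
  // Collect every corrupt file under "path" by paging with the cookie.
  static List<String> collectCorruptFiles(DFSClient dfs, String path)
      throws IOException {
    List<String> corrupt = new ArrayList<String>();
    String cookie = null;                     // null requests the first page
    while (true) {
      CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
      String[] files = cfb.getFiles();
      if (files.length == 0) {
        break;                                // empty page: nothing left
      }
      corrupt.addAll(Arrays.asList(files));
      cookie = cfb.getCookie();               // opaque marker for the next page
    }
    return corrupt;
  }
}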
Example #1
Source File: DFSClient.java (RDFS, Apache License 2.0)

/** Method based listCorruptFileBlocks */
private CorruptFileBlocks methodBasedListCorruptFileBlocks(String path,
    String cookie) throws IOException {
  if (!namenodeProtocolProxy.isMethodSupported("listCorruptFileBlocks",
      String.class, String.class)) {
    LOG.info("NameNode version is " + namenodeVersion
        + " Using older version of getCorruptFiles.");
    if (cookie != null) {
      return new CorruptFileBlocks(new String[0], "");
    }
    ArrayList<String> str = new ArrayList<String>();
    for (FileStatus stat : namenode.getCorruptFiles()) {
      String filename = stat.getPath().toUri().getPath();
      if (filename.startsWith(path)) {
        str.add(filename);
      }
    }
    return new CorruptFileBlocks(str.toArray(new String[str.size()]), "");
  }
  return namenode.listCorruptFileBlocks(path, cookie);
}
Example #2
Source File: DFSClient.java (RDFS, Apache License 2.0)

/** Version based list corrupt file blocks */
private CorruptFileBlocks versionBasedListCorruptFileBlocks(String path,
    String cookie) throws IOException {
  if (namenodeVersion < ClientProtocol.LIST_CORRUPT_FILEBLOCKS_VERSION) {
    LOG.info("NameNode version is " + namenodeVersion
        + " Using older version of getCorruptFiles.");
    if (cookie != null) {
      return new CorruptFileBlocks(new String[0], "");
    }
    ArrayList<String> str = new ArrayList<String>();
    for (FileStatus stat : namenode.getCorruptFiles()) {
      String filename = stat.getPath().toUri().getPath();
      if (filename.startsWith(path)) {
        str.add(filename);
      }
    }
    return new CorruptFileBlocks(str.toArray(new String[str.size()]), "");
  }
  return namenode.listCorruptFileBlocks(path, cookie);
}
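These two methods are alternative fallback paths for older NameNodes: Example #1 probes whether the remote side supports the listCorruptFileBlocks method, while Example #2 compares protocol version numbers. Example #7 below shows how DFSClient chooses between them, using the method-based probe whenever a protocol proxy is available.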
Example #3
Source File: CorruptFileBlockIterator.java (RDFS, Apache License 2.0)

private void loadNext() throws IOException {
  if (files == null || fileIdx >= files.length) {
    CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
    files = cfb.getFiles();
    cookie = cfb.getCookie();
    fileIdx = 0;
    callsMade++;
  }

  if (fileIdx >= files.length) {
    // received an empty response
    // there are no more corrupt file blocks
    nextPath = null;
  } else {
    nextPath = string2Path(files[fileIdx]);
    fileIdx++;
  }
}
Example #4
Source File: CorruptFileBlockIterator.java (big-c, Apache License 2.0)

private void loadNext() throws IOException {
  if (files == null || fileIdx >= files.length) {
    CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
    files = cfb.getFiles();
    cookie = cfb.getCookie();
    fileIdx = 0;
    callsMade++;
  }

  if (fileIdx >= files.length) {
    // received an empty response
    // there are no more corrupt file blocks
    nextPath = null;
  } else {
    nextPath = string2Path(files[fileIdx]);
    fileIdx++;
  }
}
Example #5
Source File: CorruptFileBlockIterator.java (hadoop, Apache License 2.0)

private void loadNext() throws IOException {
  if (files == null || fileIdx >= files.length) {
    CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
    files = cfb.getFiles();
    cookie = cfb.getCookie();
    fileIdx = 0;
    callsMade++;
  }

  if (fileIdx >= files.length) {
    // received an empty response
    // there are no more corrupt file blocks
    nextPath = null;
  } else {
    nextPath = string2Path(files[fileIdx]);
    fileIdx++;
  }
}
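This loadNext() drives the RemoteIterator<Path> that DistributedFileSystem.listCorruptFileBlocks(Path) hands back. A minimal consumption sketch, assuming an already-initialized DistributedFileSystem; the wrapper class and method name are ours:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CorruptIteratorSketch {
  // Print every corrupt file under "path"; each hasNext()/next() pair may
  // trigger another paged RPC via loadNext() above.
  static void printCorrupt(DistributedFileSystem fs, Path path)
      throws IOException {
    RemoteIterator<Path> it = fs.listCorruptFileBlocks(path);
    while (it.hasNext()) {
      System.out.println("corrupt: " + it.next());
    }
  }
}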
Example #6
Source File: NameNodeRpcServer.java (big-c, Apache License 2.0)

@Override // ClientProtocol
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  checkNNStartup();
  String[] cookieTab = new String[] { cookie };
  Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
      namesystem.listCorruptFileBlocks(path, cookieTab);

  String[] files = new String[fbs.size()];
  int i = 0;
  for (FSNamesystem.CorruptFileBlockInfo fb : fbs) {
    files[i++] = fb.path;
  }
  return new CorruptFileBlocks(files, cookieTab[0]);
}
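Note the single-element cookieTab array: it acts as an in/out parameter, carrying the caller's cookie into FSNamesystem and returning the updated position in cookieTab[0], which becomes the cookie the client sends for its next page.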
Example #7
Source File: DFSClient.java (RDFS, Apache License 2.0)

/**
 * @return a list in which each entry describes a corrupt file/block
 * @throws AccessControlException
 * @throws IOException
 */
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  if (namenodeProtocolProxy == null) {
    return versionBasedListCorruptFileBlocks(path, cookie);
  }
  return methodBasedListCorruptFileBlocks(path, cookie);
}
Example #8
Source File: DistributedAvatarFileSystem.java (RDFS, Apache License 2.0)

@Override
public CorruptFileBlocks listCorruptFileBlocks(final String path,
    final String cookie) throws IOException {
  return (new ImmutableFSCaller<CorruptFileBlocks>() {
    CorruptFileBlocks call() throws IOException {
      return namenode.listCorruptFileBlocks(path, cookie);
    }
  }).callFS();
}
Example #9
Source File: ClientNamenodeProtocolTranslatorPB.java (big-c, Apache License 2.0)

@Override
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  ListCorruptFileBlocksRequestProto.Builder req =
      ListCorruptFileBlocksRequestProto.newBuilder().setPath(path);
  if (cookie != null) req.setCookie(cookie);
  try {
    return PBHelper.convert(
        rpcProxy.listCorruptFileBlocks(null, req.build()).getCorrupt());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #10
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java (big-c, Apache License 2.0)

@Override
public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
    RpcController controller, ListCorruptFileBlocksRequestProto req)
    throws ServiceException {
  try {
    CorruptFileBlocks result = server.listCorruptFileBlocks(
        req.getPath(), req.hasCookie() ? req.getCookie() : null);
    return ListCorruptFileBlocksResponseProto.newBuilder()
        .setCorrupt(PBHelper.convert(result))
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
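Examples #9 and #10 are the two ends of the same protobuf RPC: the client-side translator builds a ListCorruptFileBlocksRequestProto and unwraps the response, while this server-side translator unwraps the request, calls the real implementation, and wraps the resulting CorruptFileBlocks. The PBHelper conversions they both rely on appear in the next two examples.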
Example #11
Source File: PBHelper.java (big-c, Apache License 2.0)

public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
  if (c == null) return null;
  return CorruptFileBlocksProto.newBuilder()
      .addAllFiles(Arrays.asList(c.getFiles()))
      .setCookie(c.getCookie())
      .build();
}
Example #12
Source File: PBHelper.java (big-c, Apache License 2.0)

public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
  if (c == null) return null;
  List<String> fileList = c.getFilesList();
  return new CorruptFileBlocks(
      fileList.toArray(new String[fileList.size()]), c.getCookie());
}
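The two PBHelper methods are inverses, so a CorruptFileBlocks value should survive a round trip through its protobuf form. A small illustrative check; the class name and main method are ours, the import paths follow the hadoop/big-c source trees, and we compare fields directly rather than assuming an equals() override:

import java.util.Arrays;

import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

public class CorruptFileBlocksRoundTrip {
  public static void main(String[] args) {
    CorruptFileBlocks in =
        new CorruptFileBlocks(new String[] { "/a", "/b" }, "cookie-42");
    CorruptFileBlocksProto proto = PBHelper.convert(in);  // Java -> protobuf
    CorruptFileBlocks out = PBHelper.convert(proto);      // protobuf -> Java
    System.out.println(Arrays.equals(in.getFiles(), out.getFiles())
        && in.getCookie().equals(out.getCookie()));       // expected: true
  }
}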
Example #13
Source File: DFSClient.java (big-c, Apache License 2.0)

/**
 * @return a list in which each entry describes a corrupt file/block
 * @throws IOException
 */
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("listCorruptFileBlocks", path);
  try {
    return namenode.listCorruptFileBlocks(path, cookie);
  } finally {
    scope.close();
  }
}
Example #14
Source File: ClientNamenodeProtocolTranslatorPB.java (hadoop, Apache License 2.0)

@Override
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  ListCorruptFileBlocksRequestProto.Builder req =
      ListCorruptFileBlocksRequestProto.newBuilder().setPath(path);
  if (cookie != null) req.setCookie(cookie);
  try {
    return PBHelper.convert(
        rpcProxy.listCorruptFileBlocks(null, req.build()).getCorrupt());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
Example #15
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java (hadoop, Apache License 2.0)

@Override
public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
    RpcController controller, ListCorruptFileBlocksRequestProto req)
    throws ServiceException {
  try {
    CorruptFileBlocks result = server.listCorruptFileBlocks(
        req.getPath(), req.hasCookie() ? req.getCookie() : null);
    return ListCorruptFileBlocksResponseProto.newBuilder()
        .setCorrupt(PBHelper.convert(result))
        .build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
Example #16
Source File: PBHelper.java (hadoop, Apache License 2.0)

public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
  if (c == null) return null;
  return CorruptFileBlocksProto.newBuilder()
      .addAllFiles(Arrays.asList(c.getFiles()))
      .setCookie(c.getCookie())
      .build();
}
Example #17
Source File: PBHelper.java (hadoop, Apache License 2.0)

public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
  if (c == null) return null;
  List<String> fileList = c.getFilesList();
  return new CorruptFileBlocks(
      fileList.toArray(new String[fileList.size()]), c.getCookie());
}
Example #18
Source File: NameNodeRpcServer.java (hadoop, Apache License 2.0)

@Override // ClientProtocol
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  checkNNStartup();
  String[] cookieTab = new String[] { cookie };
  Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
      namesystem.listCorruptFileBlocks(path, cookieTab);

  String[] files = new String[fbs.size()];
  int i = 0;
  for (FSNamesystem.CorruptFileBlockInfo fb : fbs) {
    files[i++] = fb.path;
  }
  return new CorruptFileBlocks(files, cookieTab[0]);
}
Example #19
Source File: DFSClient.java (hadoop, Apache License 2.0)

/**
 * @return a list in which each entry describes a corrupt file/block
 * @throws IOException
 */
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
    throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("listCorruptFileBlocks", path);
  try {
    return namenode.listCorruptFileBlocks(path, cookie);
  } finally {
    scope.close();
  }
}
Example #20
Source File: TestFsck.java (hadoop, Apache License 2.0)

/** Check if option -list-corruptfileblocks of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder()
        .setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1)
        .setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    String outStr = runFsck(conf, 0, false, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));

    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles =
            MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
        if (metadataFiles == null) continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks =
        namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks =
          namenode.listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
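Outside the test harness, the same listing is available from the shell: running hdfs fsck /corruptData -list-corruptfileblocks against a live cluster prints the corrupt-file output these assertions check for.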
Example #21
Source File: TestFsck.java (big-c, Apache License 2.0)

/** Check if option -list-corruptfileblocks of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder()
        .setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1)
        .setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    String outStr = runFsck(conf, 0, false, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));

    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles =
            MiniDFSCluster.getAllBlockMetadataFiles(dataDir);
        if (metadataFiles == null) continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks =
        namenode.listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks =
          namenode.listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData",
        "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}