Java Code Examples for org.apache.hadoop.hdfs.DFSClient#close()
The following examples show how to use
org.apache.hadoop.hdfs.DFSClient#close().
The originating project, source file, and license are noted above each example.
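Before turning to the examples, here is the bare pattern they all build on: create a client, use it, and make sure close() runs on every path. The sketch below is illustrative rather than taken from any of the projects that follow, and it assumes a Configuration whose fs.defaultFS names a reachable NameNode; the class name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class DfsClientCloseSketch {
  public static void main(String[] args) throws IOException {
    // Assumption: fs.defaultFS in this Configuration points at a live cluster.
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(conf);
    try {
      // ... use the client: open(), create(), getFileChecksum(), ...
    } finally {
      client.close(); // closes any files still being written and the NameNode connection
    }
  }
}

Each example below is a variation on this theme: closing clients held in a cache, created per request, or used in tests.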
Example 1
Source File: DFSClientCache.java From hadoop with Apache License 2.0
/**
 * Close all DFSClient instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();
  ConcurrentMap<String, DFSClient> map = clientCache.asMap();
  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }
  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example 2
Source File: WebHdfsHandler.java From hadoop with Apache License 2.0
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  MD5MD5CRC32FileChecksum checksum = null;
  final String nnId = params.namenodeId();
  DFSClient dfsclient = newDfsClient(nnId, conf);
  try {
    checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
    dfsclient.close();
    dfsclient = null; // mark the close as successful
  } finally {
    // a no-op when close() succeeded above; closes the client if
    // getFileChecksum() or close() threw
    IOUtils.cleanup(LOG, dfsclient);
  }
  final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
      new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));
  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  resp.headers().set(CONNECTION, CLOSE);
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
Example 3
Source File: DFSClientCache.java From big-c with Apache License 2.0
/**
 * Close all DFSClient instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();
  ConcurrentMap<String, DFSClient> map = clientCache.asMap();
  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }
  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
Example 4
Source File: WebHdfsHandler.java From big-c with Apache License 2.0
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
  MD5MD5CRC32FileChecksum checksum = null;
  final String nnId = params.namenodeId();
  DFSClient dfsclient = newDfsClient(nnId, conf);
  try {
    checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
    dfsclient.close();
    dfsclient = null; // mark the close as successful
  } finally {
    // a no-op when close() succeeded above; closes the client if
    // getFileChecksum() or close() threw
    IOUtils.cleanup(LOG, dfsclient);
  }
  final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
  DefaultFullHttpResponse resp =
      new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));
  resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
  resp.headers().set(CONTENT_LENGTH, js.length);
  resp.headers().set(CONNECTION, CLOSE);
  ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
Example 5
Source File: DFSClientCache.java From hadoop with Apache License 2.0
private RemovalListener<String, DFSClient> clientRemovalListener() {
  // Close the DFSClient whenever its entry is evicted from the Guava cache.
  return new RemovalListener<String, DFSClient>() {
    @Override
    public void onRemoval(RemovalNotification<String, DFSClient> notification) {
      DFSClient client = notification.getValue();
      try {
        client.close();
      } catch (IOException e) {
        LOG.warn(String.format(
            "IOException when closing the DFSClient(%s), cause: %s",
            client, e));
      }
    }
  };
}
Example 6
Source File: DFSClientCache.java From big-c with Apache License 2.0
private RemovalListener<String, DFSClient> clientRemovalListener() {
  // Close the DFSClient whenever its entry is evicted from the Guava cache.
  return new RemovalListener<String, DFSClient>() {
    @Override
    public void onRemoval(RemovalNotification<String, DFSClient> notification) {
      DFSClient client = notification.getValue();
      try {
        client.close();
      } catch (IOException e) {
        LOG.warn(String.format(
            "IOException when closing the DFSClient(%s), cause: %s",
            client, e));
      }
    }
  };
}
Example 7
Source File: StreamFile.java From hadoop-gpu with Apache License 2.0
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  String filename = request.getParameter("filename");
  if (filename == null || filename.length() == 0) {
    response.setContentType("text/plain");
    PrintWriter out = response.getWriter();
    out.print("Invalid input");
    return;
  }
  DFSClient dfs = getDFSClient(request);
  FSInputStream in = dfs.open(filename);
  OutputStream os = response.getOutputStream();
  response.setHeader("Content-Disposition",
      "attachment; filename=\"" + filename + "\"");
  response.setContentType("application/octet-stream");
  byte buf[] = new byte[4096];
  try {
    int bytesRead;
    while ((bytesRead = in.read(buf)) != -1) {
      os.write(buf, 0, bytesRead);
    }
  } finally {
    in.close();
    os.close();
    dfs.close();
  }
}
Example 8
Source File: TestDataNodeRollingUpgrade.java From hadoop with Apache License 2.0
@Test (timeout=600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();
    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
    byte[] toWrite = new byte[1024 * 1024 * 8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);
    s1.write(toWrite, 0, 1024 * 1024 * 8);
    s1.flush();
    s2.write(toWrite, 0, 1024 * 1024 * 8);
    s2.flush();
    s3.write(toWrite, 0, 1024 * 1024 * 8);
    s3.flush();
    // Every open peer should have a matching xceiver thread.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();
    client3.close();
  } finally {
    shutdownCluster();
  }
}
Example 9
Source File: TestDataNodeRollingUpgrade.java From big-c with Apache License 2.0
@Test (timeout=600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();
    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
    byte[] toWrite = new byte[1024 * 1024 * 8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);
    s1.write(toWrite, 0, 1024 * 1024 * 8);
    s1.flush();
    s2.write(toWrite, 0, 1024 * 1024 * 8);
    s2.flush();
    s3.write(toWrite, 0, 1024 * 1024 * 8);
    s3.flush();
    // Every open peer should have a matching xceiver thread.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();
    client3.close();
  } finally {
    shutdownCluster();
  }
}
Example 10
Source File: WaitingRoom.java From RDFS with Apache License 2.0
/**
 * Loads the mapping of all files in the WaitingRoom to their block lists
 * into the fileMap. Loads all blocks that are part of files in the
 * WaitingRoom into the blockRefMap.
 */
private void loadMaps() throws IOException {
  LOG.info("Loading WaitingRoomPurger maps.");
  fileMap.clear();
  blockRefMap.clear();
  DFSClient client = new DFSClient(conf);
  Path wrRoot = new Path(wrDir);
  addDirToMaps(wrRoot, client);
  client.close();
  LOG.info("WaitingRoomPurger maps loaded successfully.");
}
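All ten examples release the client explicitly, most of them from a finally block or a removal hook so that close() runs even on error paths. Since DFSClient implements java.io.Closeable (Example 2 relies on this when it hands the client to IOUtils.cleanup), callers on Java 7 or later can get the same guarantee more compactly with try-with-resources. Below is a minimal sketch under the same assumption as before, a Configuration that names a reachable NameNode; the class name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class TryWithResourcesSketch {
  public static void main(String[] args) throws IOException {
    // Assumption: fs.defaultFS in this Configuration points at a live cluster.
    Configuration conf = new Configuration();
    // DFSClient implements Closeable, so close() is invoked automatically
    // when the try block exits, whether normally or via an exception.
    try (DFSClient client = new DFSClient(conf)) {
      // ... use the client ...
    }
  }
}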