org.znerd.xmlenc.XMLOutputter Java Examples
The following examples show how to use
org.znerd.xmlenc.XMLOutputter.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: FileChecksumServlets.java From hadoop-gpu with Apache License 2.0 | 6 votes |
/**
 * Handles a GET request for a file checksum: resolves the requested file
 * name, queries the namenode/datanodes for its MD5-of-MD5-of-CRC32 checksum,
 * and writes the result (or the failure) into the response as XML.
 *
 * @param request  the HTTP request naming the target file
 * @param response the HTTP response the XML document is written to
 * @throws ServletException on servlet-level failures
 * @throws IOException on errors writing the response
 */
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  // Build a client configuration from the local datanode's configuration
  // and propagate the caller's user/group information into it.
  final Configuration conf = new Configuration(DataNode.getDataNode().getConf());
  final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ClientProtocol nnproxy = DFSClient.createNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        filename, nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    // Report the failure inside the same XML document rather than as an
    // HTTP error status.
    new RemoteException(ioe.getClass().getName(), ioe.getMessage()
        ).writeXml(filename, xml);
  }
  xml.endDocument();
}
Example #2
Source File: DfsServlet.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Serializes an exception as a {@code RemoteException} XML element.
 *
 * @param except the exception to serialize; when it is a RemoteException the
 *               original remote class name is used, otherwise the local class name
 * @param path   the file-system path the failed request referred to
 * @param doc    the XML outputter to write to
 * @throws IOException if writing to the outputter fails
 */
protected void writeXml(Exception except, String path, XMLOutputter doc)
    throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  if (except instanceof RemoteException) {
    doc.attribute("class", ((RemoteException) except).getClassName());
  } else {
    doc.attribute("class", except.getClass().getName());
  }
  // getLocalizedMessage() may return null (exceptions created without a
  // message); guard before string manipulation to avoid an NPE.
  String msg = except.getLocalizedMessage();
  if (msg == null) {
    msg = "";
  }
  // Keep only the first line of a multi-line message.
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  // Strip a leading "SomeClass: " prefix when present; indexOf(":") is -1
  // when absent, so substring(0) leaves the message unchanged.
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example #3
Source File: DfsServlet.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Serializes an exception as a {@code RemoteException} XML element.
 *
 * @param except the exception to serialize; when it is a RemoteException the
 *               original remote class name is used, otherwise the local class name
 * @param path   the file-system path the failed request referred to
 * @param doc    the XML outputter to write to
 * @throws IOException if writing to the outputter fails
 */
protected void writeXml(Exception except, String path, XMLOutputter doc)
    throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  if (except instanceof RemoteException) {
    doc.attribute("class", ((RemoteException) except).getClassName());
  } else {
    doc.attribute("class", except.getClass().getName());
  }
  // getLocalizedMessage() may return null (exceptions created without a
  // message); guard before string manipulation to avoid an NPE.
  String msg = except.getLocalizedMessage();
  if (msg == null) {
    msg = "";
  }
  // Keep only the first line of a multi-line message.
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  // Strip a leading "SomeClass: " prefix when present; indexOf(":") is -1
  // when absent, so substring(0) leaves the message unchanged.
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example #4
Source File: FileChecksumServlets.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Handles a GET request for a file checksum: decodes the requested path,
 * obtains a DFSClient via the datanode stored in the servlet context,
 * fetches the file's MD5-of-MD5-of-CRC32 checksum, and writes it (or the
 * failure) into the response as XML.
 *
 * @param request  the HTTP request naming the target path
 * @param response the HTTP response the XML document is written to
 * @throws ServletException on servlet-level failures
 * @throws IOException on errors writing the response
 */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = new HdfsConfiguration(datanode.getConf());

  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
        datanode, conf, getUGI(request, conf));
    // Long.MAX_VALUE presumably bounds the checksummed length to the whole
    // file — TODO confirm against DFSClient.getFileChecksum.
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
Example #5
Source File: FileChecksumServlets.java From hadoop with Apache License 2.0 | 6 votes |
/**
 * Handles a GET request for a file checksum: decodes the requested path,
 * obtains a DFSClient via the datanode stored in the servlet context,
 * fetches the file's MD5-of-MD5-of-CRC32 checksum, and writes it (or the
 * failure) into the response as XML.
 *
 * @param request  the HTTP request naming the target path
 * @param response the HTTP response the XML document is written to
 * @throws ServletException on servlet-level failures
 * @throws IOException on errors writing the response
 */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = new HdfsConfiguration(datanode.getConf());

  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
        datanode, conf, getUGI(request, conf));
    // Long.MAX_VALUE presumably bounds the checksummed length to the whole
    // file — TODO confirm against DFSClient.getFileChecksum.
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
Example #6
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Emit a single {@code <item label=key value=value/>} element.
 *
 * @param doc   outputter to write to
 * @param key   text for the {@code label} attribute
 * @param value text for the {@code value} attribute
 * @throws IOException if writing to the outputter fails
 */
private static void toXmlItemBlock(XMLOutputter doc, String key, String value)
    throws IOException {
  doc.startTag("item");
  doc.attribute("label", key);
  doc.attribute("value", value);
  doc.endTag(); // item
}
Example #7
Source File: RemoteException.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Writes this exception as a {@code RemoteException} XML element.
 *
 * @param path the file-system path the failed request referred to
 * @param doc  the XML outputter to write to
 * @throws IOException if writing to the outputter fails
 */
public void writeXml(String path, XMLOutputter doc) throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  doc.attribute("class", getClassName());
  // getLocalizedMessage() may return null (exceptions created without a
  // message); guard before string manipulation to avoid an NPE.
  String msg = getLocalizedMessage();
  if (msg == null) {
    msg = "";
  }
  // Keep only the first line of a multi-line message.
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  // Strip a leading "SomeClass: " prefix when present; indexOf(":") is -1
  // when absent, so substring(0) leaves the message unchanged.
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example #8
Source File: MD5MD5CRC32FileChecksum.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/**
 * Serialize the given checksum (possibly null) as a single XML element whose
 * attributes carry the checksum parameters.
 *
 * @param xml  outputter to write to
 * @param that checksum to serialize; a null value yields an empty element
 * @throws IOException if writing to the outputter fails
 */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  // A null checksum produces an element with no attributes.
  if (that != null) {
    xml.attribute("bytesPerCRC", String.valueOf(that.bytesPerCRC));
    xml.attribute("crcPerBlock", String.valueOf(that.crcPerBlock));
    xml.attribute("md5", String.valueOf(that.md5));
  }
  xml.endTag();
}
Example #9
Source File: FileChecksumServlets.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Handles a GET request for a file checksum: resolves the requested file
 * name, queries the namenode via an RPC proxy for its MD5-of-MD5-of-CRC32
 * checksum, and writes the result (or the failure) into the response as XML.
 *
 * @param request  the HTTP request naming the target file
 * @param response the HTTP response the XML document is written to
 * @throws ServletException on servlet-level failures
 * @throws IOException on errors writing the response
 */
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final UnixUserGroupInformation ugi = getUGI(request);
  final PrintWriter out = response.getWriter();
  final String filename = getFilename(request, response);
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  // Prefer the daemon's configuration from the servlet context; fall back
  // to a fresh default Configuration when none was registered.
  Configuration daemonConf = (Configuration) getServletContext()
      .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE);
  final Configuration conf = (daemonConf == null) ? new Configuration()
      : new Configuration(daemonConf);
  final int socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
  final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  UnixUserGroupInformation.saveToConf(conf,
      UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  final ProtocolProxy<ClientProtocol> nnproxy =
    DFSClient.createRPCNamenode(conf);

  try {
    final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
        DataTransferProtocol.DATA_TRANSFER_VERSION,
        filename, nnproxy.getProxy(), nnproxy, socketFactory, socketTimeout);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    // Report the failure inside the same XML document rather than as an
    // HTTP error status.
    new RemoteException(ioe.getClass().getName(), ioe.getMessage()
        ).writeXml(filename, xml);
  }
  xml.endDocument();
}
Example #10
Source File: RemoteException.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Writes this exception as a {@code RemoteException} XML element.
 *
 * @param path the file-system path the failed request referred to
 * @param doc  the XML outputter to write to
 * @throws IOException if writing to the outputter fails
 */
public void writeXml(String path, XMLOutputter doc) throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  doc.attribute("class", getClassName());
  // getLocalizedMessage() may return null (exceptions created without a
  // message); guard before string manipulation to avoid an NPE.
  String msg = getLocalizedMessage();
  if (msg == null) {
    msg = "";
  }
  // Keep only the first line of a multi-line message.
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  // Strip a leading "SomeClass: " prefix when present; indexOf(":") is -1
  // when absent, so substring(0) leaves the message unchanged.
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example #11
Source File: MD5MD5CRC32FileChecksum.java From RDFS with Apache License 2.0 | 5 votes |
/**
 * Serialize the given checksum (possibly null) as a single XML element whose
 * attributes carry the checksum parameters.
 *
 * @param xml  outputter to write to
 * @param that checksum to serialize; a null value yields an empty element
 * @throws IOException if writing to the outputter fails
 */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  // A null checksum produces an element with no attributes.
  if (that != null) {
    xml.attribute("bytesPerCRC", String.valueOf(that.bytesPerCRC));
    xml.attribute("crcPerBlock", String.valueOf(that.crcPerBlock));
    xml.attribute("md5", String.valueOf(that.md5));
  }
  xml.endTag();
}
Example #12
Source File: MD5MD5CRC32FileChecksum.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Serialize the given checksum (possibly null) as a single XML element whose
 * attributes carry the checksum parameters, including the CRC type.
 *
 * @param xml  outputter to write to
 * @param that checksum to serialize; a null value yields an empty element
 * @throws IOException if writing to the outputter fails
 */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  // A null checksum produces an element with no attributes.
  if (that != null) {
    xml.attribute("bytesPerCRC", String.valueOf(that.bytesPerCRC));
    xml.attribute("crcPerBlock", String.valueOf(that.crcPerBlock));
    xml.attribute("crcType", that.getCrcType().name());
    xml.attribute("md5", String.valueOf(that.md5));
  }
  xml.endTag();
}
Example #13
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/** * create XML block from general exception. */ private static void createGeneralException(XMLOutputter doc, String clusterid, String eMsg) throws IOException { doc.startTag("cluster"); doc.attribute("clusterId", clusterid); doc.startTag("message"); doc.startTag("item"); doc.attribute("msg", eMsg); doc.endTag(); // item doc.endTag(); // message doc.endTag(); // cluster }
Example #14
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/** * create the XML for exceptions that we encountered when connecting to * namenode. */ private static void createNamenodeExceptionMsg(XMLOutputter doc, Map<String, Exception> exceptionMsg) throws IOException { if (exceptionMsg.size() > 0) { doc.startTag("unreportedNamenodes"); for (Map.Entry<String, Exception> m : exceptionMsg.entrySet()) { doc.startTag("node"); doc.attribute("name", m.getKey()); doc.attribute("exception", StringUtils.stringifyException(m.getValue())); doc.endTag();// node } doc.endTag(); // unreportedNamnodes } }
Example #15
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/** * Generate a XML block as such, <item label="Node" value="hostname" * link="http://hostname:50070" /> */ private static void toXmlItemBlockWithLink(XMLOutputter doc, String value, URL url, String label) throws IOException { doc.startTag("item"); doc.attribute("label", label); doc.attribute("value", value); doc.attribute("link", url.toString()); doc.endTag(); // item }
Example #16
Source File: MD5MD5CRC32FileChecksum.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Serialize the given checksum (possibly null) as a single XML element whose
 * attributes carry the checksum parameters, including the CRC type.
 *
 * @param xml  outputter to write to
 * @param that checksum to serialize; a null value yields an empty element
 * @throws IOException if writing to the outputter fails
 */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  // A null checksum produces an element with no attributes.
  if (that != null) {
    xml.attribute("bytesPerCRC", String.valueOf(that.bytesPerCRC));
    xml.attribute("crcPerBlock", String.valueOf(that.crcPerBlock));
    xml.attribute("crcType", that.getCrcType().name());
    xml.attribute("md5", String.valueOf(that.md5));
  }
  xml.endTag();
}
Example #17
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/** * create XML block from general exception. */ private static void createGeneralException(XMLOutputter doc, String clusterid, String eMsg) throws IOException { doc.startTag("cluster"); doc.attribute("clusterId", clusterid); doc.startTag("message"); doc.startTag("item"); doc.attribute("msg", eMsg); doc.endTag(); // item doc.endTag(); // message doc.endTag(); // cluster }
Example #18
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/** * create the XML for exceptions that we encountered when connecting to * namenode. */ private static void createNamenodeExceptionMsg(XMLOutputter doc, Map<String, Exception> exceptionMsg) throws IOException { if (exceptionMsg.size() > 0) { doc.startTag("unreportedNamenodes"); for (Map.Entry<String, Exception> m : exceptionMsg.entrySet()) { doc.startTag("node"); doc.attribute("name", m.getKey()); doc.attribute("exception", StringUtils.stringifyException(m.getValue())); doc.endTag();// node } doc.endTag(); // unreportedNamnodes } }
Example #19
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/** * Generate a XML block as such, <item label="Node" value="hostname" * link="http://hostname:50070" /> */ private static void toXmlItemBlockWithLink(XMLOutputter doc, String value, URL url, String label) throws IOException { doc.startTag("item"); doc.attribute("label", label); doc.attribute("value", value); doc.attribute("link", url.toString()); doc.endTag(); // item }
Example #20
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Emit a single {@code <item label=key value=value/>} element.
 *
 * @param doc   outputter to write to
 * @param key   text for the {@code label} attribute
 * @param value text for the {@code value} attribute
 * @throws IOException if writing to the outputter fails
 */
private static void toXmlItemBlock(XMLOutputter doc, String key, String value)
    throws IOException {
  doc.startTag("item");
  doc.attribute("label", key);
  doc.attribute("value", value);
  doc.endTag(); // item
}
Example #21
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Generate the decommissioning datanode report in XML format.
 *
 * Output layout: on a stored general error only an exception block is
 * emitted; if no namenode reported, only the per-namenode exceptions are
 * emitted; otherwise a {@code <cluster>} document with a
 * {@code <decommissioningReport>} summary followed by a {@code <datanodes>}
 * list of nodes that are in a decommission-related state.
 *
 * @param doc the XML outputter the report is written to
 * @throws IOException if writing to the outputter fails
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // General failure: report it and stop.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }
  if (statusMap == null || statusMap.isEmpty()) {
    // none of the namenodes has reported, print exceptions from each nn.
    doc.startTag("cluster");
    createNamenodeExceptionMsg(doc, exceptions);
    doc.endTag();
    doc.getWriter().flush();
    return;
  }
  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  // Summary counts per decommission state.
  doc.startTag("decommissioningReport");
  countDecommissionDatanodes();
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSIONED.toString(),
      Integer.toString(decommissioned));
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSION_INPROGRESS.toString(),
      Integer.toString(decommissioning));
  toXmlItemBlock(doc, DecommissionStates.PARTIALLY_DECOMMISSIONED.toString(),
      Integer.toString(partial));
  doc.endTag(); // decommissioningReport

  doc.startTag("datanodes");
  Set<String> dnSet = statusMap.keySet();
  for (String dnhost : dnSet) {
    Map<String, String> nnStatus = statusMap.get(dnhost);
    if (nnStatus == null || nnStatus.isEmpty()) {
      continue;
    }
    String overallStatus = nnStatus.get(OVERALL_STATUS);
    // check if datanode is in decommission states
    if (overallStatus != null
        && (overallStatus.equals(AdminStates.DECOMMISSION_INPROGRESS
            .toString())
            || overallStatus.equals(AdminStates.DECOMMISSIONED.toString())
            || overallStatus
                .equals(DecommissionStates.PARTIALLY_DECOMMISSIONED
                    .toString())
            || overallStatus
                .equals(DecommissionStates.UNKNOWN.toString()))) {
      doc.startTag("node");
      // dn
      toXmlItemBlockWithLink(doc, dnhost,
          new URL("http", dnhost, httpPort, ""), "DataNode");
      // overall status first
      toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);
      for (Map.Entry<String, String> m : nnStatus.entrySet()) {
        String nn = m.getKey();
        if (nn.equals(OVERALL_STATUS)) {
          continue;
        }
        // xml: one item per reporting namenode
        toXmlItemBlock(doc, nn, nnStatus.get(nn));
      }
      doc.endTag(); // node
    }
  }
  doc.endTag(); // datanodes

  createNamenodeExceptionMsg(doc, exceptions);
  doc.endTag();// cluster
}
Example #22
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Generate the cluster-wide storage and per-namenode status report in XML.
 *
 * @param doc the XML outputter the report is written to
 * @throws IOException if writing to the outputter fails
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }

  int size = nnList.size();
  long total = 0L, free = 0L, nonDfsUsed = 0l;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    // NOTE(review): capacity figures are averaged over the namenodes —
    // presumably because each namenode reports the same shared datanode
    // capacity; confirm against how *_sum fields are accumulated.
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("storage");
  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));
  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));
  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));
  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));
  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));
  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%",
      DFSUtil.percent2String(dfsRemainingPercent));
  doc.endTag(); // storage

  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));
  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used", StringUtils.byteDesc(nn.bpUsed));
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    // Live/dead counts link to the corresponding datanode list pages.
    toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
        + nn.liveDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
        + nn.deadDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);

  doc.endTag(); // cluster
  doc.getWriter().flush();
}
Example #23
Source File: ContentSummaryServlet.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Handles a GET request for a path's content summary: runs as the request's
 * user, fetches the ContentSummary from the namenode, and writes it (or the
 * failure) into the response as a single XML element.
 *
 * @param request  the HTTP request naming the target path
 * @param response the HTTP response the XML document is written to
 * @throws ServletException on servlet-level failures
 * @throws IOException on response errors; also wraps an
 *         InterruptedException thrown while running as the request's user
 */
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  final Configuration conf = 
      (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        final String path = ServletUtil.getDecodedPath(request, "/contentSummary");
        final PrintWriter out = response.getWriter();
        final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
        xml.declaration();
        try {
          //get content summary
          final ClientProtocol nnproxy = createNameNodeProxy();
          final ContentSummary cs = nnproxy.getContentSummary(path);

          //write xml
          xml.startTag(ContentSummary.class.getName());
          if (cs != null) {
            xml.attribute("length"        , "" + cs.getLength());
            xml.attribute("fileCount"     , "" + cs.getFileCount());
            xml.attribute("directoryCount", "" + cs.getDirectoryCount());
            xml.attribute("quota"         , "" + cs.getQuota());
            xml.attribute("spaceConsumed" , "" + cs.getSpaceConsumed());
            xml.attribute("spaceQuota"    , "" + cs.getSpaceQuota());
          }
          xml.endTag();
        } catch(IOException ioe) {
          writeXml(ioe, path, xml);
        }
        xml.endDocument();
        return null;
      }
    });
  } catch (InterruptedException e) {
    // NOTE(review): the thread's interrupt status is not restored here.
    throw new IOException(e);
  }
}
Example #24
Source File: ContentSummaryServlet.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Handles a GET request for a path's content summary: runs as the request's
 * user, fetches the ContentSummary from the namenode, and writes it (or the
 * failure) into the response as a single XML element.
 *
 * @param request  the HTTP request naming the target path
 * @param response the HTTP response the XML document is written to
 * @throws ServletException on servlet-level failures
 * @throws IOException on response errors; also wraps an
 *         InterruptedException thrown while running as the request's user
 */
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  final Configuration conf = 
      (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
  final UserGroupInformation ugi = getUGI(request, conf);
  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        final String path = ServletUtil.getDecodedPath(request, "/contentSummary");
        final PrintWriter out = response.getWriter();
        final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
        xml.declaration();
        try {
          //get content summary
          final ClientProtocol nnproxy = createNameNodeProxy();
          final ContentSummary cs = nnproxy.getContentSummary(path);

          //write xml
          xml.startTag(ContentSummary.class.getName());
          if (cs != null) {
            xml.attribute("length"        , "" + cs.getLength());
            xml.attribute("fileCount"     , "" + cs.getFileCount());
            xml.attribute("directoryCount", "" + cs.getDirectoryCount());
            xml.attribute("quota"         , "" + cs.getQuota());
            xml.attribute("spaceConsumed" , "" + cs.getSpaceConsumed());
            xml.attribute("spaceQuota"    , "" + cs.getSpaceQuota());
          }
          xml.endTag();
        } catch(IOException ioe) {
          writeXml(ioe, path, xml);
        }
        xml.endDocument();
        return null;
      }
    });
  } catch (InterruptedException e) {
    // NOTE(review): the thread's interrupt status is not restored here.
    throw new IOException(e);
  }
}
Example #25
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Generate the decommissioning datanode report in XML format.
 *
 * Output layout: on a stored general error only an exception block is
 * emitted; if no namenode reported, only the per-namenode exceptions are
 * emitted; otherwise a {@code <cluster>} document with a
 * {@code <decommissioningReport>} summary followed by a {@code <datanodes>}
 * list of nodes that are in a decommission-related state.
 *
 * @param doc the XML outputter the report is written to
 * @throws IOException if writing to the outputter fails
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // General failure: report it and stop.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }
  if (statusMap == null || statusMap.isEmpty()) {
    // none of the namenodes has reported, print exceptions from each nn.
    doc.startTag("cluster");
    createNamenodeExceptionMsg(doc, exceptions);
    doc.endTag();
    doc.getWriter().flush();
    return;
  }
  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  // Summary counts per decommission state.
  doc.startTag("decommissioningReport");
  countDecommissionDatanodes();
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSIONED.toString(),
      Integer.toString(decommissioned));
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSION_INPROGRESS.toString(),
      Integer.toString(decommissioning));
  toXmlItemBlock(doc, DecommissionStates.PARTIALLY_DECOMMISSIONED.toString(),
      Integer.toString(partial));
  doc.endTag(); // decommissioningReport

  doc.startTag("datanodes");
  Set<String> dnSet = statusMap.keySet();
  for (String dnhost : dnSet) {
    Map<String, String> nnStatus = statusMap.get(dnhost);
    if (nnStatus == null || nnStatus.isEmpty()) {
      continue;
    }
    String overallStatus = nnStatus.get(OVERALL_STATUS);
    // check if datanode is in decommission states
    if (overallStatus != null
        && (overallStatus.equals(AdminStates.DECOMMISSION_INPROGRESS
            .toString())
            || overallStatus.equals(AdminStates.DECOMMISSIONED.toString())
            || overallStatus
                .equals(DecommissionStates.PARTIALLY_DECOMMISSIONED
                    .toString())
            || overallStatus
                .equals(DecommissionStates.UNKNOWN.toString()))) {
      doc.startTag("node");
      // dn
      toXmlItemBlockWithLink(doc, dnhost,
          new URL("http", dnhost, httpPort, ""), "DataNode");
      // overall status first
      toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);
      for (Map.Entry<String, String> m : nnStatus.entrySet()) {
        String nn = m.getKey();
        if (nn.equals(OVERALL_STATUS)) {
          continue;
        }
        // xml: one item per reporting namenode
        toXmlItemBlock(doc, nn, nnStatus.get(nn));
      }
      doc.endTag(); // node
    }
  }
  doc.endTag(); // datanodes

  createNamenodeExceptionMsg(doc, exceptions);
  doc.endTag();// cluster
}
Example #26
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Generate the cluster-wide storage and per-namenode status report in XML.
 *
 * @param doc the XML outputter the report is written to
 * @throws IOException if writing to the outputter fails
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }

  int size = nnList.size();
  long total = 0L, free = 0L, nonDfsUsed = 0l;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    // NOTE(review): capacity figures are averaged over the namenodes —
    // presumably because each namenode reports the same shared datanode
    // capacity; confirm against how *_sum fields are accumulated.
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("storage");
  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));
  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));
  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));
  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));
  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));
  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%",
      DFSUtil.percent2String(dfsRemainingPercent));
  doc.endTag(); // storage

  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));
  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used", StringUtils.byteDesc(nn.bpUsed));
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    // Live/dead counts link to the corresponding datanode list pages.
    toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " ("
        + nn.liveDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " ("
        + nn.deadDecomCount + ")", new URL(nn.httpAddress,
        "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);

  doc.endTag(); // cluster
  doc.getWriter().flush();
}