Java Code Examples for org.znerd.xmlenc.XMLOutputter#startTag()
The following examples show how to use org.znerd.xmlenc.XMLOutputter#startTag(). The originating project and source file are listed above each example.
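Before the examples, here is a minimal, self-contained sketch of driving XMLOutputter directly. The two-argument constructor (Writer plus encoding name) and the declaration() call are assumptions based on the xmlenc API as used elsewhere in Hadoop; the element and attribute names are invented for illustration.

import java.io.IOException;
import java.io.PrintWriter;
import org.znerd.xmlenc.XMLOutputter;

public class StartTagSketch {
  public static void main(String[] args) throws IOException {
    PrintWriter out = new PrintWriter(System.out);
    // Assumed constructor: XMLOutputter(Writer, String encoding).
    XMLOutputter doc = new XMLOutputter(out, "UTF-8");
    doc.declaration();                   // optional <?xml ...?> header (assumed API)

    doc.startTag("cluster");             // opens <cluster>
    doc.attribute("clusterId", "demo");  // attributes must be written right after startTag()
    doc.startTag("item");                // tags nest; endTag() closes the innermost open tag
    doc.attribute("label", "example");
    doc.endTag();                        // closes the <item> element
    doc.endTag();                        // closes </cluster>

    doc.getWriter().flush();             // same flush idiom used in the examples below
  }
}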
Example 1
Source File: DfsServlet.java From hadoop with Apache License 2.0 | 6 votes |
/** Write the object to XML format */
protected void writeXml(Exception except, String path, XMLOutputter doc)
    throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  if (except instanceof RemoteException) {
    doc.attribute("class", ((RemoteException) except).getClassName());
  } else {
    doc.attribute("class", except.getClass().getName());
  }
  String msg = except.getLocalizedMessage();
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example 2
Source File: DfsServlet.java From big-c with Apache License 2.0 | 6 votes |
/** Write the object to XML format */
protected void writeXml(Exception except, String path, XMLOutputter doc)
    throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  if (except instanceof RemoteException) {
    doc.attribute("class", ((RemoteException) except).getClassName());
  } else {
    doc.attribute("class", except.getClass().getName());
  }
  String msg = except.getLocalizedMessage();
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example 3
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Generate a XML block as such, <item label=key value=value/>
 */
private static void toXmlItemBlock(XMLOutputter doc, String key, String value)
    throws IOException {
  doc.startTag("item");
  doc.attribute("label", key);
  doc.attribute("value", value);
  doc.endTag();
}
Example 4
Source File: RemoteException.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/** Write the object to XML format */
public void writeXml(String path, XMLOutputter doc) throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  doc.attribute("class", getClassName());
  String msg = getLocalizedMessage();
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example 5
Source File: MD5MD5CRC32FileChecksum.java From hadoop-gpu with Apache License 2.0 | 5 votes |
/** Write that object to xml output. */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  if (that != null) {
    xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
    xml.attribute("crcPerBlock", "" + that.crcPerBlock);
    xml.attribute("md5", "" + that.md5);
  }
  xml.endTag();
}
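As a usage note, the static write method above can be pointed at any Writer through an XMLOutputter. The helper below is hypothetical (not taken from the Hadoop sources), and the two-argument constructor and declaration() call are assumed from the xmlenc API:

import java.io.IOException;
import java.io.StringWriter;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.znerd.xmlenc.XMLOutputter;

public final class ChecksumXmlSketch {
  /** Render a (possibly null) checksum as the XML element produced in Example 5. */
  public static String toXmlString(MD5MD5CRC32FileChecksum checksum) throws IOException {
    StringWriter sw = new StringWriter();
    XMLOutputter xml = new XMLOutputter(sw, "UTF-8"); // assumed constructor: XMLOutputter(Writer, String)
    xml.declaration();                                // optional XML declaration (assumed API)
    MD5MD5CRC32FileChecksum.write(xml, checksum);     // calls startTag()/attribute()/endTag() as shown above
    xml.getWriter().flush();
    return sw.toString();
  }
}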
Example 6
Source File: RemoteException.java From RDFS with Apache License 2.0 | 5 votes |
/** Write the object to XML format */
public void writeXml(String path, XMLOutputter doc) throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  doc.attribute("class", getClassName());
  String msg = getLocalizedMessage();
  int i = msg.indexOf("\n");
  if (i >= 0) {
    msg = msg.substring(0, i);
  }
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
Example 7
Source File: MD5MD5CRC32FileChecksum.java From RDFS with Apache License 2.0 | 5 votes |
/** Write that object to xml output. */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  if (that != null) {
    xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
    xml.attribute("crcPerBlock", "" + that.crcPerBlock);
    xml.attribute("md5", "" + that.md5);
  }
  xml.endTag();
}
Example 8
Source File: MD5MD5CRC32FileChecksum.java From big-c with Apache License 2.0 | 5 votes |
/** Write that object to xml output. */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  if (that != null) {
    xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
    xml.attribute("crcPerBlock", "" + that.crcPerBlock);
    xml.attribute("crcType", "" + that.getCrcType().name());
    xml.attribute("md5", "" + that.md5);
  }
  xml.endTag();
}
Example 9
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/**
 * create XML block from general exception.
 */
private static void createGeneralException(XMLOutputter doc,
    String clusterid, String eMsg) throws IOException {
  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);
  doc.startTag("message");
  doc.startTag("item");
  doc.attribute("msg", eMsg);
  doc.endTag(); // item
  doc.endTag(); // message
  doc.endTag(); // cluster
}
Example 10
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/**
 * create the XML for exceptions that we encountered when connecting to
 * namenode.
 */
private static void createNamenodeExceptionMsg(XMLOutputter doc,
    Map<String, Exception> exceptionMsg) throws IOException {
  if (exceptionMsg.size() > 0) {
    doc.startTag("unreportedNamenodes");
    for (Map.Entry<String, Exception> m : exceptionMsg.entrySet()) {
      doc.startTag("node");
      doc.attribute("name", m.getKey());
      doc.attribute("exception",
          StringUtils.stringifyException(m.getValue()));
      doc.endTag(); // node
    }
    doc.endTag(); // unreportedNamenodes
  }
}
Example 11
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Generate a XML block as such, <item label="Node" value="hostname"
 * link="http://hostname:50070" />
 */
private static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
    URL url, String label) throws IOException {
  doc.startTag("item");
  doc.attribute("label", label);
  doc.attribute("value", value);
  doc.attribute("link", url.toString());
  doc.endTag(); // item
}
Example 12
Source File: MD5MD5CRC32FileChecksum.java From hadoop with Apache License 2.0 | 5 votes |
/** Write that object to xml output. */
public static void write(XMLOutputter xml, MD5MD5CRC32FileChecksum that)
    throws IOException {
  xml.startTag(MD5MD5CRC32FileChecksum.class.getName());
  if (that != null) {
    xml.attribute("bytesPerCRC", "" + that.bytesPerCRC);
    xml.attribute("crcPerBlock", "" + that.crcPerBlock);
    xml.attribute("crcType", "" + that.getCrcType().name());
    xml.attribute("md5", "" + that.md5);
  }
  xml.endTag();
}
Example 13
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * create XML block from general exception.
 */
private static void createGeneralException(XMLOutputter doc,
    String clusterid, String eMsg) throws IOException {
  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);
  doc.startTag("message");
  doc.startTag("item");
  doc.attribute("msg", eMsg);
  doc.endTag(); // item
  doc.endTag(); // message
  doc.endTag(); // cluster
}
Example 14
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * create the XML for exceptions that we encountered when connecting to
 * namenode.
 */
private static void createNamenodeExceptionMsg(XMLOutputter doc,
    Map<String, Exception> exceptionMsg) throws IOException {
  if (exceptionMsg.size() > 0) {
    doc.startTag("unreportedNamenodes");
    for (Map.Entry<String, Exception> m : exceptionMsg.entrySet()) {
      doc.startTag("node");
      doc.attribute("name", m.getKey());
      doc.attribute("exception",
          StringUtils.stringifyException(m.getValue()));
      doc.endTag(); // node
    }
    doc.endTag(); // unreportedNamenodes
  }
}
Example 15
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Generate a XML block as such, <item label="Node" value="hostname"
 * link="http://hostname:50070" />
 */
private static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
    URL url, String label) throws IOException {
  doc.startTag("item");
  doc.attribute("label", label);
  doc.attribute("value", value);
  doc.attribute("link", url.toString());
  doc.endTag(); // item
}
Example 16
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Generate a XML block as such, <item label=key value=value/>
 */
private static void toXmlItemBlock(XMLOutputter doc, String key, String value)
    throws IOException {
  doc.startTag("item");
  doc.attribute("label", key);
  doc.attribute("value", value);
  doc.endTag();
}
Example 17
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 4 votes |
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }

  int size = nnList.size();
  long total = 0L, free = 0L, nonDfsUsed = 0L;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("storage");
  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));
  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));
  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));
  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));
  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));
  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%",
      DFSUtil.percent2String(dfsRemainingPercent));
  doc.endTag(); // storage

  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));

  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used", StringUtils.byteDesc(nn.bpUsed));
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    toXmlItemBlockWithLink(doc,
        nn.liveDatanodeCount + " (" + nn.liveDecomCount + ")",
        new URL(nn.httpAddress, "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc,
        nn.deadDatanodeCount + " (" + nn.deadDecomCount + ")",
        new URL(nn.httpAddress, "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);
  doc.endTag(); // cluster
  doc.getWriter().flush();
}
Example 18
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Generate decommissioning datanode report in XML format
 *
 * @param doc , xmloutputter
 * @throws IOException
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }

  if (statusMap == null || statusMap.isEmpty()) {
    // none of the namenodes has reported, print exceptions from each nn.
    doc.startTag("cluster");
    createNamenodeExceptionMsg(doc, exceptions);
    doc.endTag();
    doc.getWriter().flush();
    return;
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("decommissioningReport");
  countDecommissionDatanodes();
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSIONED.toString(),
      Integer.toString(decommissioned));
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSION_INPROGRESS.toString(),
      Integer.toString(decommissioning));
  toXmlItemBlock(doc, DecommissionStates.PARTIALLY_DECOMMISSIONED.toString(),
      Integer.toString(partial));
  doc.endTag(); // decommissioningReport

  doc.startTag("datanodes");
  Set<String> dnSet = statusMap.keySet();
  for (String dnhost : dnSet) {
    Map<String, String> nnStatus = statusMap.get(dnhost);
    if (nnStatus == null || nnStatus.isEmpty()) {
      continue;
    }
    String overallStatus = nnStatus.get(OVERALL_STATUS);
    // check if datanode is in decommission states
    if (overallStatus != null
        && (overallStatus.equals(AdminStates.DECOMMISSION_INPROGRESS.toString())
            || overallStatus.equals(AdminStates.DECOMMISSIONED.toString())
            || overallStatus.equals(
                DecommissionStates.PARTIALLY_DECOMMISSIONED.toString())
            || overallStatus.equals(DecommissionStates.UNKNOWN.toString()))) {
      doc.startTag("node");
      // dn
      toXmlItemBlockWithLink(doc, dnhost,
          new URL("http", dnhost, httpPort, ""), "DataNode");

      // overall status first
      toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);

      for (Map.Entry<String, String> m : nnStatus.entrySet()) {
        String nn = m.getKey();
        if (nn.equals(OVERALL_STATUS)) {
          continue;
        }
        // xml
        toXmlItemBlock(doc, nn, nnStatus.get(nn));
      }
      doc.endTag(); // node
    }
  }
  doc.endTag(); // datanodes

  createNamenodeExceptionMsg(doc, exceptions);
  doc.endTag(); // cluster
}
Example 19
Source File: ClusterJspHelper.java From big-c with Apache License 2.0 | 4 votes |
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    // general exception, only print exception message onto web page.
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }

  int size = nnList.size();
  long total = 0L, free = 0L, nonDfsUsed = 0L;
  float dfsUsedPercent = 0.0f, dfsRemainingPercent = 0.0f;
  if (size > 0) {
    total = total_sum / size;
    free = free_sum / size;
    nonDfsUsed = nonDfsUsed_sum / size;
    dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
    dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("storage");
  toXmlItemBlock(doc, "Total Files And Directories",
      Long.toString(totalFilesAndDirectories));
  toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));
  toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));
  toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));
  toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
  // dfsUsedPercent
  toXmlItemBlock(doc, "DFS Used%", DFSUtil.percent2String(dfsUsedPercent));
  // dfsRemainingPercent
  toXmlItemBlock(doc, "DFS Remaining%",
      DFSUtil.percent2String(dfsRemainingPercent));
  doc.endTag(); // storage

  doc.startTag("namenodes");
  // number of namenodes
  toXmlItemBlock(doc, "NamenodesCount", Integer.toString(size));

  for (NamenodeStatus nn : nnList) {
    doc.startTag("node");
    toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
    toXmlItemBlock(doc, "Blockpool Used", StringUtils.byteDesc(nn.bpUsed));
    toXmlItemBlock(doc, "Blockpool Used%",
        DFSUtil.percent2String(DFSUtil.getPercentUsed(nn.bpUsed, total)));
    toXmlItemBlock(doc, "Files And Directories",
        Long.toString(nn.filesAndDirectories));
    toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
    toXmlItemBlock(doc, "Missing Blocks",
        Long.toString(nn.missingBlocksCount));
    toXmlItemBlockWithLink(doc,
        nn.liveDatanodeCount + " (" + nn.liveDecomCount + ")",
        new URL(nn.httpAddress, "/dfsnodelist.jsp?whatNodes=LIVE"),
        "Live Datanode (Decommissioned)");
    toXmlItemBlockWithLink(doc,
        nn.deadDatanodeCount + " (" + nn.deadDecomCount + ")",
        new URL(nn.httpAddress, "/dfsnodelist.jsp?whatNodes=DEAD"),
        "Dead Datanode (Decommissioned)");
    toXmlItemBlock(doc, "Software Version", nn.softwareVersion);
    doc.endTag(); // node
  }
  doc.endTag(); // namenodes

  createNamenodeExceptionMsg(doc, nnExceptions);
  doc.endTag(); // cluster
  doc.getWriter().flush();
}
Example 20
Source File: ClusterJspHelper.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Generate decommissioning datanode report in XML format
 *
 * @param doc , xmloutputter
 * @throws IOException
 */
public void toXML(XMLOutputter doc) throws IOException {
  if (error != null) {
    createGeneralException(doc, clusterid,
        StringUtils.stringifyException(error));
    doc.getWriter().flush();
    return;
  }

  if (statusMap == null || statusMap.isEmpty()) {
    // none of the namenodes has reported, print exceptions from each nn.
    doc.startTag("cluster");
    createNamenodeExceptionMsg(doc, exceptions);
    doc.endTag();
    doc.getWriter().flush();
    return;
  }

  doc.startTag("cluster");
  doc.attribute("clusterId", clusterid);

  doc.startTag("decommissioningReport");
  countDecommissionDatanodes();
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSIONED.toString(),
      Integer.toString(decommissioned));
  toXmlItemBlock(doc, DecommissionStates.DECOMMISSION_INPROGRESS.toString(),
      Integer.toString(decommissioning));
  toXmlItemBlock(doc, DecommissionStates.PARTIALLY_DECOMMISSIONED.toString(),
      Integer.toString(partial));
  doc.endTag(); // decommissioningReport

  doc.startTag("datanodes");
  Set<String> dnSet = statusMap.keySet();
  for (String dnhost : dnSet) {
    Map<String, String> nnStatus = statusMap.get(dnhost);
    if (nnStatus == null || nnStatus.isEmpty()) {
      continue;
    }
    String overallStatus = nnStatus.get(OVERALL_STATUS);
    // check if datanode is in decommission states
    if (overallStatus != null
        && (overallStatus.equals(AdminStates.DECOMMISSION_INPROGRESS.toString())
            || overallStatus.equals(AdminStates.DECOMMISSIONED.toString())
            || overallStatus.equals(
                DecommissionStates.PARTIALLY_DECOMMISSIONED.toString())
            || overallStatus.equals(DecommissionStates.UNKNOWN.toString()))) {
      doc.startTag("node");
      // dn
      toXmlItemBlockWithLink(doc, dnhost,
          new URL("http", dnhost, httpPort, ""), "DataNode");

      // overall status first
      toXmlItemBlock(doc, OVERALL_STATUS, overallStatus);

      for (Map.Entry<String, String> m : nnStatus.entrySet()) {
        String nn = m.getKey();
        if (nn.equals(OVERALL_STATUS)) {
          continue;
        }
        // xml
        toXmlItemBlock(doc, nn, nnStatus.get(nn));
      }
      doc.endTag(); // node
    }
  }
  doc.endTag(); // datanodes

  createNamenodeExceptionMsg(doc, exceptions);
  doc.endTag(); // cluster
}