Java Code Examples for org.codehaus.jackson.JsonNode#path()
The following examples show how to use
org.codehaus.jackson.JsonNode#path().
Each example links to its original project and source file.
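Before the examples, a quick note on the contract these snippets rely on: unlike get(), which returns null for an absent field, path() returns a "missing node", so navigation calls can be chained without intermediate null checks. Below is a minimal sketch of this behavior, assuming Jackson 1.x (org.codehaus.jackson) on the classpath; the JSON document and class name are invented for the demo.

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;

public class JsonNodePathDemo {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode root = mapper.readTree("{\"compute\": {\"name\": \"vm-0\"}}");

        // path() never returns null: an absent field yields a missing node,
        // so navigation can be chained without intermediate null checks.
        JsonNode name = root.path("compute").path("name");
        JsonNode absent = root.path("network").path("address");

        System.out.println(name.getTextValue());      // vm-0
        System.out.println(absent.isMissingNode());   // true

        // get(), by contrast, returns null for an absent field.
        System.out.println(root.get("network"));      // null
    }
}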
Example 1
Source File: MapReduceFetcherHadoop2.java From dr-elephant with Apache License 2.0
private long[] getTaskExecTime(URL url) throws IOException, AuthenticationException {
    JsonNode rootNode = ThreadContextMR2.readJsonNode(url);
    JsonNode taskAttempt = rootNode.path("taskAttempt");

    long startTime = taskAttempt.get("startTime").getLongValue();
    long finishTime = taskAttempt.get("finishTime").getLongValue();
    boolean isMapper = taskAttempt.get("type").getValueAsText().equals("MAP");

    long[] time;
    if (isMapper) {
        // No shuffle or sort time for a mapper
        time = new long[] { finishTime - startTime, 0, 0, startTime, finishTime };
    } else {
        long shuffleTime = taskAttempt.get("elapsedShuffleTime").getLongValue();
        long sortTime = taskAttempt.get("elapsedMergeTime").getLongValue();
        time = new long[] { finishTime - startTime, shuffleTime, sortTime, startTime, finishTime };
    }

    return time;
}
Example 2
Source File: TezFetcher.java From dr-elephant with Apache License 2.0
private TezCounterData getDagCounter(URL url) throws IOException, AuthenticationException {
    TezCounterData holder = new TezCounterData();
    JsonNode rootNode = ThreadContextMR2.readJsonNode(url);
    JsonNode groups = rootNode.path("otherinfo").path("counters").path("counterGroups");

    for (JsonNode group : groups) {
        for (JsonNode counter : group.path("counters")) {
            String name = counter.get("counterName").getTextValue();
            String groupName = group.get("counterGroupName").getTextValue();
            Long value = counter.get("counterValue").getLongValue();
            holder.set(groupName, name, value);
        }
    }

    return holder;
}
Example 3
Source File: TezFetcher.java From dr-elephant with Apache License 2.0
private void getTaskDataAll(URL vertexListUrl, String dagId, List<TezTaskData> mapperList,
        List<TezTaskData> reducerList, List<TezTaskData> scopeTaskList)
        throws IOException, AuthenticationException {
    JsonNode rootVertexNode = ThreadContextMR2.readJsonNode(vertexListUrl);
    JsonNode vertices = rootVertexNode.path("entities");
    boolean isMapVertex = false;

    for (JsonNode vertex : vertices) {
        String vertexId = vertex.get("entity").getTextValue();
        String vertexClass = vertex.path("otherinfo").path("processorClassName").getTextValue();
        URL tasksByVertexURL = getTaskListByVertexURL(dagId, vertexId);

        if (vertexClass.equals("org.apache.hadoop.hive.ql.exec.tez.MapTezProcessor")) {
            isMapVertex = true;
            getTaskDataByVertexId(tasksByVertexURL, dagId, vertexId, mapperList, isMapVertex);
        } else if (vertexClass.equals("org.apache.hadoop.hive.ql.exec.tez.ReduceTezProcessor")) {
            isMapVertex = false;
            getTaskDataByVertexId(tasksByVertexURL, dagId, vertexId, reducerList, isMapVertex);
        } else if (vertexClass.equals("org.apache.pig.backend.hadoop.executionengine.tez.runtime.PigProcessor")) {
            isMapVertex = false;
            getTaskDataByVertexId(tasksByVertexURL, dagId, vertexId, scopeTaskList, isMapVertex);
        }
    }
}
Example 4
Source File: TezFetcher.java From dr-elephant with Apache License 2.0
private void getTaskDataByVertexId(URL url, String dagId, String vertexId,
        List<TezTaskData> taskList, boolean isMapTask) throws IOException, AuthenticationException {
    JsonNode rootNode = ThreadContextMR2.readJsonNode(url);
    JsonNode tasks = rootNode.path("entities");

    for (JsonNode task : tasks) {
        String state = task.path("otherinfo").path("status").getTextValue();
        String taskId = task.get("entity").getValueAsText();
        String attemptId = task.path("otherinfo").path("successfulAttemptId").getTextValue();
        if (!state.equals("SUCCEEDED")) {
            // for a task that did not succeed, fall back to its first failed attempt
            JsonNode firstAttempt = getTaskFirstFailedAttempt(_urlFactory.getTaskAllAttemptsURL(dagId, taskId));
            if (firstAttempt != null) {
                attemptId = firstAttempt.get("entity").getTextValue();
            }
        }
        taskList.add(new TezTaskData(taskId, attemptId));
    }

    getTaskData(dagId, taskList, isMapTask);
}
Example 5
Source File: TezFetcher.java From dr-elephant with Apache License 2.0
private JsonNode getTaskFirstFailedAttempt(URL taskAllAttemptsUrl) throws IOException, AuthenticationException {
    JsonNode rootNode = ThreadContextMR2.readJsonNode(taskAllAttemptsUrl);
    long firstAttemptFinishTime = Long.MAX_VALUE;
    JsonNode firstAttempt = null;
    JsonNode taskAttempts = rootNode.path("entities");

    for (JsonNode taskAttempt : taskAttempts) {
        String state = taskAttempt.path("otherinfo").path("counters").path("status").getTextValue();
        if (state.equals("SUCCEEDED")) {
            continue;
        }
        long finishTime = taskAttempt.path("otherinfo").path("counters").path("endTime").getLongValue();
        if (finishTime < firstAttemptFinishTime) {
            firstAttempt = taskAttempt;
            firstAttemptFinishTime = finishTime;
        }
    }

    return firstAttempt;
}
Example 6
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0
@Override
public JsonNode rewrite(JsonNode plan, Set<String> namesUsed, boolean debugMode, boolean revisit) {
    this.namesUsed = namesUsed;

    ObjectNode newPlan = (ObjectNode) cloneNode(plan);
    ArrayNode jobs = mapper.createArrayNode();

    for (JsonNode job : plan.path("jobs")) {
        jobs.add(rewriteJob(job));
    }

    newPlan.remove("jobs");
    newPlan.put("jobs", jobs);

    return newPlan;
}
Example 7
Source File: GenerateOperator.java From Cubert with Apache License 2.0
private Map<String, String> getLineage(BlockSchema inputSchema, JsonNode json) throws PreconditionException {
    Map<String, String> lineage = new HashMap<String, String>();

    for (JsonNode outputCol : json.path("outputTuple")) {
        String outColName = JsonUtils.getText(outputCol, "col_name");
        if (!outputCol.has("expression"))
            continue;

        JsonNode expressionJson = outputCol.get("expression");
        if (!expressionJson.has("function"))
            continue;

        String function = JsonUtils.getText(expressionJson, "function");
        if (!function.equals("INPUT_PROJECTION"))
            continue;

        Object selector = JsonUtils.decodeConstant(expressionJson.get("arguments").get(0), null);
        int index = FunctionTree.getSelectorIndex(inputSchema, selector);
        String inColName = inputSchema.getColumnNames()[index];
        lineage.put(inColName, outColName);
    }

    return lineage;
}
Example 8
Source File: MapReduceFetcherHadoop2.java From dr-elephant with Apache License 2.0
private MapReduceCounterData getJobCounter(URL url) throws IOException, AuthenticationException {
    MapReduceCounterData holder = new MapReduceCounterData();
    JsonNode rootNode = ThreadContextMR2.readJsonNode(url);
    JsonNode groups = rootNode.path("jobCounters").path("counterGroup");

    for (JsonNode group : groups) {
        for (JsonNode counter : group.path("counter")) {
            String counterName = counter.get("name").getValueAsText();
            Long counterValue = counter.get("totalCounterValue").getLongValue();
            String groupName = group.get("counterGroupName").getValueAsText();
            holder.set(groupName, counterName, counterValue);
        }
    }

    return holder;
}
Example 9
Source File: VespaDocumentOperationTest.java From vespa with Apache License 2.0
@Test
public void requireThatUDFSupportsUpdateAssign() throws IOException {
    String json = getDocumentOperationJson("docid=id:<application>:metrics::<name>-<date>",
            "operation=update");
    ObjectMapper m = new ObjectMapper();
    JsonNode root = m.readTree(json);
    JsonNode fields = root.path("fields");

    assertEquals("id:testapp:metrics::clicks-20160112", root.get("update").getTextValue());
    assertEquals("testapp", fields.get("application").get("assign").getTextValue());
    assertEquals("clicks", fields.get("name").get("assign").getTextValue());
    assertEquals(3, fields.get("value").get("assign").getIntValue());
}
Example 10
Source File: DependencyAnalyzer.java From Cubert with Apache License 2.0
@Override
public void visitInput(JsonNode json) {
    String type = getText(json, "type");
    for (JsonNode pathJson : json.path("path")) {
        currentJob.addInput(type, JsonUtils.encodePath(pathJson), json);
    }
}
Example 11
Source File: CubertCombiner.java From Cubert with Apache License 2.0
public static void checkPostCondition(Map<String, PostCondition> preConditions, JsonNode json)
        throws PreconditionException {
    PostCondition condition = preConditions.values().iterator().next();
    BlockSchema inputSchema = condition.getSchema();

    String[] keyColumns = JsonUtils.asArray(json, "pivotKeys");
    BlockSchema outputSchema = inputSchema.getSubset(keyColumns);

    if (json.has("aggregates")) {
        for (JsonNode aggregateJson : json.path("aggregates")) {
            AggregationType aggType = AggregationType.valueOf(JsonUtils.getText(aggregateJson, "type"));
            AggregationFunction aggregator = AggregationFunctions.get(aggType, aggregateJson);
            BlockSchema aggSchema = aggregator.outputSchema(inputSchema, aggregateJson);
            outputSchema = outputSchema.append(aggSchema);
        }
    }

    if (!inputSchema.equals(outputSchema))
        throw new PreconditionException(PreconditionExceptionType.INVALID_SCHEMA,
                "The input and output schema for SHUFFLE must be identical."
                        + "\n\tInput Schema: " + inputSchema
                        + "\n\tOutputSchema: " + outputSchema);
}
Example 12
Source File: ThreadPoolManager.java From Cubert with Apache License 2.0
private void setupGraph(JsonNode programNode) {
    // need a list of jobNames in order to print the dependency graph jobs in
    // sequential order
    List<String> jobNames = new ArrayList<String>(programNode.path("jobs").size());

    for (JsonNode job : programNode.path("jobs")) {
        // get the name of the job
        String jobName = job.get("name").getTextValue();
        jobNames.add(jobName);

        // get the parents of the job
        List<String> parents = new ArrayList<String>();
        ArrayNode dependencies = (ArrayNode) job.get("dependsOn");
        JsonNode indexOfJob;
        for (JsonNode dependency : dependencies) {
            int index = dependency.getIntValue();
            indexOfJob = ((ArrayNode) programNode.get("jobs")).get(index);
            parents.add(indexOfJob.get("name").getTextValue());
        }

        // add the name, parents of the job, and the actual JsonNode job
        graph.addNode(jobName, parents, job);
    }

    // set the children field for parent job nodes
    graph.setChildren();

    // print graph
    System.out.println(graph.prettyPrint(jobNames));
}
Example 13
Source File: VespaDocumentOperationTest.java From vespa with Apache License 2.0
@Test
public void requireThatUDFCanExcludeFields() throws IOException {
    String json = getDocumentOperationJson("docid=id:<application>:metrics::<name>-<date>",
            "exclude-fields=application,date");
    ObjectMapper m = new ObjectMapper();
    JsonNode root = m.readTree(json);
    JsonNode fields = root.path("fields");

    // 'application' and 'date' fields should not appear in JSON
    assertNull(fields.get("application"));
    assertNull(fields.get("date"));
    assertNotNull(fields.get("name"));
    assertNotNull(fields.get("value"));
}
Example 14
Source File: VespaDocumentOperationTest.java From vespa with Apache License 2.0
@Test
public void requireThatUDFSupportsCreateIfNonExistent() throws IOException {
    String json = getDocumentOperationJson("docid=id:<application>:metrics::<name>-<date>",
            "operation=update", "create-if-non-existent=true");
    ObjectMapper m = new ObjectMapper();
    JsonNode root = m.readTree(json);
    JsonNode fields = root.path("fields");

    assertEquals("id:testapp:metrics::clicks-20160112", root.get("update").getTextValue());
    assertEquals(true, root.get("create").getBooleanValue());
    assertEquals("testapp", fields.get("application").get("assign").getTextValue());
    assertEquals("clicks", fields.get("name").get("assign").getTextValue());
    assertEquals(3, fields.get("value").get("assign").getIntValue());
}
Example 15
Source File: VespaDocumentOperationTest.java From vespa with Apache License 2.0
@Test
public void requireThatUDFSupportsConditionalUpdateAssign() throws IOException {
    String json = getDocumentOperationJson("docid=id:<application>:metrics::<name>-<date>",
            "operation=update", "condition=clicks < <value>");
    ObjectMapper m = new ObjectMapper();
    JsonNode root = m.readTree(json);
    JsonNode fields = root.path("fields");

    assertEquals("id:testapp:metrics::clicks-20160112", root.get("update").getTextValue());
    assertEquals("clicks < 3", root.get("condition").getTextValue());
    assertEquals("testapp", fields.get("application").get("assign").getTextValue());
    assertEquals("clicks", fields.get("name").get("assign").getTextValue());
    assertEquals(3, fields.get("value").get("assign").getIntValue());
}
Example 16
Source File: VespaDocumentOperationTest.java From vespa with Apache License 2.0
@Test
public void requireThatUDFReturnsCorrectJson() throws Exception {
    String json = getDocumentOperationJson("docid=id:<application>:metrics::<name>-<date>");
    ObjectMapper m = new ObjectMapper();
    JsonNode root = m.readTree(json);
    JsonNode fields = root.path("fields");

    // operation "put" is the default
    assertEquals("id:testapp:metrics::clicks-20160112", root.get("put").getTextValue());
    assertEquals("testapp", fields.get("application").getTextValue());
    assertEquals("clicks", fields.get("name").getTextValue());
    assertEquals(3, fields.get("value").getIntValue());
}
Example 17
Source File: ExecutorService.java From Cubert with Apache License 2.0
private static void setupConf(JsonNode programNode) {
    // copy the hadoopConf and libjars from the global level to each job
    JsonNode globalHadoopConf = programNode.get("hadoopConf");
    JsonNode libjars = programNode.get("libjars");

    for (JsonNode json : programNode.path("jobs")) {
        ObjectNode job = (ObjectNode) json;

        // if there is no local hadoop conf, then use the global conf
        if (!job.has("hadoopConf")) {
            job.put("hadoopConf", globalHadoopConf);
        } else {
            // if there are local conf properties, then copy only those properties
            // from the global properties that are not already defined at the local level
            ObjectNode localHadoopConf = (ObjectNode) job.get("hadoopConf");
            Iterator<String> it = globalHadoopConf.getFieldNames();
            while (it.hasNext()) {
                String key = it.next();
                if (!localHadoopConf.has(key)) {
                    localHadoopConf.put(key, globalHadoopConf.get(key));
                }
            }
        }

        if (libjars != null)
            job.put("libjars", libjars);
    }
}
Example 18
Source File: AzureCloudInstanceInformationProcessor.java From helix with Apache License 2.0
/**
 * Parse raw Azure cloud instance information.
 * @return required azure cloud instance information
 */
@Override
public AzureCloudInstanceInformation parseCloudInstanceInformation(List<String> responses) {
    AzureCloudInstanceInformation azureCloudInstanceInformation = null;
    if (responses.size() > 1) {
        throw new HelixException("Multiple responses are not supported for Azure now");
    }
    String response = responses.get(0);
    ObjectMapper mapper = new ObjectMapper();
    try {
        JsonNode jsonNode = mapper.readTree(response);
        JsonNode computeNode = jsonNode.path(COMPUTE);
        if (!computeNode.isMissingNode()) {
            String vmName = computeNode.path(INSTANCE_NAME).getTextValue();
            String platformFaultDomain = computeNode.path(DOMAIN).getTextValue();
            String vmssName = computeNode.path(INSTANCE_SET_NAME).getValueAsText();
            String azureTopology = AzureConstants.AZURE_TOPOLOGY;
            String[] parts = azureTopology.trim().split("/");
            // the hostname will be filled in by each participant
            String domain = parts[1] + "=" + platformFaultDomain + "," + parts[2] + "=";

            AzureCloudInstanceInformation.Builder builder = new AzureCloudInstanceInformation.Builder();
            builder.setInstanceName(vmName).setFaultDomain(domain).setInstanceSetName(vmssName);
            azureCloudInstanceInformation = builder.build();
        }
    } catch (IOException e) {
        throw new HelixException(
                String.format("Error in parsing cloud instance information: %s", response), e);
    }
    return azureCloudInstanceInformation;
}
Example 19
Source File: GeoJsonParser.java From arcgis-runtime-demo-java with Apache License 2.0
private List<Geometry> parseGeometries(JsonParser parser) {
    try {
        JsonNode node = mapper.readTree(parser);
        String type = node.path(FIELD_TYPE).getTextValue();
        if (type.equals(FIELD_GEOMETRY_COLLECTION)) {
            ArrayNode jsonFeatures = (ArrayNode) node.path(FIELD_GEOMETRIES);
            return parseGeometries(jsonFeatures);
        }
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    return Collections.emptyList();
}
Example 20
Source File: GroupByOperator.java From Cubert with Apache License 2.0
@Override
public PostCondition getPostCondition(Map<String, PostCondition> preConditions, JsonNode json)
        throws PreconditionException {
    PostCondition condition = preConditions.values().iterator().next();
    BlockSchema inputSchema = condition.getSchema();
    String[] partitionKeys = condition.getPartitionKeys();
    String[] sortKeys = condition.getSortKeys();
    if (condition.getPivotKeys() != null)
        sortKeys = CommonUtils.concat(condition.getPivotKeys(), sortKeys);

    BlockSchema outputSchema;
    String[] groupByColumns = JsonUtils.asArray(json, GROUP_BY_COLUMNNAMES);

    // test that group by columns are present
    for (String groupByColumn : groupByColumns) {
        if (!inputSchema.hasIndex(groupByColumn))
            throw new PreconditionException(PreconditionExceptionType.COLUMN_NOT_PRESENT,
                    "Column [" + groupByColumn + "] not present.");
    }

    // test that block is sorted on group by columns
    if (groupByColumns.length > 0) {
        if (!CommonUtils.isPrefix(sortKeys, groupByColumns)) {
            System.out.println("Input SortKeys = " + Arrays.toString(sortKeys));
            throw new PreconditionException(PreconditionExceptionType.INVALID_SORT_KEYS);
        }
    }

    // generate the output schema
    if (((ArrayNode) json.get(GROUP_BY_COLUMNNAMES)).size() > 0) {
        outputSchema = inputSchema.getSubset(groupByColumns);
    } else {
        outputSchema = new BlockSchema(new ColumnType[] {});
    }

    String[] fullExpectedSortKeys = groupByColumns;
    boolean countDistinctAggPresent = false;

    if (json.has("aggregates")) {
        for (JsonNode aggregateJson : json.path("aggregates")) {
            AggregationType aggType = AggregationType.valueOf(JsonUtils.getText(aggregateJson, "type"));
            AggregationFunction aggregator = AggregationFunctions.get(aggType, aggregateJson);
            if (aggregator == null)
                throw new PreconditionException(PreconditionExceptionType.INVALID_CONFIG,
                        "Cannot instantiate aggregation operator for type " + aggType);

            BlockSchema aggOutputSchema = aggregator.outputSchema(inputSchema, aggregateJson);
            outputSchema = outputSchema.append(aggOutputSchema);

            // check pre-condition for COUNT_DISTINCT
            String[] measureColumn = JsonUtils.asArray(aggregateJson.get("input"));
            if (aggType == AggregationType.COUNT_DISTINCT) {
                if (countDistinctAggPresent)
                    throw new PreconditionException(PreconditionExceptionType.INVALID_GROUPBY);
                countDistinctAggPresent = true;

                fullExpectedSortKeys = CommonUtils.concat(groupByColumns, measureColumn);
                if (!CommonUtils.isPrefix(sortKeys, fullExpectedSortKeys)) {
                    String errorMesg = "Expecting sortkeys = " + CommonUtils.join(fullExpectedSortKeys, ",")
                            + " actual = " + CommonUtils.join(sortKeys, ",");
                    throw new PreconditionException(PreconditionExceptionType.INVALID_SORT_KEYS, errorMesg);
                }
            }
        }
    }

    return new PostCondition(outputSchema, partitionKeys, groupByColumns);
}
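A pattern worth noting across these examples (for instance Examples 2 through 5, 10, 12, and 17) is iterating directly over the result of path() without an existence check: a missing node iterates as an empty sequence, so the loop body is simply skipped when the field is absent. Below is a minimal sketch of that behavior, again assuming Jackson 1.x; the JSON document, field names, and class name are invented for the demo.

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;

public class JsonNodePathIterationDemo {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        JsonNode root = mapper.readTree("{\"jobs\": [{\"name\": \"a\"}, {\"name\": \"b\"}]}");

        // A present array iterates over its elements.
        for (JsonNode job : root.path("jobs")) {
            System.out.println(job.get("name").getTextValue());  // a, b
        }

        // An absent field yields a missing node, which iterates as an empty
        // sequence, so the loop body is skipped and no null check is needed.
        for (JsonNode entity : root.path("entities")) {
            System.out.println(entity);  // never reached
        }
    }
}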