Java Code Examples for org.codehaus.jackson.node.ObjectNode#get()
The following examples show how to use org.codehaus.jackson.node.ObjectNode#get().
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
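Before the project examples, a minimal sketch of the method itself may help: on the Jackson 1.x (org.codehaus.jackson) API, ObjectNode#get(String) returns the child JsonNode bound to the given field name, or null when the field is absent, which is why the examples below routinely null-check the result before reading values. The class name and JSON document here are illustrative only and do not come from any of the projects above.

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;

public class ObjectNodeGetDemo
{
    public static void main(String[] args) throws Exception
    {
        ObjectMapper mapper = new ObjectMapper();
        // Parse an illustrative JSON object; readTree returns the root JsonNode.
        ObjectNode node = (ObjectNode) mapper.readTree("{\"operator\": \"GROUP_BY\", \"count\": 6}");

        // get() returns the child node for a field, or null if the field is absent.
        JsonNode operator = node.get("operator");
        if (operator != null && !operator.isNull())
            System.out.println(operator.getTextValue()); // prints GROUP_BY

        System.out.println(node.get("count").getIntValue()); // prints 6
        System.out.println(node.get("missing"));             // prints null
    }
}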
Example 1
Source File: Lineage.java From Cubert with Apache License 2.0 | 6 votes |
public static void visitOperators(ObjectNode programNode,
                                  ObjectNode jobNode,
                                  OperatorVisitor tracerObj,
                                  boolean reverse)
{
    ArrayNode jobs = (ArrayNode) programNode.get("jobs");
    int si = (reverse ? jobs.size() - 1 : 0);
    int ei = (reverse ? -1 : jobs.size());
    for (int i = si; i != ei; i = i + increment(reverse))
    {
        if (jobNode != null && jobNode != jobs.get(i))
            continue;
        if (!visitOperatorsInJob(programNode, (ObjectNode) jobs.get(i), tracerObj, reverse))
            return;
    }
}
Example 2
Source File: Lineage.java From Cubert with Apache License 2.0 | 6 votes |
public boolean isDistinctGroupBy(ObjectNode gbyNode)
{
    if (!JsonUtils.getText(gbyNode, "operator").equals("GROUP_BY"))
        return false;
    if (gbyNode.get("aggregates") != null)
        return false;
    ObjectNode inpNode =
            this.lineageInfo.preLineageInfo.findOperatorSource(gbyNode,
                                                               gbyNode.get("input").getTextValue());
    String[] inColumns = getSchemaOutputColumns(inpNode);
    String[] outColumns = getSchemaOutputColumns(gbyNode);
    if (inColumns.length != outColumns.length)
        return false;
    for (int i = 0; i < inColumns.length; i++)
    {
        if (inColumns[i].equals(outColumns[i]))
            return false;
    }
    return true;
}
Example 3
Source File: Lineage.java From Cubert with Apache License 2.0 | 6 votes |
private static String checkTopLevelColumn(ObjectNode sourceOp, JsonNode genExprNode)
{
    String topColName = null;
    if (!(genExprNode instanceof ObjectNode))
        return null;
    ObjectNode opNode = (ObjectNode) genExprNode;
    if (opNode.get("function") == null
            || !opNode.get("function").getTextValue().equals("INPUT_PROJECTION"))
        return null;
    ArrayNode argsNode = (ArrayNode) opNode.get("arguments");
    if (argsNode.size() != 1
            || ((topColName = findNamedColumn(argsNode)) == null)
            && (topColName = findIndexedColumn(sourceOp, argsNode)) == null)
        return null;
    return topColName;
}
Example 4
Source File: AggregateRewriter.java From Cubert with Apache License 2.0 | 6 votes |
protected ObjectNode getFactTimeSpecNode(ObjectNode factNode, ObjectNode cubeNode) throws AggregateRewriteException
{
    List<String> paths = lineage.getPaths(factNode.get("path"));
    for (JsonNode timeSpecNode : (ArrayNode) (cubeNode.get("timeColumnSpec")))
    {
        String elementPath = ((ObjectNode) timeSpecNode).get("factPath").getTextValue();
        if (paths.indexOf(elementPath) != -1)
        {
            tNode = (ObjectNode) timeSpecNode;
            return tNode;
        }
    }
    throw new AggregateRewriteException("No matching time column specification found for FACT load at "
            + factNode.toString());
}
Example 5
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0 | 6 votes |
private JsonNode rewriteDistinct(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String name = getText(shuffle, "name");

    ObjectNode distinctOp =
            JsonUtils.createObjectNode("operator", "DISTINCT", "input", name, "output", name);

    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, distinctOp);

    shuffle.put("type", "SHUFFLE");
    shuffle.put("distinctShuffle", true);

    return newJob;
}
Example 6
Source File: Lineage.java From Cubert with Apache License 2.0 | 5 votes |
private static boolean isAvroLoad(ObjectNode jobNode, JsonNode phaseNode, ObjectNode operatorNode)
{
    if (operatorNode.get("operator") != null
            || !operatorNode.get("type").getTextValue().equalsIgnoreCase("AVRO"))
        return false;
    return true;
}
Example 7
Source File: CriterionUtils.java From bdf3 with Apache License 2.0 | 5 votes |
private static Criterion parseCriterion(ObjectNode rudeCriterion) throws Exception {
    String junction = JsonUtils.getString(rudeCriterion, "junction");
    if (StringUtils.isNotEmpty(junction)) {
        Junction junctionCrition;
        if ("or".equals(junction)) {
            junctionCrition = new Or();
        } else {
            junctionCrition = new And();
        }
        ArrayNode criterions = (ArrayNode) rudeCriterion.get("criterions");
        if (criterions != null) {
            for (Iterator<JsonNode> it = criterions.iterator(); it.hasNext();) {
                junctionCrition.addCriterion(parseCriterion((ObjectNode) it.next()));
            }
        }
        return junctionCrition;
    } else {
        String property = JsonUtils.getString(rudeCriterion, "property");
        String expression = JsonUtils.getString(rudeCriterion, "expression");
        String dataTypeName = JsonUtils.getString(rudeCriterion, "dataType");
        DataType dataType = null;
        ViewManager viewManager = ViewManager.getInstance();
        if (StringUtils.isNotEmpty(dataTypeName)) {
            dataType = viewManager.getDataType(dataTypeName);
        }
        return viewManager.getFilterCriterionParser().createFilterCriterion(property, dataType, expression);
    }
}
Example 8
Source File: Lineage.java From Cubert with Apache License 2.0 | 5 votes |
public static boolean isCountDistinctAggregate(ObjectNode operatorNode)
{
    if (operatorNode.get("operator") == null)
        return false;
    String type = operatorNode.get("operator").getTextValue();
    if (!type.equals("GROUP_BY") && !type.equals("CUBE"))
        return false;
    if (!operatorNode.has("aggregates"))
        return false;

    for (JsonNode aggregateJson : operatorNode.path("aggregates"))
    {
        // Create the aggregator object
        JsonNode typeNode = aggregateJson.get("type");
        // Group by case
        if (typeNode.isTextual())
        {
            AggregationType aggType =
                    AggregationType.valueOf(JsonUtils.getText(aggregateJson, "type"));
            String measureColumn = JsonUtils.getText(aggregateJson, "input");
            if (aggType != AggregationType.COUNT_DISTINCT)
                return false;
        }
        else if (typeNode instanceof ArrayNode)
        {
            String[] typeArray = JsonUtils.asArray(aggregateJson, "type");
            if (!typeArray[0].equals("SUM") || !typeArray[1].equals("COUNT_TO_ONE"))
                return false;
        }
    }
    return true;
}
Example 9
Source File: LineageHelper.java From Cubert with Apache License 2.0 | 5 votes |
public static boolean isStoreCommand(ObjectNode jobNode, JsonNode phaseNode, ObjectNode opNode)
{
    return (jobNode.get("output") == opNode
            || opNode.get("operator") != null
                    && opNode.get("operator").getTextValue().equals("TEE") ? true : false);
}
Example 10
Source File: LinkDeserializer.java From secure-data-service with Apache License 2.0 | 5 votes |
@Override
public Link deserialize(JsonParser parser, DeserializationContext context) throws IOException {
    ObjectMapper mapper = (ObjectMapper) parser.getCodec();
    ObjectNode root = (ObjectNode) mapper.readTree(parser);
    JsonNode relNode = root.get("rel");
    JsonNode hrefNode = root.get("href");
    return new BasicLink(relNode.asText(), new URL(hrefNode.asText()));
}
Example 11
Source File: LineageHelper.java From Cubert with Apache License 2.0 | 5 votes |
public ArrayList<ObjectNode> findAllOperatorSources(ObjectNode jobNode,
                                                    JsonNode phaseNode,
                                                    ObjectNode opNode)
{
    ArrayList<ObjectNode> sourceNodes = new ArrayList<ObjectNode>();
    if (isLoadOperator(jobNode, phaseNode, opNode))
    {
        List<String> loadPaths = operatorMapGet(loadPathsMap, opNode);
        for (String loadPath : loadPaths)
        {
            ObjectNode storeNode = findPrecedingStore(opNode, loadPath);
            if (storeNode != null)
                sourceNodes.add(storeNode);
        }
        return sourceNodes;
    }

    JsonNode inputsNode =
            (isStoreCommand(jobNode, phaseNode, opNode) ? opNode.get("name")
                    : opNode.get("input"));
    if (inputsNode == null)
    {
        // trace("Getting sources for " + opNode.toString() + " ?");
        return null;
    }

    if (!(inputsNode instanceof ArrayNode))
        sourceNodes.addAll(findOperatorInputSources(opNode, inputsNode.getTextValue()));
    else
    {
        for (JsonNode inputNode : (ArrayNode) inputsNode)
            sourceNodes.addAll(findOperatorInputSources(opNode, inputNode.getTextValue()));
    }
    return sourceNodes;
}
Example 12
Source File: CountDistinctRewriter.java From Cubert with Apache License 2.0 | 5 votes |
private boolean incompatibleWithBlockgenPath(ObjectNode opNode)
{
    if (opNode.get("operator") == null)
        return false;
    String optype = opNode.get("operator").getTextValue();
    OperatorType type = OperatorType.valueOf(optype);
    if (type == OperatorType.CREATE_BLOCK)
        return false;
    if (!type.isTupleOperator() || type == OperatorType.GROUP_BY)
        return true;
    return false;
}
Example 13
Source File: RewriteUtils.java From Cubert with Apache License 2.0 | 5 votes |
public static String[] getInputRelations(ObjectNode opNode)
{
    String[] inputRelations;
    if (opNode.get("input") instanceof ArrayNode)
        inputRelations = JsonUtils.asArray(opNode.get("input"));
    else
        inputRelations = new String[] { opNode.get("input").getTextValue() };
    return inputRelations;
}
Example 14
Source File: MetadataResource.java From Eagle with Apache License 2.0 | 5 votes |
private void addTo(ObjectNode resourceNode,
                   String uriPrefix,
                   AbstractResourceMethod srm,
                   String path)
{
    if (resourceNode.get(uriPrefix) == null)
    {
        ObjectNode inner = JsonNodeFactory.instance.objectNode();
        inner.put("path", path);
        inner.put("methods", JsonNodeFactory.instance.arrayNode());
        resourceNode.put(uriPrefix, inner);
    }

    ((ArrayNode) resourceNode.get(uriPrefix).get("methods")).add(srm.getHttpMethod());
}
Example 15
Source File: Lineage.java From Cubert with Apache License 2.0 | 5 votes |
public static boolean isJoinOperator(ObjectNode operatorNode)
{
    if (operatorNode.get("operator") != null
            && (operatorNode.get("operator").getTextValue().equalsIgnoreCase("JOIN")
                    || operatorNode.get("operator").getTextValue().equalsIgnoreCase("HASHJOIN")))
        return true;
    return false;
}
Example 16
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0 | 4 votes |
private JsonNode rewriteBlockgen(JsonNode job)
{
    String blockgenType = job.get("shuffle").get("blockgenType").getTextValue();

    if (blockgenType.equalsIgnoreCase("BY_INDEX"))
    {
        return rewriteBlockgenByIndex(job);
    }
    // else: following is the rewrite of BLOCKGEN

    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    JsonNode blockgenTypeNode = shuffle.get("blockgenType");
    JsonNode blockgenValueNode = shuffle.get("blockgenValue");

    if (!shuffle.has("pivotKeys"))
        throw new PlanRewriteException("PivotKeys are not defined in SHUFFLE");

    // add CREATE_BLOCK operator in the reducer
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    ObjectNode createBlockOperator =
            createObjectNode("operator", "CREATE_BLOCK",
                             "input", shuffle.get("name"),
                             "output", shuffle.get("name"),
                             "blockgenType", blockgenTypeNode,
                             "blockgenValue", blockgenValueNode,
                             "partitionKeys", shuffle.get("partitionKeys"));
    copyLine(shuffle, createBlockOperator, "[REDUCE] ");
    reduce.insert(0, createBlockOperator);

    // add DISTINCT operator, if requested
    boolean isDistinct = shuffle.has("distinct") && shuffle.get("distinct").getBooleanValue();
    if (isDistinct)
    {
        ObjectNode distinct =
                createObjectNode("operator", "DISTINCT",
                                 "input", shuffle.get("name"),
                                 "output", shuffle.get("name"));
        copyLine(shuffle, distinct, "[REDUCE DISTINCT]");
        reduce.insert(0, distinct);
    }

    // the sort keys for the SHUFFLE are set to the actual
    // blockgen PARTITION KEYS. These sort keys are configured into the JsonNode for
    // the CREATE_BLOCK operator

    // clean up shuffle
    shuffle.remove("blockgenType");
    shuffle.remove("blockgenValue");
    shuffle.put("type", "SHUFFLE");
    shuffle.put("distinct", isDistinct);
    if (!CommonUtils.isPrefix(asArray(shuffle, "pivotKeys"), asArray(shuffle, "partitionKeys")))
    {
        createBlockOperator.put("pivotKeys", shuffle.get("pivotKeys"));
        shuffle.put("pivotKeys", shuffle.get("partitionKeys"));
    }

    return newJob;
}
Example 17
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0 | 4 votes |
private JsonNode rewriteBlockgenByIndex(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String path = getText(shuffle, "relation");

    // add a cache index
    String indexName = generateVariableName(namesUsed);
    if (!newJob.has("cacheIndex") || newJob.get("cacheIndex").isNull())
        newJob.put("cacheIndex", mapper.createArrayNode());
    ArrayNode cacheIndex = (ArrayNode) newJob.get("cacheIndex");
    cacheIndex.add(createObjectNode("name", indexName, "path", path));

    // create BLOCK-INDEX-JOIN operator
    ObjectNode blockIndexJoin =
            createObjectNode("operator", "BLOCK_INDEX_JOIN",
                             "input", shuffle.get("name"),
                             "output", shuffle.get("name"),
                             "partitionKeys", shuffle.get("partitionKeys"),
                             "index", indexName);
    copyLine(shuffle, blockIndexJoin, "[MAP] ");

    // add it as the last operator for all mapper
    for (JsonNode map : newJob.path("map"))
    {
        if (!map.has("operators") || map.get("operators").isNull())
            ((ObjectNode) map).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) map.get("operators");
        // we need unique references for all blockIndexJoin
        operators.add(JsonUtils.cloneNode(blockIndexJoin));
    }

    // create CREATE-BLOCK operator
    ObjectNode createBlock =
            createObjectNode("operator", "CREATE_BLOCK",
                             "input", shuffle.get("name"),
                             "output", shuffle.get("name"),
                             "blockgenType", "BY_INDEX",
                             "index", indexName,
                             "partitionKeys", createArrayNode("BLOCK_ID"),
                             "indexPath", path);
    copyLine(shuffle, createBlock, "[REDUCE] ");

    // add it as first operator in reduce
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, createBlock);

    // add DISTINCT operator, if requested
    boolean isDistinct = shuffle.has("distinct") && shuffle.get("distinct").getBooleanValue();
    if (isDistinct)
    {
        ObjectNode distinct =
                createObjectNode("operator", "DISTINCT",
                                 "input", shuffle.get("name"),
                                 "output", shuffle.get("name"));
        copyLine(shuffle, distinct, "[REDUCE DISTINCT] ");
        reduce.insert(0, distinct);
    }

    // blockgen by index uses a different partitioner
    shuffle.put("partitionerClass", "com.linkedin.cubert.plan.physical.ByIndexPartitioner");

    // clean up shuffle
    shuffle.put("type", "SHUFFLE");
    shuffle.put("partitionKeys", createArrayNode("BLOCK_ID"));
    shuffle.put("distinct", isDistinct);
    shuffle.put("index", indexName);
    shuffle.remove("blockgenType");
    shuffle.remove("relation");

    ArrayNode pivotKeys = mapper.createArrayNode();
    pivotKeys.add("BLOCK_ID");
    if (shuffle.has("pivotKeys"))
    {
        for (JsonNode key : shuffle.path("pivotKeys"))
            pivotKeys.add(key);
    }
    shuffle.put("pivotKeys", pivotKeys);

    return newJob;
}
Example 18
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0 | 4 votes |
private JsonNode rewriteCube(JsonNode job)
{
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String name = getText(shuffle, "name");
    JsonNode aggregates = shuffle.get("aggregates");

    // create the OLAP_CUBE_COUNT_DISTINCT operator
    ObjectNode cube =
            createObjectNode("operator", "CUBE",
                             "input", name,
                             "output", name,
                             "dimensions", shuffle.get("dimensions"),
                             "aggregates", cloneNode(aggregates));
    if (shuffle.has("groupingSets"))
        cube.put("groupingSets", shuffle.get("groupingSets"));
    if (shuffle.has("innerDimensions"))
        cube.put("innerDimensions", shuffle.get("innerDimensions"));
    if (shuffle.has("hashTableSize"))
        cube.put("hashTableSize", shuffle.get("hashTableSize"));
    copyLine(shuffle, cube, "[MAP] ");

    // add it as the last operator for all mapper
    for (JsonNode map : newJob.path("map"))
    {
        if (!map.has("operators") || map.get("operators").isNull())
            ((ObjectNode) map).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) map.get("operators");
        operators.add(cube);
    }

    rewriteGroupByAggregateForCube(aggregates);

    // create the GROUP BY operator at the reducer
    ObjectNode groupBy =
            createObjectNode("operator", "GROUP_BY",
                             "input", name,
                             "output", name,
                             "groupBy", shuffle.get("dimensions"),
                             "aggregates", aggregates);
    copyLine(shuffle, groupBy, "[REDUCE] ");

    // add it as first operator in reduce
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, groupBy);

    // clean up shuffle
    shuffle.put("type", "SHUFFLE");
    shuffle.put("aggregates", aggregates);
    shuffle.put("partitionKeys", shuffle.get("dimensions"));
    shuffle.put("pivotKeys", shuffle.get("dimensions"));
    shuffle.remove("dimensions");
    shuffle.remove("groupingSets");
    shuffle.remove("innerDimensions");

    return newJob;
}
Example 19
Source File: IndexerDefinitionJsonSerDeser.java From hbase-indexer with Apache License 2.0 | 4 votes |
public IndexerDefinitionBuilder fromJson(ObjectNode node,
                                         IndexerDefinitionBuilder indexerDefinitionBuilder) {
    String name = JsonUtil.getString(node, "name");
    LifecycleState lifecycleState =
            LifecycleState.valueOf(JsonUtil.getString(node, "lifecycleState"));
    IncrementalIndexingState incrementalIndexingState =
            IncrementalIndexingState.valueOf(JsonUtil.getString(node, "incrementalIndexingState"));
    BatchIndexingState batchIndexingState =
            BatchIndexingState.valueOf(JsonUtil.getString(node, "batchIndexingState"));

    String queueSubscriptionId = JsonUtil.getString(node, "subscriptionId", null);
    long subscriptionTimestamp = JsonUtil.getLong(node, "subscriptionTimestamp", 0L);

    String indexerComponentFactory = JsonUtil.getString(node, "indexerComponentFactory", null);
    byte[] configuration = getByteArrayProperty(node, "configuration");

    String connectionType = JsonUtil.getString(node, "connectionType", null);
    ObjectNode connectionParamsNode = JsonUtil.getObject(node, "connectionParams", null);
    Map<String, String> connectionParams = null;
    if (connectionParamsNode != null) {
        connectionParams = new HashMap<String, String>();
        Iterator<Map.Entry<String, JsonNode>> it = connectionParamsNode.getFields();
        while (it.hasNext()) {
            Map.Entry<String, JsonNode> entry = it.next();
            connectionParams.put(entry.getKey(), entry.getValue().getTextValue());
        }
    }

    BatchBuildInfo activeBatchBuild = null;
    if (node.get("activeBatchBuild") != null) {
        activeBatchBuild = parseBatchBuildInfo(JsonUtil.getObject(node, "activeBatchBuild"));
    }

    BatchBuildInfo lastBatchBuild = null;
    if (node.get("lastBatchBuild") != null) {
        lastBatchBuild = parseBatchBuildInfo(JsonUtil.getObject(node, "lastBatchBuild"));
    }

    String[] batchIndexCliArguments = getStringArrayProperty(node, "batchIndexCliArguments");
    String[] defaultBatchIndexCliArguments = getStringArrayProperty(node, "defaultBatchIndexCliArguments");

    int occVersion = JsonUtil.getInt(node, "occVersion");

    indexerDefinitionBuilder.name(name);
    indexerDefinitionBuilder.lifecycleState(lifecycleState);
    indexerDefinitionBuilder.incrementalIndexingState(incrementalIndexingState);
    indexerDefinitionBuilder.batchIndexingState(batchIndexingState);
    indexerDefinitionBuilder.subscriptionId(queueSubscriptionId);
    indexerDefinitionBuilder.subscriptionTimestamp(subscriptionTimestamp);
    indexerDefinitionBuilder.configuration(configuration);
    indexerDefinitionBuilder.indexerComponentFactory(indexerComponentFactory);
    indexerDefinitionBuilder.connectionType(connectionType);
    indexerDefinitionBuilder.connectionParams(connectionParams);
    indexerDefinitionBuilder.activeBatchBuildInfo(activeBatchBuild);
    indexerDefinitionBuilder.lastBatchBuildInfo(lastBatchBuild);
    indexerDefinitionBuilder.batchIndexCliArguments(batchIndexCliArguments);
    indexerDefinitionBuilder.defaultBatchIndexCliArguments(defaultBatchIndexCliArguments);
    indexerDefinitionBuilder.occVersion(occVersion);
    return indexerDefinitionBuilder;
}
Example 20
Source File: RewriteUtils.java From Cubert with Apache License 2.0 | 4 votes |
public static boolean hasSummaryRewrite(ObjectNode programNode)
{
    if (programNode.get("summaryRewrite") != null
            && programNode.get("summaryRewrite").getTextValue().equals("true"))
        return true;
    return false;
}