Java Code Examples for org.codehaus.jackson.node.ObjectNode#remove()
The following examples show how to use
org.codehaus.jackson.node.ObjectNode#remove().
The examples are taken from open-source projects; you can go to the original project
or source file by following the links above each example.
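Before the project examples, here is a minimal, self-contained sketch of the method's contract (the class and field names are illustrative, not taken from the projects below): remove(String) deletes the named field from the ObjectNode and returns the removed value, or null if the field was absent.

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;

public class ObjectNodeRemoveSketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode node = mapper.createObjectNode();
        node.put("keep", "value");
        node.put("drop", 42);

        // remove(String) deletes the field and returns the removed value
        JsonNode removed = node.remove("drop");
        System.out.println(removed); // 42
        System.out.println(node);    // {"keep":"value"}

        // removing an absent field is a no-op that returns null
        System.out.println(node.remove("missing")); // null
    }
}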
Example 1
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0
@Override
public JsonNode rewrite(JsonNode plan, Set<String> namesUsed, boolean debugMode, boolean revisit) {
    this.namesUsed = namesUsed;
    ObjectNode newPlan = (ObjectNode) cloneNode(plan);
    ArrayNode jobs = mapper.createArrayNode();
    for (JsonNode job : plan.path("jobs")) {
        jobs.add(rewriteJob(job));
    }
    newPlan.remove("jobs");
    newPlan.put("jobs", jobs);
    return newPlan;
}
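A note on the pattern above: ObjectNode#put(String, JsonNode) replaces any existing field of the same name on its own, so the remove("jobs") call before put("jobs", jobs) is defensive rather than strictly required; it does make the wholesale replacement of the "jobs" array explicit.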
Example 2
Source File: GenericEntityDeserializer.java From secure-data-service with Apache License 2.0
@Override
public GenericEntity deserialize(JsonParser parser, DeserializationContext context) throws IOException {
    ObjectMapper mapper = (ObjectMapper) parser.getCodec();
    ObjectNode root = (ObjectNode) mapper.readTree(parser);

    String entityType = null;
    if (root.has(ENTITY_TYPE_KEY)) {
        entityType = root.get(ENTITY_TYPE_KEY).getTextValue();
        root.remove(ENTITY_TYPE_KEY);
    }

    Map<String, Object> data = processObject(root);

    if (entityType != null) {
        return new GenericEntity(entityType, data);
    } else {
        return new GenericEntity("Generic", data);
    }
}
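Here remove() consumes a discriminator: the deserializer reads the ENTITY_TYPE_KEY value, removes the field, and only then flattens the remaining fields into the payload map, so the type marker never leaks into the entity data.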
Example 3
Source File: TestSamzaObjectMapper.java From samza with Apache License 2.0
/**
 * Given a {@link ContainerModel} JSON without a processor-id but with a container-id, deserialization should use
 * the container-id to calculate the processor-id.
 */
@Test
public void testDeserializeContainerModelOnlyContainerId() throws IOException {
    ObjectNode jobModelJson = buildJobModelJson();
    ObjectNode containerModelJson = (ObjectNode) jobModelJson.get("containers").get("1");
    containerModelJson.remove("processor-id");
    containerModelJson.put("container-id", 1);
    assertEquals(this.jobModel, deserializeFromObjectNode(jobModelJson));
}
Example 4
Source File: TestSamzaObjectMapper.java From samza with Apache License 2.0
/**
 * Given a {@link ContainerModel} JSON with neither a processor-id nor a container-id, deserialization should fail.
 */
@Test(expected = SamzaException.class)
public void testDeserializeContainerModelMissingProcessorIdAndContainerId() throws IOException {
    ObjectNode jobModelJson = buildJobModelJson();
    ObjectNode containerModelJson = (ObjectNode) jobModelJson.get("containers").get("1");
    containerModelJson.remove("processor-id");
    deserializeFromObjectNode(jobModelJson);
}
Example 5
Source File: TestSamzaObjectMapper.java From samza with Apache License 2.0
/**
 * Given a {@link ContainerModel} JSON with only an "id" field, deserialization should fail.
 * This verifies that even though {@link ContainerModel} has a getId method, the "id" field is not used, since
 * "processor-id" is the field that is supposed to be used.
 */
@Test(expected = SamzaException.class)
public void testDeserializeContainerModelIdFieldOnly() throws IOException {
    ObjectNode jobModelJson = buildJobModelJson();
    ObjectNode containerModelJson = (ObjectNode) jobModelJson.get("containers").get("1");
    containerModelJson.remove("processor-id");
    containerModelJson.put("id", 1);
    deserializeFromObjectNode(jobModelJson);
}
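These three tests rely on the fact that JsonNode#get returns a live reference to the child node, not a copy: mutating the ObjectNode obtained via jobModelJson.get("containers").get("1") changes the tree that is later deserialized. A standalone sketch of that behavior (names illustrative, not from the Samza suite):

import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.node.ObjectNode;

public class LiveReferenceSketch {
    public static void main(String[] args) {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode root = mapper.createObjectNode();
        root.putObject("containers").putObject("1").put("processor-id", "p1");

        // get() returns the child node itself, so removing a field through
        // the returned reference mutates the whole tree
        ObjectNode container = (ObjectNode) root.get("containers").get("1");
        container.remove("processor-id");

        System.out.println(root); // {"containers":{"1":{}}}
    }
}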
Example 6
Source File: AbstractReportModelGenerater.java From bdf3 with Apache License 2.0
@SuppressWarnings({ "unchecked", "rawtypes" })
public List<Map<String, Object>> getGridModelData(Map<String, Object> map,
        List<Map<String, Object>> columnInfos, String intercepterBean) throws Exception {
    List<Map<String, Object>> dataList = null;
    String dataScope = (String) map.get("dataScope");
    String treeColumn = (String) map.get("treeColumn");
    int maxSize = Integer.valueOf(map.get("maxSize").toString());
    ViewManager viewManager = ViewManager.getInstance();
    if (dataScope.equals("serverAll")) {
        Object dataProviderParameter = map.get("dataProviderParameter");
        String dataProviderId = (String) map.get("dataProviderId");
        int pageSize = Integer.valueOf(map.get("pageSize").toString());
        if (map.get("sysParameter") != null) {
            ObjectMapper om = JsonUtils.getObjectMapper();
            String sp = om.writeValueAsString(map.get("sysParameter"));
            ObjectNode rudeSysParameter = (ObjectNode) om.readTree(sp);
            JsonNode rudeCriteria = null;
            if (rudeSysParameter != null) {
                rudeCriteria = rudeSysParameter.remove("criteria");
            }
            MetaData sysParameter = null;
            if (rudeSysParameter != null) {
                sysParameter = (MetaData) JsonUtils.toJavaObject(rudeSysParameter, null, null, false, null);
                if (rudeCriteria != null && rudeCriteria instanceof ObjectNode) {
                    sysParameter.put("criteria", CriterionUtils.getCriteria((ObjectNode) rudeCriteria));
                }
                if (sysParameter != null && !sysParameter.isEmpty()) {
                    dataProviderParameter = new ParameterWrapper(dataProviderParameter, sysParameter);
                }
            }
        }
        DataType resultDataType = null;
        String resultDataTypeName = (String) map.get("resultDataType");
        if (StringUtils.isNotEmpty(resultDataTypeName)) {
            resultDataType = viewManager.getDataType(resultDataTypeName);
        }
        DataProvider dataProvider = viewManager.getDataProvider(dataProviderId);
        Collection<Object> collection = null;
        if (pageSize > 0) {
            Page<Object> page = new Page<Object>(maxSize, 1);
            dataProvider.getPagingResult(dataProviderParameter, page, resultDataType);
            collection = page.getEntities();
        } else {
            collection = (Collection<Object>) dataProvider.getResult(dataProviderParameter, resultDataType);
        }
        if (collection instanceof EntityList) {
            if (collection.size() <= 65536) {
                collection = ((EntityList) collection).getTarget();
            } else {
                Field field = ((EntityCollection) collection).getClass().getField("target");
                field.setAccessible(true);
                collection = (Collection<Object>) field.get(collection);
            }
        }
        dataList = new ArrayList<Map<String, Object>>();
        for (Object obj : collection) {
            // if (ProxyBeanUtils.isProxy(obj)) {
            //     obj = ProxyBeanUtils.getProxyTarget(obj);
            // }
            if (obj instanceof Map) {
                dataList.add((Map<String, Object>) obj);
            } else {
                if (collection.size() <= 65536) {
                    Map<String, Object> targetMap = new HashMap<String, Object>();
                    EntityUtils.copyProperties(targetMap, obj);
                    dataList.add(targetMap);
                } else {
                    dataList.add(PropertyUtils.describe(obj));
                }
            }
        }
        dataList = retrieveServerComplexPropertyData(dataList, columnInfos);
    } else if (dataScope.equals("currentPage")) {
        dataList = new ArrayList<Map<String, Object>>();
        List<Map<String, Object>> clientData = (List<Map<String, Object>>) map.get("data");
        List<Object> subDataList;
        for (Map<String, Object> tempMap : clientData) {
            if (tempMap.get(treeColumn) != null) {
                map.put(treeColumn, tempMap.get(treeColumn));
            }
            dataList.add(tempMap);
            if (tempMap.get("children") != null) {
                subDataList = (List<Object>) tempMap.get("children");
                this.createChildData(dataList, subDataList, "", treeColumn);
            }
        }
    }
    for (Map<String, Object> dataMap : dataList) {
        Set<String> nameSet = dataMap.keySet();
        for (String name : nameSet) {
            replaceValueWithMapping(dataMap, columnInfos, name);
        }
    }
    fireGridDataInterceptor(intercepterBean, dataList);
    return dataList;
}
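Example 6 also puts the return value of remove() to work: rudeSysParameter.remove("criteria") strips the "criteria" field and hands back the removed JsonNode in a single call, which is then converted separately via CriterionUtils.getCriteria(...).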
Example 7
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0
private JsonNode rewriteBlockgen(JsonNode job) {
    String blockgenType = job.get("shuffle").get("blockgenType").getTextValue();

    if (blockgenType.equalsIgnoreCase("BY_INDEX")) {
        return rewriteBlockgenByIndex(job);
    }
    // else: following is the rewrite of BLOCKGEN

    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    JsonNode blockgenTypeNode = shuffle.get("blockgenType");
    JsonNode blockgenValueNode = shuffle.get("blockgenValue");

    if (!shuffle.has("pivotKeys"))
        throw new PlanRewriteException("PivotKeys are not defined in SHUFFLE");

    // add CREATE_BLOCK operator in the reducer
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    ObjectNode createBlockOperator = createObjectNode("operator", "CREATE_BLOCK",
                                                      "input", shuffle.get("name"),
                                                      "output", shuffle.get("name"),
                                                      "blockgenType", blockgenTypeNode,
                                                      "blockgenValue", blockgenValueNode,
                                                      "partitionKeys", shuffle.get("partitionKeys"));
    copyLine(shuffle, createBlockOperator, "[REDUCE] ");
    reduce.insert(0, createBlockOperator);

    // add DISTINCT operator, if requested
    boolean isDistinct = shuffle.has("distinct") && shuffle.get("distinct").getBooleanValue();
    if (isDistinct) {
        ObjectNode distinct = createObjectNode("operator", "DISTINCT",
                                               "input", shuffle.get("name"),
                                               "output", shuffle.get("name"));
        copyLine(shuffle, distinct, "[REDUCE DISTINCT]");
        reduce.insert(0, distinct);
    }

    // the sort keys for the SHUFFLE are set to the actual blockgen PARTITION KEYS.
    // These sort keys are configured into the JsonNode for the CREATE_BLOCK operator

    // clean up shuffle
    shuffle.remove("blockgenType");
    shuffle.remove("blockgenValue");
    shuffle.put("type", "SHUFFLE");
    shuffle.put("distinct", isDistinct);
    if (!CommonUtils.isPrefix(asArray(shuffle, "pivotKeys"), asArray(shuffle, "partitionKeys"))) {
        createBlockOperator.put("pivotKeys", shuffle.get("pivotKeys"));
        shuffle.put("pivotKeys", shuffle.get("partitionKeys"));
    }

    return newJob;
}
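In this and the remaining ShuffleRewriter examples, remove() serves as clean-up: once the rewriter has consumed a directive such as "blockgenType" or "blockgenValue", it strips the field so that the emitted SHUFFLE node carries only what the physical plan expects. (When every field should go, ObjectNode also provides removeAll(); these rewrites remove fields selectively.)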
Example 8
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0
private JsonNode rewriteBlockgenByIndex(JsonNode job) {
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String path = getText(shuffle, "relation");

    // add a cache index
    String indexName = generateVariableName(namesUsed);
    if (!newJob.has("cacheIndex") || newJob.get("cacheIndex").isNull())
        newJob.put("cacheIndex", mapper.createArrayNode());
    ArrayNode cacheIndex = (ArrayNode) newJob.get("cacheIndex");
    cacheIndex.add(createObjectNode("name", indexName, "path", path));

    // create BLOCK-INDEX-JOIN operator
    ObjectNode blockIndexJoin = createObjectNode("operator", "BLOCK_INDEX_JOIN",
                                                 "input", shuffle.get("name"),
                                                 "output", shuffle.get("name"),
                                                 "partitionKeys", shuffle.get("partitionKeys"),
                                                 "index", indexName);
    copyLine(shuffle, blockIndexJoin, "[MAP] ");

    // add it as the last operator for all mappers
    for (JsonNode map : newJob.path("map")) {
        if (!map.has("operators") || map.get("operators").isNull())
            ((ObjectNode) map).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) map.get("operators");
        // we need unique references for all blockIndexJoin
        operators.add(JsonUtils.cloneNode(blockIndexJoin));
    }

    // create CREATE-BLOCK operator
    ObjectNode createBlock = createObjectNode("operator", "CREATE_BLOCK",
                                              "input", shuffle.get("name"),
                                              "output", shuffle.get("name"),
                                              "blockgenType", "BY_INDEX",
                                              "index", indexName,
                                              "partitionKeys", createArrayNode("BLOCK_ID"),
                                              "indexPath", path);
    copyLine(shuffle, createBlock, "[REDUCE] ");

    // add it as first operator in reduce
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, createBlock);

    // add DISTINCT operator, if requested
    boolean isDistinct = shuffle.has("distinct") && shuffle.get("distinct").getBooleanValue();
    if (isDistinct) {
        ObjectNode distinct = createObjectNode("operator", "DISTINCT",
                                               "input", shuffle.get("name"),
                                               "output", shuffle.get("name"));
        copyLine(shuffle, distinct, "[REDUCE DISTINCT] ");
        reduce.insert(0, distinct);
    }

    // blockgen by index uses a different partitioner
    shuffle.put("partitionerClass", "com.linkedin.cubert.plan.physical.ByIndexPartitioner");

    // clean up shuffle
    shuffle.put("type", "SHUFFLE");
    shuffle.put("partitionKeys", createArrayNode("BLOCK_ID"));
    shuffle.put("distinct", isDistinct);
    shuffle.put("index", indexName);
    shuffle.remove("blockgenType");
    shuffle.remove("relation");

    ArrayNode pivotKeys = mapper.createArrayNode();
    pivotKeys.add("BLOCK_ID");
    if (shuffle.has("pivotKeys")) {
        for (JsonNode key : shuffle.path("pivotKeys"))
            pivotKeys.add(key);
    }
    shuffle.put("pivotKeys", pivotKeys);

    return newJob;
}
Example 9
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0
private JsonNode rewriteCube(JsonNode job) {
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    String name = getText(shuffle, "name");
    JsonNode aggregates = shuffle.get("aggregates");

    // create the OLAP_CUBE_COUNT_DISTINCT operator
    ObjectNode cube = createObjectNode("operator", "CUBE",
                                       "input", name,
                                       "output", name,
                                       "dimensions", shuffle.get("dimensions"),
                                       "aggregates", cloneNode(aggregates));
    if (shuffle.has("groupingSets"))
        cube.put("groupingSets", shuffle.get("groupingSets"));
    if (shuffle.has("innerDimensions"))
        cube.put("innerDimensions", shuffle.get("innerDimensions"));
    if (shuffle.has("hashTableSize"))
        cube.put("hashTableSize", shuffle.get("hashTableSize"));
    copyLine(shuffle, cube, "[MAP] ");

    // add it as the last operator for all mappers
    for (JsonNode map : newJob.path("map")) {
        if (!map.has("operators") || map.get("operators").isNull())
            ((ObjectNode) map).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) map.get("operators");
        operators.add(cube);
    }

    rewriteGroupByAggregateForCube(aggregates);

    // create the GROUP BY operator at the reducer
    ObjectNode groupBy = createObjectNode("operator", "GROUP_BY",
                                          "input", name,
                                          "output", name,
                                          "groupBy", shuffle.get("dimensions"),
                                          "aggregates", aggregates);
    copyLine(shuffle, groupBy, "[REDUCE] ");

    // add it as first operator in reduce
    if (!newJob.has("reduce") || newJob.get("reduce").isNull())
        newJob.put("reduce", mapper.createArrayNode());
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, groupBy);

    // clean up shuffle
    shuffle.put("type", "SHUFFLE");
    shuffle.put("aggregates", aggregates);
    shuffle.put("partitionKeys", shuffle.get("dimensions"));
    shuffle.put("pivotKeys", shuffle.get("dimensions"));
    shuffle.remove("dimensions");
    shuffle.remove("groupingSets");
    shuffle.remove("innerDimensions");

    return newJob;
}
Example 10
Source File: ShuffleRewriter.java From Cubert with Apache License 2.0
private JsonNode rewriteJoin(JsonNode job) {
    ObjectNode newJob = (ObjectNode) cloneNode(job);
    ObjectNode shuffle = (ObjectNode) newJob.get("shuffle");
    JsonNode joinKeys = shuffle.get("joinKeys");
    String blockName = getText(shuffle, "name");

    // make sure there are two mappers in the job
    JsonNode mapJsons = newJob.get("map");
    if (mapJsons.size() != 2) {
        throw new RuntimeException("There must be exactly two multimappers for JOIN shuffle command.");
    }

    // Add the Map side operator in each of the mappers
    // tag = 1, for the first mapper (non dimensional)
    // tag = 0, for the second dimensional mapper
    int tag = 1;
    for (JsonNode mapJson : mapJsons) {
        if (!mapJson.has("operators") || mapJson.get("operators").isNull())
            ((ObjectNode) mapJson).put("operators", mapper.createArrayNode());
        ArrayNode operators = (ArrayNode) mapJson.get("operators");
        // we need unique references for all blockIndexJoin
        operators.add(createObjectNode("operator", "REDUCE_JOIN_MAPPER",
                                       "input", createArrayNode(blockName),
                                       "output", blockName,
                                       "joinKeys", joinKeys,
                                       "tag", tag));
        tag--;
    }

    // create the reduce side operator
    ObjectNode reducerOperator = createObjectNode("operator", "REDUCE_JOIN",
                                                  "input", createArrayNode(blockName),
                                                  "output", blockName,
                                                  "joinKeys", joinKeys);
    if (shuffle.has("joinType"))
        reducerOperator.put("joinType", shuffle.get("joinType"));

    // add the reduce side operator
    if (!newJob.has("reduce") || newJob.get("reduce").isNull()) {
        newJob.put("reduce", mapper.createArrayNode());
    }
    ArrayNode reduce = (ArrayNode) newJob.get("reduce");
    reduce.insert(0, reducerOperator);

    // Fix the shuffle json
    if (shuffle.has("partitionKeys")) {
        String[] partitionKeys = JsonUtils.asArray(shuffle, "partitionKeys");
        String[] joinKeyNames = JsonUtils.asArray(shuffle, "joinKeys");
        // make sure that partitionKeys is a prefix of joinKeys
        if (!CommonUtils.isPrefix(joinKeyNames, partitionKeys)) {
            throw new RuntimeException("Partition key must be a prefix of join keys");
        }
    } else {
        shuffle.put("partitionKeys", shuffle.get("joinKeys"));
    }

    // We will sort on (joinKeys + ___tag)
    JsonNode pivotKeys = cloneNode(shuffle.get("joinKeys"));
    ((ArrayNode) pivotKeys).add("___tag");

    shuffle.put("type", "SHUFFLE");
    shuffle.put("join", true);
    shuffle.put("pivotKeys", pivotKeys);
    shuffle.remove("joinKeys");

    return newJob;
}