Java Code Examples for org.codehaus.jackson.JsonNode#isNull()
The following examples show how to use org.codehaus.jackson.JsonNode#isNull().
The examples are extracted from open source projects; you can go to the original project or source file by following the link above each example.
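All of these examples revolve around one subtlety of the Jackson 1.x tree model: for a missing field, JsonNode#get() returns a Java null reference, while an explicit JSON null is represented by a NullNode, on which isNull() returns true. Hence the recurring guard node == null || node.isNull(). A minimal, self-contained sketch (the class name is illustrative):

    import java.io.IOException;

    import org.codehaus.jackson.JsonNode;
    import org.codehaus.jackson.map.ObjectMapper;

    public class IsNullDemo {
        public static void main(String[] args) throws IOException {
            ObjectMapper mapper = new ObjectMapper();
            JsonNode root = mapper.readTree("{\"a\": null, \"b\": 1}");

            // explicit JSON null: get() returns a NullNode, so isNull() is true
            System.out.println(root.get("a").isNull());  // true

            // missing field: get() returns a Java null reference, NOT a NullNode
            JsonNode c = root.get("c");
            System.out.println(c == null);               // true

            // hence the guard used throughout the examples below
            System.out.println(c == null || c.isNull()); // true
        }
    }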
Example 1
Source File: CubeOperator.java From Cubert with Apache License 2.0
private static Object instantiateObject(Class<?> cls, JsonNode constructorArgs)
        throws InstantiationException, IllegalAccessException, IllegalArgumentException,
        SecurityException, InvocationTargetException, NoSuchMethodException {
    if (constructorArgs == null || constructorArgs.isNull())
        return cls.newInstance();

    Object[] args = new Object[constructorArgs.size()];
    Class<?>[] argClasses = new Class[args.length];
    for (int i = 0; i < args.length; i++) {
        args[i] = JsonUtils.asObject(constructorArgs.get(i));
        argClasses[i] = args[i].getClass();
    }

    return cls.getConstructor(argClasses).newInstance(args);
}
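A hypothetical usage sketch of the method above (the variable names and the behavior of JsonUtils.asObject are assumptions; Cubert's actual call sites may differ):

    // Hedged sketch: assumes instantiateObject is accessible and that
    // JsonUtils.asObject maps a JSON text node to a java.lang.String.
    ObjectMapper mapper = new ObjectMapper();
    JsonNode args = mapper.readTree("{\"args\": [\"hello\"]}").get("args");
    Object sb = instantiateObject(StringBuilder.class, args);        // matches StringBuilder(String)

    JsonNode nullArgs = mapper.readTree("{\"args\": null}").get("args");
    Object empty = instantiateObject(StringBuilder.class, nullArgs); // NullNode -> no-arg constructor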
Example 2
Source File: BlockgenLineageAnalyzer.java From Cubert with Apache License 2.0
@Override
public JsonNode rewrite(JsonNode plan,
                        Set<String> namesUsed,
                        boolean debugMode,
                        boolean revisit) throws IOException {
    conf = new JobConf();

    // first get the blockgen id from global input cubert files
    JsonNode inputs = plan.get("input");
    if (inputs != null && !inputs.isNull()) {
        Iterator<String> inputsIt = inputs.getFieldNames();
        while (inputsIt.hasNext()) {
            String input = inputsIt.next();
            JsonNode json = inputs.get(input);
            String type = getText(json, "type");
            if (type.equalsIgnoreCase("RUBIX")) {
                try {
                    blockgenIdMap.put(input, getBlockgenId(input));
                } catch (ClassNotFoundException e) {
                    throw new PlanRewriteException(e);
                }
            }
        }
    }

    new PhysicalPlanWalker(plan, this).walk();
    return plan;
}
Example 3
Source File: TextStorage.java From Cubert with Apache License 2.0
@Override
public PostCondition getPostCondition(Configuration conf, JsonNode json, Path path)
        throws IOException {
    JsonNode params = json.get("params");
    if (params == null || params.isNull() || !params.has("schema")
            || params.get("schema").isNull())
        throw new PlanRewriteException("Cannot infer schema of TEXT input. Please specify using the 'schema' param.");

    BlockSchema schema = new BlockSchema(json.get("params").get("schema").getTextValue());
    return new PostCondition(schema, null, null);
}
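As a side note, this compound guard can be collapsed using JsonNode#path(), which returns a MissingNode (never a Java null) for absent fields. A hedged sketch of an equivalent check, reusing the json variable from the example above:

    // path() never returns a Java null reference, so the chain is NPE-safe;
    // isMissingNode() covers absence, isNull() covers an explicit JSON null.
    JsonNode schemaNode = json.path("params").path("schema");
    if (schemaNode.isMissingNode() || schemaNode.isNull())
        throw new PlanRewriteException("Cannot infer schema of TEXT input. Please specify using the 'schema' param.");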
Example 4
Source File: ConvertJSONToSQL.java From localization_nifi with Apache License 2.0
private String generateInsert(final JsonNode rootNode, final Map<String, String> attributes, final String tableName,
                              final TableSchema schema, final boolean translateFieldNames, final boolean ignoreUnmappedFields,
                              final boolean failUnmappedColumns, final boolean warningUnmappedColumns,
                              boolean escapeColumnNames, boolean quoteTableName) {

    final Set<String> normalizedFieldNames = getNormalizedColumnNames(rootNode, translateFieldNames);
    for (final String requiredColName : schema.getRequiredColumnNames()) {
        final String normalizedColName = normalizeColumnName(requiredColName, translateFieldNames);
        if (!normalizedFieldNames.contains(normalizedColName)) {
            String missingColMessage = "JSON does not have a value for the Required column '" + requiredColName + "'";
            if (failUnmappedColumns) {
                getLogger().error(missingColMessage);
                throw new ProcessException(missingColMessage);
            } else if (warningUnmappedColumns) {
                getLogger().warn(missingColMessage);
            }
        }
    }

    final StringBuilder sqlBuilder = new StringBuilder();
    int fieldCount = 0;
    sqlBuilder.append("INSERT INTO ");
    if (quoteTableName) {
        sqlBuilder.append(schema.getQuotedIdentifierString())
                .append(tableName)
                .append(schema.getQuotedIdentifierString());
    } else {
        sqlBuilder.append(tableName);
    }
    sqlBuilder.append(" (");

    // iterate over all of the elements in the JSON, building the SQL statement by adding the column names, as well as
    // adding the column value to a "sql.args.N.value" attribute and the type of a "sql.args.N.type" attribute; add the
    // columns that we are inserting into
    final Iterator<String> fieldNames = rootNode.getFieldNames();
    while (fieldNames.hasNext()) {
        final String fieldName = fieldNames.next();

        final ColumnDescription desc = schema.getColumns().get(normalizeColumnName(fieldName, translateFieldNames));
        if (desc == null && !ignoreUnmappedFields) {
            throw new ProcessException("Cannot map JSON field '" + fieldName + "' to any column in the database");
        }

        if (desc != null) {
            if (fieldCount++ > 0) {
                sqlBuilder.append(", ");
            }

            if (escapeColumnNames) {
                sqlBuilder.append(schema.getQuotedIdentifierString())
                        .append(desc.getColumnName())
                        .append(schema.getQuotedIdentifierString());
            } else {
                sqlBuilder.append(desc.getColumnName());
            }

            final int sqlType = desc.getDataType();
            attributes.put("sql.args." + fieldCount + ".type", String.valueOf(sqlType));

            final Integer colSize = desc.getColumnSize();
            final JsonNode fieldNode = rootNode.get(fieldName);
            if (!fieldNode.isNull()) {
                String fieldValue = fieldNode.asText();
                if (colSize != null && fieldValue.length() > colSize) {
                    fieldValue = fieldValue.substring(0, colSize);
                }
                attributes.put("sql.args." + fieldCount + ".value", fieldValue);
            }
        }
    }

    // complete the SQL statements by adding ?'s for all of the values to be escaped.
    sqlBuilder.append(") VALUES (");
    for (int i = 0; i < fieldCount; i++) {
        if (i > 0) {
            sqlBuilder.append(", ");
        }
        sqlBuilder.append("?");
    }
    sqlBuilder.append(")");

    if (fieldCount == 0) {
        throw new ProcessException("None of the fields in the JSON map to the columns defined by the " + tableName + " table");
    }

    return sqlBuilder.toString();
}
Example 5
Source File: PutHBaseJSON.java From localization_nifi with Apache License 2.0
@Override
protected PutFlowFile createPut(final ProcessSession session, final ProcessContext context, final FlowFile flowFile) {
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String rowId = context.getProperty(ROW_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String rowFieldName = context.getProperty(ROW_FIELD_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String columnFamily = context.getProperty(COLUMN_FAMILY).evaluateAttributeExpressions(flowFile).getValue();
    final boolean extractRowId = !StringUtils.isBlank(rowFieldName);
    final String complexFieldStrategy = context.getProperty(COMPLEX_FIELD_STRATEGY).getValue();
    final String fieldEncodingStrategy = context.getProperty(FIELD_ENCODING_STRATEGY).getValue();
    final String rowIdEncodingStrategy = context.getProperty(ROW_ID_ENCODING_STRATEGY).getValue();

    // Parse the JSON document
    final ObjectMapper mapper = new ObjectMapper();
    final AtomicReference<JsonNode> rootNodeRef = new AtomicReference<>(null);
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream in) throws IOException {
                try (final InputStream bufferedIn = new BufferedInputStream(in)) {
                    rootNodeRef.set(mapper.readTree(bufferedIn));
                }
            }
        });
    } catch (final ProcessException pe) {
        getLogger().error("Failed to parse {} as JSON due to {}; routing to failure", new Object[]{flowFile, pe.toString()}, pe);
        return null;
    }

    final JsonNode rootNode = rootNodeRef.get();

    if (rootNode.isArray()) {
        getLogger().error("Root node of JSON must be a single document, found array for {}; routing to failure", new Object[]{flowFile});
        return null;
    }

    final Collection<PutColumn> columns = new ArrayList<>();
    final AtomicReference<String> rowIdHolder = new AtomicReference<>(null);

    // convert each field/value to a column for the put, skip over nulls and arrays
    final Iterator<String> fieldNames = rootNode.getFieldNames();
    while (fieldNames.hasNext()) {
        final String fieldName = fieldNames.next();
        final AtomicReference<byte[]> fieldValueHolder = new AtomicReference<>(null);

        final JsonNode fieldNode = rootNode.get(fieldName);
        if (fieldNode.isNull()) {
            getLogger().debug("Skipping {} because value was null", new Object[]{fieldName});
        } else if (fieldNode.isValueNode()) {
            // for a value node we need to determine if we are storing the bytes of a string, or the bytes of actual types
            if (STRING_ENCODING_VALUE.equals(fieldEncodingStrategy)) {
                final byte[] valueBytes = clientService.toBytes(fieldNode.asText());
                fieldValueHolder.set(valueBytes);
            } else {
                fieldValueHolder.set(extractJNodeValue(fieldNode));
            }
        } else {
            // for non-null, non-value nodes, determine what to do based on the handling strategy
            switch (complexFieldStrategy) {
                case FAIL_VALUE:
                    getLogger().error("Complex value found for {}; routing to failure", new Object[]{fieldName});
                    return null;
                case WARN_VALUE:
                    getLogger().warn("Complex value found for {}; skipping", new Object[]{fieldName});
                    break;
                case TEXT_VALUE:
                    // use toString() here because asText() is only guaranteed to be supported on value nodes
                    // some other types of nodes, like ArrayNode, provide toString implementations
                    fieldValueHolder.set(clientService.toBytes(fieldNode.toString()));
                    break;
                case IGNORE_VALUE:
                    // silently skip
                    break;
                default:
                    break;
            }
        }

        // if we have a field value, then see if this is the row id field, if so store the value for later
        // otherwise add a new column where the fieldName and fieldValue are the column qualifier and value
        if (fieldValueHolder.get() != null) {
            if (extractRowId && fieldName.equals(rowFieldName)) {
                rowIdHolder.set(fieldNode.asText());
            } else {
                columns.add(new PutColumn(columnFamily.getBytes(StandardCharsets.UTF_8), fieldName.getBytes(StandardCharsets.UTF_8), fieldValueHolder.get()));
            }
        }
    }

    // if we are expecting a field name to use for the row id and the incoming document doesn't have it
    // log an error message so the user can see what the field names were and return null so it gets routed to failure
    if (extractRowId && rowIdHolder.get() == null) {
        final String fieldNameStr = StringUtils.join(rootNode.getFieldNames(), ",");
        getLogger().error("Row ID field named '{}' not found in field names '{}'; routing to failure", new Object[]{rowFieldName, fieldNameStr});
        return null;
    }

    final String putRowId = (extractRowId ? rowIdHolder.get() : rowId);
    byte[] rowKeyBytes = getRow(putRowId, context.getProperty(ROW_ID_ENCODING_STRATEGY).getValue());
    return new PutFlowFile(tableName, rowKeyBytes, columns, flowFile);
}
Example 6
Source File: SchemaValidator.java From avro-util with BSD 2-Clause "Simplified" License
/**
 * validation logic taken out of class {@link Schema} with adaptations
 * @param schema schema (type) of a field
 * @param defaultValue default value provided for said field in the parent schema
 * @throws SchemaParseException if name is invalid
 */
private static boolean isValidDefault(Schema schema, JsonNode defaultValue) {
    if (defaultValue == null) {
        return false;
    }
    switch (schema.getType()) {
        case STRING:
        case BYTES:
        case ENUM:
        case FIXED:
            return defaultValue.isTextual();
        case INT:
        case LONG:
        case FLOAT:
        case DOUBLE:
            return defaultValue.isNumber();
        case BOOLEAN:
            return defaultValue.isBoolean();
        case NULL:
            return defaultValue.isNull();
        case ARRAY:
            if (!defaultValue.isArray()) {
                return false;
            }
            for (JsonNode element : defaultValue) {
                if (!isValidDefault(schema.getElementType(), element)) {
                    return false;
                }
            }
            return true;
        case MAP:
            if (!defaultValue.isObject()) {
                return false;
            }
            for (JsonNode value : defaultValue) {
                if (!isValidDefault(schema.getValueType(), value)) {
                    return false;
                }
            }
            return true;
        case UNION: // union default: first branch
            return isValidDefault(schema.getTypes().get(0), defaultValue);
        case RECORD:
            if (!defaultValue.isObject()) {
                return false;
            }
            for (Schema.Field field : schema.getFields()) {
                if (!isValidDefault(field.schema(),
                        defaultValue.get(field.name()) != null ? defaultValue.get(field.name()) : field.defaultValue())) {
                    return false;
                }
            }
            return true;
        default:
            return false;
    }
}
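The UNION branch above encodes a common Avro rule: a default value must match the first branch of the union. A hedged sketch of what that means in practice (it assumes an Avro version that still uses org.codehaus.jackson, and that isValidDefault were reachable from the calling code; Schema.parse is the legacy parsing entry point):

    // For a field typed ["null", "string"], only JSON null is a valid default,
    // because the first union branch is NULL and its check is defaultValue.isNull().
    Schema union = Schema.parse("[\"null\", \"string\"]");
    ObjectMapper mapper = new ObjectMapper();
    JsonNode nullDefault = mapper.readTree("{\"d\": null}").get("d");
    JsonNode textDefault = mapper.readTree("{\"d\": \"x\"}").get("d");
    System.out.println(isValidDefault(union, nullDefault)); // true
    System.out.println(isValidDefault(union, textDefault)); // false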
Example 7
Source File: CubeDimensions.java From Cubert with Apache License 2.0
public CubeDimensions(BlockSchema inputSchema,
                      BlockSchema outputSchema,
                      String[] dimensions,
                      JsonNode groupingSetsJson) {
    // create the arrays
    inputIndex = new int[dimensions.length];
    outputIndex = new int[dimensions.length];
    dimensionTypes = new DataType[dimensions.length];
    dimensionOffsets = new int[dimensions.length];
    dictionaries = new CodeDictionary[dimensions.length];

    // initialize the above arrays
    int idx = 0;
    int offset = 0;
    for (String dim : dimensions) {
        inputIndex[idx] = inputSchema.getIndex(dim);
        outputIndex[idx] = outputSchema.getIndex(dim);
        dimensionTypes[idx] = inputSchema.getType(inputIndex[idx]);
        dimensionOffsets[idx] = offset;

        offset++;
        // pad one more int if the data type is long ("encoded" as 2 ints)
        if (dimensionTypes[idx] == DataType.LONG)
            offset++;

        // create dictionary if the dimension is string
        if (dimensionTypes[idx] == DataType.STRING)
            dictionaries[idx] = new CodeDictionary();

        idx++;
    }

    // the "last" int in the dimension key is used to store the null bit vector
    nullBitVectorIndex = offset;

    // create the dimension key
    key = new DimensionKey(nullBitVectorIndex);
    key.getArray()[nullBitVectorIndex] = 0;

    // determine if this is a full cube
    isFullCube = (groupingSetsJson == null) || (groupingSetsJson.isNull())
            || groupingSetsJson.size() == 0;

    // determine the number of ancestors
    if (isFullCube)
        numAncestors = (int) Math.pow(2, dimensions.length);
    else
        numAncestors = groupingSetsJson.size();

    // allocate the ancestors
    ancestors = new DimensionKey[numAncestors];
    for (int i = 0; i < numAncestors; i++)
        ancestors[i] = new DimensionKey(nullBitVectorIndex);

    // pre-assign null bit vector for the ancestors
    assignNullBitVector(dimensions, groupingSetsJson);

    // assign zeroedDimIndex for the ancestors
    zeroedDimArrayLength = new int[numAncestors];
    zeroedDimIndex = new byte[numAncestors][64];
    assignZeroedDimensions(dimensions);
}
Example 8
Source File: AbstractSiteToSiteReportingTask.java From nifi with Apache License 2.0
protected Object getRawNodeValue(final JsonNode fieldNode, final DataType dataType) throws IOException {
    if (fieldNode == null || fieldNode.isNull()) {
        return null;
    }

    if (fieldNode.isNumber()) {
        return fieldNode.getNumberValue();
    }

    if (fieldNode.isBinary()) {
        return fieldNode.getBinaryValue();
    }

    if (fieldNode.isBoolean()) {
        return fieldNode.getBooleanValue();
    }

    if (fieldNode.isTextual()) {
        return fieldNode.getTextValue();
    }

    if (fieldNode.isArray()) {
        final ArrayNode arrayNode = (ArrayNode) fieldNode;
        final int numElements = arrayNode.size();
        final Object[] arrayElements = new Object[numElements];
        int count = 0;

        final DataType elementDataType;
        if (dataType != null && dataType.getFieldType() == RecordFieldType.ARRAY) {
            final ArrayDataType arrayDataType = (ArrayDataType) dataType;
            elementDataType = arrayDataType.getElementType();
        } else {
            elementDataType = null;
        }

        for (final JsonNode node : arrayNode) {
            final Object value = getRawNodeValue(node, elementDataType);
            arrayElements[count++] = value;
        }

        return arrayElements;
    }

    if (fieldNode.isObject()) {
        RecordSchema childSchema;
        if (dataType != null && RecordFieldType.RECORD == dataType.getFieldType()) {
            final RecordDataType recordDataType = (RecordDataType) dataType;
            childSchema = recordDataType.getChildSchema();
        } else {
            childSchema = null;
        }

        if (childSchema == null) {
            childSchema = new SimpleRecordSchema(Collections.emptyList());
        }

        final Iterator<String> fieldNames = fieldNode.getFieldNames();
        final Map<String, Object> childValues = new HashMap<>();
        while (fieldNames.hasNext()) {
            final String childFieldName = fieldNames.next();
            final Object childValue = getRawNodeValue(fieldNode.get(childFieldName), dataType);
            childValues.put(childFieldName, childValue);
        }

        final MapRecord record = new MapRecord(childSchema, childValues);
        return record;
    }

    return null;
}
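A hypothetical illustration of the null handling at the top of that method (it assumes access to getRawNodeValue on the surrounding reporting task; expected return values are shown as comments):

    ObjectMapper mapper = new ObjectMapper();
    JsonNode root = mapper.readTree("{\"n\": 1, \"nil\": null, \"arr\": [1, 2]}");
    Object n    = getRawNodeValue(root.get("n"), null);    // Number 1
    Object nil  = getRawNodeValue(root.get("nil"), null);  // Java null (NullNode short-circuits via isNull())
    Object gone = getRawNodeValue(root.get("gone"), null); // Java null (get() already returned null)
    Object arr  = getRawNodeValue(root.get("arr"), null);  // Object[] {1, 2}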
Example 9
Source File: ConvertJSONToSQL.java From nifi with Apache License 2.0
private String generateInsert(final JsonNode rootNode, final Map<String, String> attributes, final String tableName,
                              final TableSchema schema, final boolean translateFieldNames, final boolean ignoreUnmappedFields,
                              final boolean failUnmappedColumns, final boolean warningUnmappedColumns,
                              boolean escapeColumnNames, boolean quoteTableName, final String attributePrefix) {

    final Set<String> normalizedFieldNames = getNormalizedColumnNames(rootNode, translateFieldNames);
    for (final String requiredColName : schema.getRequiredColumnNames()) {
        final String normalizedColName = normalizeColumnName(requiredColName, translateFieldNames);
        if (!normalizedFieldNames.contains(normalizedColName)) {
            String missingColMessage = "JSON does not have a value for the Required column '" + requiredColName + "'";
            if (failUnmappedColumns) {
                getLogger().error(missingColMessage);
                throw new ProcessException(missingColMessage);
            } else if (warningUnmappedColumns) {
                getLogger().warn(missingColMessage);
            }
        }
    }

    final StringBuilder sqlBuilder = new StringBuilder();
    int fieldCount = 0;
    sqlBuilder.append("INSERT INTO ");
    if (quoteTableName) {
        sqlBuilder.append(schema.getQuotedIdentifierString())
                .append(tableName)
                .append(schema.getQuotedIdentifierString());
    } else {
        sqlBuilder.append(tableName);
    }
    sqlBuilder.append(" (");

    // iterate over all of the elements in the JSON, building the SQL statement by adding the column names, as well as
    // adding the column value to a "<sql>.args.N.value" attribute and the type of a "<sql>.args.N.type" attribute; add the
    // columns that we are inserting into
    final Iterator<String> fieldNames = rootNode.getFieldNames();
    while (fieldNames.hasNext()) {
        final String fieldName = fieldNames.next();

        final ColumnDescription desc = schema.getColumns().get(normalizeColumnName(fieldName, translateFieldNames));
        if (desc == null && !ignoreUnmappedFields) {
            throw new ProcessException("Cannot map JSON field '" + fieldName + "' to any column in the database");
        }

        if (desc != null) {
            if (fieldCount++ > 0) {
                sqlBuilder.append(", ");
            }

            if (escapeColumnNames) {
                sqlBuilder.append(schema.getQuotedIdentifierString())
                        .append(desc.getColumnName())
                        .append(schema.getQuotedIdentifierString());
            } else {
                sqlBuilder.append(desc.getColumnName());
            }

            final int sqlType = desc.getDataType();
            attributes.put(attributePrefix + ".args." + fieldCount + ".type", String.valueOf(sqlType));

            final Integer colSize = desc.getColumnSize();
            final JsonNode fieldNode = rootNode.get(fieldName);
            if (!fieldNode.isNull()) {
                String fieldValue = createSqlStringValue(fieldNode, colSize, sqlType);
                attributes.put(attributePrefix + ".args." + fieldCount + ".value", fieldValue);
            }
        }
    }

    // complete the SQL statements by adding ?'s for all of the values to be escaped.
    sqlBuilder.append(") VALUES (");
    for (int i = 0; i < fieldCount; i++) {
        if (i > 0) {
            sqlBuilder.append(", ");
        }
        sqlBuilder.append("?");
    }
    sqlBuilder.append(")");

    if (fieldCount == 0) {
        throw new ProcessException("None of the fields in the JSON map to the columns defined by the " + tableName + " table");
    }

    return sqlBuilder.toString();
}
Example 10
Source File: ConvertJSONToSQL.java From nifi with Apache License 2.0
private String generateDelete(final JsonNode rootNode, final Map<String, String> attributes, final String tableName,
                              final TableSchema schema, final boolean translateFieldNames, final boolean ignoreUnmappedFields,
                              final boolean failUnmappedColumns, final boolean warningUnmappedColumns,
                              boolean escapeColumnNames, boolean quoteTableName, final String attributePrefix) {

    final Set<String> normalizedFieldNames = getNormalizedColumnNames(rootNode, translateFieldNames);
    for (final String requiredColName : schema.getRequiredColumnNames()) {
        final String normalizedColName = normalizeColumnName(requiredColName, translateFieldNames);
        if (!normalizedFieldNames.contains(normalizedColName)) {
            String missingColMessage = "JSON does not have a value for the Required column '" + requiredColName + "'";
            if (failUnmappedColumns) {
                getLogger().error(missingColMessage);
                throw new ProcessException(missingColMessage);
            } else if (warningUnmappedColumns) {
                getLogger().warn(missingColMessage);
            }
        }
    }

    final StringBuilder sqlBuilder = new StringBuilder();
    int fieldCount = 0;
    sqlBuilder.append("DELETE FROM ");
    if (quoteTableName) {
        sqlBuilder.append(schema.getQuotedIdentifierString())
                .append(tableName)
                .append(schema.getQuotedIdentifierString());
    } else {
        sqlBuilder.append(tableName);
    }

    sqlBuilder.append(" WHERE ");

    // iterate over all of the elements in the JSON, building the SQL statement by adding the column names, as well as
    // adding the column value to a "<sql>.args.N.value" attribute and the type of a "<sql>.args.N.type" attribute
    final Iterator<String> fieldNames = rootNode.getFieldNames();
    while (fieldNames.hasNext()) {
        final String fieldName = fieldNames.next();

        final ColumnDescription desc = schema.getColumns().get(normalizeColumnName(fieldName, translateFieldNames));
        if (desc == null && !ignoreUnmappedFields) {
            throw new ProcessException("Cannot map JSON field '" + fieldName + "' to any column in the database");
        }

        if (desc != null) {
            if (fieldCount++ > 0) {
                sqlBuilder.append(" AND ");
            }

            if (escapeColumnNames) {
                sqlBuilder.append(schema.getQuotedIdentifierString())
                        .append(desc.getColumnName())
                        .append(schema.getQuotedIdentifierString());
            } else {
                sqlBuilder.append(desc.getColumnName());
            }
            sqlBuilder.append(" = ?");

            final int sqlType = desc.getDataType();
            attributes.put(attributePrefix + ".args." + fieldCount + ".type", String.valueOf(sqlType));

            final Integer colSize = desc.getColumnSize();
            final JsonNode fieldNode = rootNode.get(fieldName);
            if (!fieldNode.isNull()) {
                String fieldValue = fieldNode.asText();
                if (colSize != null && fieldValue.length() > colSize) {
                    fieldValue = fieldValue.substring(0, colSize);
                }
                attributes.put(attributePrefix + ".args." + fieldCount + ".value", fieldValue);
            }
        }
    }

    if (fieldCount == 0) {
        throw new ProcessException("None of the fields in the JSON map to the columns defined by the " + tableName + " table");
    }

    return sqlBuilder.toString();
}