Java Code Examples for org.deeplearning4j.nn.conf.ComputationGraphConfiguration#fromJson()
The following examples show how to use org.deeplearning4j.nn.conf.ComputationGraphConfiguration#fromJson().
All examples are drawn from the deeplearning4j project; the originating source file is noted above each example.
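Before the extracted examples, here is a minimal, self-contained sketch of the round-trip pattern most of them exercise: build a ComputationGraphConfiguration, serialize it with toJson(), then restore it with fromJson(). The two-layer graph used here is illustrative only and is not taken from any example below; the equality of the original and restored configurations is the property the test examples assert.

import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class FromJsonRoundTrip {
    public static void main(String[] args) {
        // Build a small, illustrative graph configuration
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .graphBuilder()
                .addInputs("in")
                .addLayer("dense", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
                .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "dense")
                .setOutputs("out")
                .build();

        // Serialize to JSON and restore; the restored config equals the original
        String json = conf.toJson();
        ComputationGraphConfiguration restored = ComputationGraphConfiguration.fromJson(json);
        System.out.println(conf.equals(restored)); // true
    }
}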
Example 1
Source File: CGVaeReconstructionProbWithKeyFunction.java (from deeplearning4j, Apache License 2.0)
@Override
public VariationalAutoencoder getVaeLayer() {
    ComputationGraph network =
            new ComputationGraph(ComputationGraphConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();

    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcasted set parameters");
    network.setParams(val);

    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                "Cannot use CGVaeReconstructionProbWithKeyFunction on network that doesn't have a VAE "
                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
Example 2
Source File: MiscRegressionTests.java (from deeplearning4j, Apache License 2.0)
@Test
public void testFrozen() throws Exception {
    File f = new ClassPathResource("regression_testing/misc/legacy_frozen/configuration.json").getFile();
    String json = FileUtils.readFileToString(f, StandardCharsets.UTF_8.name());

    ComputationGraphConfiguration conf = ComputationGraphConfiguration.fromJson(json);
    int countFrozen = 0;
    for (Map.Entry<String, GraphVertex> e : conf.getVertices().entrySet()) {
        GraphVertex gv = e.getValue();
        assertNotNull(gv);
        if (gv instanceof LayerVertex) {
            LayerVertex lv = (LayerVertex) gv;
            Layer layer = lv.getLayerConf().getLayer();
            if (layer instanceof FrozenLayer)
                countFrozen++;
        }
    }

    assertTrue(countFrozen > 0);
}
Example 3
Source File: TestCustomLayers.java (from deeplearning4j, Apache License 2.0)
@Test
public void testJsonComputationGraph() {
    //ComputationGraph with a custom layer; check JSON and YAML config actually works...
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .addLayer("1", new CustomLayer(3.14159), "0")
            .addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "1")
            .setOutputs("2").build();

    String json = conf.toJson();
    String yaml = conf.toYaml();

    ComputationGraphConfiguration confFromJson = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    ComputationGraphConfiguration confFromYaml = ComputationGraphConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);
}
Example 4
Source File: CGVaeReconstructionErrorWithKeyFunction.java (from deeplearning4j, Apache License 2.0)
@Override
public VariationalAutoencoder getVaeLayer() {
    ComputationGraph network =
            new ComputationGraph(ComputationGraphConfiguration.fromJson((String) jsonConfig.getValue()));
    network.init();

    INDArray val = ((INDArray) params.value()).unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcasted set parameters");
    network.setParams(val);

    Layer l = network.getLayer(0);
    if (!(l instanceof VariationalAutoencoder)) {
        throw new RuntimeException(
                "Cannot use CGVaeReconstructionErrorWithKeyFunction on network that doesn't have a VAE "
                        + "layer as layer 0. Layer type: " + l.getClass());
    }
    return (VariationalAutoencoder) l;
}
Example 5
Source File: TestGraphNodes.java (from deeplearning4j, Apache License 2.0)
@Test
public void testJSON() {
    //The config here is nonsense, but that doesn't matter for the config -> json -> config test
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addVertex("v1", new ElementWiseVertex(ElementWiseVertex.Op.Add), "in")
            .addVertex("v2", new org.deeplearning4j.nn.conf.graph.MergeVertex(), "in", "in")
            .addVertex("v3", new PreprocessorVertex(new CnnToFeedForwardPreProcessor(1, 2, 1)), "in")
            .addVertex("v4", new org.deeplearning4j.nn.conf.graph.SubsetVertex(0, 1), "in")
            .addVertex("v5", new DuplicateToTimeSeriesVertex("in"), "in")
            .addVertex("v6", new LastTimeStepVertex("in"), "in")
            .addVertex("v7", new org.deeplearning4j.nn.conf.graph.StackVertex(), "in")
            .addVertex("v8", new org.deeplearning4j.nn.conf.graph.UnstackVertex(0, 1), "in")
            .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "in")
            .setOutputs("out", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8").build();

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
Example 6
Source File: ModelGuesser.java (from deeplearning4j, Apache License 2.0)
/**
 * Load the model from the given file path
 * @param path the path of the file to "guess"
 *
 * @return the loaded model
 * @throws Exception
 */
public static Object loadConfigGuess(String path) throws Exception {
    String input = FileUtils.readFileToString(new File(path));
    //note here that we load json BEFORE YAML. YAML
    //turns out to load just fine *accidentally*
    try {
        return MultiLayerConfiguration.fromJson(input);
    } catch (Exception e) {
        log.warn("Tried multi layer config from json", e);
        try {
            return KerasModelImport.importKerasModelConfiguration(path);
        } catch (Exception e1) {
            log.warn("Tried keras model config", e1);
            try {
                return KerasModelImport.importKerasSequentialConfiguration(path);
            } catch (Exception e2) {
                log.warn("Tried keras sequence config", e2);
                try {
                    return ComputationGraphConfiguration.fromJson(input);
                } catch (Exception e3) {
                    log.warn("Tried computation graph from json", e3);
                    try {
                        return MultiLayerConfiguration.fromYaml(input);
                    } catch (Exception e4) {
                        log.warn("Tried multi layer configuration from yaml", e4);
                        try {
                            return ComputationGraphConfiguration.fromYaml(input);
                        } catch (Exception e5) {
                            throw new ModelGuesserException("Unable to load configuration from path " + path
                                    + " (invalid config file or not a known config type)");
                        }
                    }
                }
            }
        }
    }
}
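A minimal usage sketch for the guesser above. The configuration path is hypothetical; loadConfigGuess simply tries each known configuration format in turn, as the method body shows.

// Hypothetical path to a DL4J or Keras configuration file
Object guessed = ModelGuesser.loadConfigGuess("/path/to/configuration.json");
if (guessed instanceof ComputationGraphConfiguration) {
    ComputationGraph net = new ComputationGraph((ComputationGraphConfiguration) guessed);
    net.init();
}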
Example 7
Source File: ScoreFlatMapFunctionCGDataSet.java (from deeplearning4j, Apache License 2.0)
@Override
public Iterator<Tuple2<Long, Double>> call(Iterator<DataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0L, 0.0)).iterator();
    }

    DataSetIterator iter = new IteratorDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate

    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(json));
    network.init();
    //.value() is shared by all executors on single machine -> OK, as params are not changed in score function
    INDArray val = params.value().unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<Long, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        DataSet ds = iter.next();
        double score = network.score(ds, false);

        long numExamples = ds.getFeatures().size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out.iterator();
}
Example 8
Source File: ScoreFlatMapFunctionCGMultiDataSet.java (from deeplearning4j, Apache License 2.0)
@Override
public Iterator<Tuple2<Long, Double>> call(Iterator<MultiDataSet> dataSetIterator) throws Exception {
    if (!dataSetIterator.hasNext()) {
        return Collections.singletonList(new Tuple2<>(0L, 0.0)).iterator();
    }

    MultiDataSetIterator iter = new IteratorMultiDataSetIterator(dataSetIterator, minibatchSize); //Does batching where appropriate

    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(json));
    network.init();
    //.value() is shared by all executors on single machine -> OK, as params are not changed in score function
    INDArray val = params.value().unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<Long, Double>> out = new ArrayList<>();
    while (iter.hasNext()) {
        MultiDataSet ds = iter.next();
        double score = network.score(ds, false);

        long numExamples = ds.getFeatures(0).size(0);
        out.add(new Tuple2<>(numExamples, score * numExamples));
    }

    Nd4j.getExecutioner().commit();

    return out.iterator();
}
Example 9
Source File: ParallelInference.java (from deeplearning4j, Apache License 2.0)
/**
 * This method duplicates the model for future use during inference
 */
protected void initializeReplicaModel() {
    if (protoModel instanceof ComputationGraph) {
        if (!rootDevice) {
            this.replicatedModel = new ComputationGraph(ComputationGraphConfiguration
                    .fromJson(((ComputationGraph) protoModel).getConfiguration().toJson()));
            this.replicatedModel.init();

            synchronized (locker) {
                this.replicatedModel.setParams(protoModel.params().unsafeDuplication(true));
                Nd4j.getExecutioner().commit();
            }
        } else {
            this.replicatedModel = protoModel;
        }
    } else if (protoModel instanceof MultiLayerNetwork) {
        if (!rootDevice) {
            this.replicatedModel = new MultiLayerNetwork(MultiLayerConfiguration.fromJson(
                    ((MultiLayerNetwork) protoModel).getLayerWiseConfigurations().toJson()));
            this.replicatedModel.init();

            synchronized (locker) {
                this.replicatedModel.setParams(protoModel.params().unsafeDuplication(true));
                Nd4j.getExecutioner().commit();
            }
        } else {
            this.replicatedModel = protoModel;
        }
    }
}
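The toJson()/fromJson() round trip above is a general idiom for cloning a graph configuration so that the replica can be initialized independently and given its own copy of the parameters. A condensed sketch of the same idiom, assuming original is an already-initialized ComputationGraph:

// Clone the configuration via JSON, then copy the parameters into the new network
ComputationGraph copy = new ComputationGraph(
        ComputationGraphConfiguration.fromJson(original.getConfiguration().toJson()));
copy.init();
copy.setParams(original.params().unsafeDuplication(true));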
Example 10
Source File: TestGraphNodes.java (from deeplearning4j, Apache License 2.0)
@Test
public void testDuplicateToTimeSeriesVertex() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in2d", "in3d")
            .addVertex("duplicateTS", new DuplicateToTimeSeriesVertex("in3d"), "in2d")
            .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "duplicateTS")
            .addLayer("out3d", new RnnOutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "in3d")
            .setOutputs("out", "out3d").build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    INDArray in2d = Nd4j.rand(3, 5);
    INDArray in3d = Nd4j.rand(new int[] {3, 2, 7});

    graph.setInputs(in2d, in3d);

    INDArray expOut = Nd4j.zeros(3, 5, 7);
    for (int i = 0; i < 7; i++) {
        expOut.put(new INDArrayIndex[] {NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(i)}, in2d);
    }

    GraphVertex gv = graph.getVertex("duplicateTS");
    gv.setInputs(in2d);
    INDArray outFwd = gv.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expOut, outFwd);

    INDArray expOutBackward = expOut.sum(2);
    gv.setEpsilon(expOut);
    INDArray outBwd = gv.doBackward(false, LayerWorkspaceMgr.noWorkspaces()).getSecond()[0];
    assertEquals(expOutBackward, outBwd);

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
Example 11
Source File: TestMemoryReports.java (from deeplearning4j, Apache License 2.0)
@Test
public void testPreprocessors() throws Exception {
    //https://github.com/deeplearning4j/deeplearning4j/issues/4223
    File f = new ClassPathResource("4223/CompGraphConfig.json").getTempFileFromArchive();
    String s = FileUtils.readFileToString(f, Charset.defaultCharset());

    ComputationGraphConfiguration conf = ComputationGraphConfiguration.fromJson(s);

    conf.getMemoryReport(InputType.convolutional(17, 19, 19));
}
Example 12
Source File: TestGraphNodes.java (from deeplearning4j, Apache License 2.0)
@Test
public void testLastTimeStepVertex() {

    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().graphBuilder()
            .addInputs("in")
            .addVertex("lastTS", new LastTimeStepVertex("in"), "in")
            .addLayer("out", new OutputLayer.Builder().nIn(1).nOut(1).activation(Activation.TANH)
                    .lossFunction(LossFunctions.LossFunction.MSE).build(), "lastTS")
            .setOutputs("out")
            .build();

    ComputationGraph graph = new ComputationGraph(conf);
    graph.init();

    //First: test without input mask array
    Nd4j.getRandom().setSeed(12345);
    INDArray in = Nd4j.rand(new int[] {3, 5, 6});
    INDArray expOut = in.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(5));

    GraphVertex gv = graph.getVertex("lastTS");
    gv.setInputs(in);
    //Forward pass:
    INDArray outFwd = gv.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expOut, outFwd);
    //Backward pass:
    gv.setEpsilon(expOut);
    Pair<Gradient, INDArray[]> pair = gv.doBackward(false, LayerWorkspaceMgr.noWorkspaces());
    INDArray eps = pair.getSecond()[0];
    assertArrayEquals(in.shape(), eps.shape());
    assertEquals(Nd4j.zeros(3, 5, 5),
            eps.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.interval(0, 4, true)));
    assertEquals(expOut, eps.get(NDArrayIndex.all(), NDArrayIndex.all(), NDArrayIndex.point(5)));

    //Second: test with input mask array
    INDArray inMask = Nd4j.zeros(3, 6);
    inMask.putRow(0, Nd4j.create(new double[] {1, 1, 1, 0, 0, 0}));
    inMask.putRow(1, Nd4j.create(new double[] {1, 1, 1, 1, 0, 0}));
    inMask.putRow(2, Nd4j.create(new double[] {1, 1, 1, 1, 1, 0}));
    graph.setLayerMaskArrays(new INDArray[] {inMask}, null);

    expOut = Nd4j.zeros(3, 5);
    expOut.putRow(0, in.get(NDArrayIndex.point(0), NDArrayIndex.all(), NDArrayIndex.point(2)));
    expOut.putRow(1, in.get(NDArrayIndex.point(1), NDArrayIndex.all(), NDArrayIndex.point(3)));
    expOut.putRow(2, in.get(NDArrayIndex.point(2), NDArrayIndex.all(), NDArrayIndex.point(4)));

    gv.setInputs(in);
    outFwd = gv.doForward(true, LayerWorkspaceMgr.noWorkspaces());
    assertEquals(expOut, outFwd);

    String json = conf.toJson();
    ComputationGraphConfiguration conf2 = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, conf2);
}
Example 13
Source File: FrozenLayerTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testFrozenLayerInstantiationCompGraph() {

    //We need to be able to instantiate frozen layers from JSON etc, and have them be the same as if
    // they were initialized via the builder
    ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).build(), "in")
            .addLayer("1", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).build(), "0")
            .addLayer("2", new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX)
                    .nIn(10).nOut(10).build(), "1")
            .setOutputs("2").build();

    ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
            .addInputs("in")
            .addLayer("0", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer.Builder()
                    .layer(new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                            .weightInit(WeightInit.XAVIER).build())
                    .build(), "in")
            .addLayer("1", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayer.Builder()
                    .layer(new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                            .weightInit(WeightInit.XAVIER).build())
                    .build(), "0")
            .addLayer("2", new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(
                    LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX)
                    .nIn(10).nOut(10).build(), "1")
            .setOutputs("2").build();

    ComputationGraph net1 = new ComputationGraph(conf1);
    net1.init();
    ComputationGraph net2 = new ComputationGraph(conf2);
    net2.init();

    assertEquals(net1.params(), net2.params());

    String json = conf2.toJson();
    ComputationGraphConfiguration fromJson = ComputationGraphConfiguration.fromJson(json);

    assertEquals(conf2, fromJson);

    ComputationGraph net3 = new ComputationGraph(fromJson);
    net3.init();

    INDArray input = Nd4j.rand(10, 10);

    INDArray out2 = net2.outputSingle(input);
    INDArray out3 = net3.outputSingle(input);

    assertEquals(out2, out3);
}
Example 14
Source File: TestCustomLayers.java (from deeplearning4j, Apache License 2.0)
@Test
public void testCustomOutputLayerCG() {
    //Create a ComputationGraphConfiguration with custom output layer, and check JSON and YAML config actually works...
    ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder().seed(12345)
            .graphBuilder().addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .addLayer("1", new CustomOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(10).nOut(10).activation(Activation.SOFTMAX).build(), "0")
            .setOutputs("1").build();

    String json = conf.toJson();
    String yaml = conf.toYaml();

    ComputationGraphConfiguration confFromJson = ComputationGraphConfiguration.fromJson(json);
    assertEquals(conf, confFromJson);

    ComputationGraphConfiguration confFromYaml = ComputationGraphConfiguration.fromYaml(yaml);
    assertEquals(conf, confFromYaml);

    //Third: check initialization
    Nd4j.getRandom().setSeed(12345);
    ComputationGraph net = new ComputationGraph(conf);
    net.init();

    assertTrue(net.getLayer(1) instanceof CustomOutputLayerImpl);

    //Fourth: compare to an equivalent standard output layer (should be identical)
    ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345)
            .graphBuilder().addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).build(), "in")
            .addLayer("1", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(10).nOut(10).activation(Activation.SOFTMAX).build(), "0")
            .setOutputs("1").build();
    Nd4j.getRandom().setSeed(12345);
    ComputationGraph net2 = new ComputationGraph(conf2);
    net2.init();

    assertEquals(net2.params(), net.params());

    INDArray testFeatures = Nd4j.rand(1, 10);
    INDArray testLabels = Nd4j.zeros(1, 10);
    testLabels.putScalar(0, 3, 1.0);
    DataSet ds = new DataSet(testFeatures, testLabels);

    assertEquals(net2.output(testFeatures)[0], net.output(testFeatures)[0]);
    assertEquals(net2.score(ds), net.score(ds), 1e-6);
}
Example 15
Source File: ScoreExamplesWithKeyFunction.java (from deeplearning4j, Apache License 2.0)
@Override
public Iterator<Tuple2<K, Double>> call(Iterator<Tuple2<K, MultiDataSet>> iterator) throws Exception {
    if (!iterator.hasNext()) {
        return Collections.emptyIterator();
    }

    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(jsonConfig.getValue()));
    network.init();
    INDArray val = params.value().unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Tuple2<K, Double>> ret = new ArrayList<>();

    List<MultiDataSet> collect = new ArrayList<>(batchSize);
    List<K> collectKey = new ArrayList<>(batchSize);
    int totalCount = 0;
    while (iterator.hasNext()) {
        collect.clear();
        collectKey.clear();
        int nExamples = 0;
        while (iterator.hasNext() && nExamples < batchSize) {
            Tuple2<K, MultiDataSet> t2 = iterator.next();
            MultiDataSet ds = t2._2();
            val n = ds.getFeatures(0).size(0);
            if (n != 1)
                throw new IllegalStateException("Cannot score examples with one key per data set if "
                        + "data set contains more than 1 example (numExamples: " + n + ")");
            collect.add(ds);
            collectKey.add(t2._1());
            nExamples += n;
        }
        totalCount += nExamples;

        MultiDataSet data = org.nd4j.linalg.dataset.MultiDataSet.merge(collect);

        INDArray scores = network.scoreExamples(data, addRegularization);
        double[] doubleScores = scores.data().asDouble();

        for (int i = 0; i < doubleScores.length; i++) {
            ret.add(new Tuple2<>(collectKey.get(i), doubleScores[i]));
        }
    }

    Nd4j.getExecutioner().commit();

    if (log.isDebugEnabled()) {
        log.debug("Scored {} examples ", totalCount);
    }

    return ret.iterator();
}
Example 16
Source File: FrozenLayerWithBackpropTest.java (from deeplearning4j, Apache License 2.0)
@Test
public void testFrozenLayerInstantiationCompGraph() {

    //We need to be able to instantiate frozen layers from JSON etc, and have them be the same as if
    // they were initialized via the builder
    ComputationGraphConfiguration conf1 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
            .addInputs("in")
            .addLayer("0", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).build(), "in")
            .addLayer("1", new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                    .weightInit(WeightInit.XAVIER).build(), "0")
            .addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "1")
            .setOutputs("2").build();

    ComputationGraphConfiguration conf2 = new NeuralNetConfiguration.Builder().seed(12345).graphBuilder()
            .addInputs("in")
            .addLayer("0", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop(
                    new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                            .weightInit(WeightInit.XAVIER).build()), "in")
            .addLayer("1", new org.deeplearning4j.nn.conf.layers.misc.FrozenLayerWithBackprop(
                    new DenseLayer.Builder().nIn(10).nOut(10).activation(Activation.TANH)
                            .weightInit(WeightInit.XAVIER).build()), "0")
            .addLayer("2", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(10).nOut(10).build(), "1")
            .setOutputs("2").build();

    ComputationGraph net1 = new ComputationGraph(conf1);
    net1.init();
    ComputationGraph net2 = new ComputationGraph(conf2);
    net2.init();

    assertEquals(net1.params(), net2.params());

    String json = conf2.toJson();
    ComputationGraphConfiguration fromJson = ComputationGraphConfiguration.fromJson(json);

    assertEquals(conf2, fromJson);

    ComputationGraph net3 = new ComputationGraph(fromJson);
    net3.init();

    INDArray input = Nd4j.rand(10, 10);

    INDArray out2 = net2.outputSingle(input);
    INDArray out3 = net3.outputSingle(input);

    assertEquals(out2, out3);
}
Example 17
Source File: DL4JModelValidator.java (from deeplearning4j, Apache License 2.0)
/**
 * Validate whether the file represents a valid ComputationGraph saved previously with {@link ComputationGraph#save(File)}
 * or {@link ModelSerializer#writeModel(Model, File, boolean)}, to be read with {@link ComputationGraph#load(File, boolean)}
 *
 * @param f File that should represent a saved ComputationGraph
 * @return Result of validation
 */
public static ValidationResult validateComputationGraph(@NonNull File f) {
    List<String> requiredEntries = Arrays.asList(ModelSerializer.CONFIGURATION_JSON,
            ModelSerializer.COEFFICIENTS_BIN); //TODO no-params models... might be OK to have no params, but basically useless in practice
    ValidationResult vr = Nd4jCommonValidator.isValidZipFile(f, false, requiredEntries);
    if (vr != null && !vr.isValid()) {
        vr.setFormatClass(ComputationGraph.class);
        vr.setFormatType("ComputationGraph");
        return vr;
    }

    //Check that configuration (JSON) can actually be deserialized correctly...
    String config;
    try (ZipFile zf = new ZipFile(f)) {
        ZipEntry ze = zf.getEntry(ModelSerializer.CONFIGURATION_JSON);
        config = IOUtils.toString(new BufferedReader(
                new InputStreamReader(zf.getInputStream(ze), StandardCharsets.UTF_8)));
    } catch (IOException e) {
        return ValidationResult.builder()
                .formatType("ComputationGraph")
                .formatClass(ComputationGraph.class)
                .valid(false)
                .path(Nd4jCommonValidator.getPath(f))
                .issues(Collections.singletonList("Unable to read configuration from model zip file"))
                .exception(e)
                .build();
    }

    try {
        ComputationGraphConfiguration.fromJson(config);
    } catch (Throwable t) {
        return ValidationResult.builder()
                .formatType("ComputationGraph")
                .formatClass(ComputationGraph.class)
                .valid(false)
                .path(Nd4jCommonValidator.getPath(f))
                .issues(Collections.singletonList("Zip file JSON model configuration does not appear to represent a valid ComputationGraphConfiguration"))
                .exception(t)
                .build();
    }

    //TODO should we check params too? (a) that it can be read, and (b) that it matches config (number of parameters, etc)
    return ValidationResult.builder()
            .formatType("ComputationGraph")
            .formatClass(ComputationGraph.class)
            .valid(true)
            .path(Nd4jCommonValidator.getPath(f))
            .build();
}
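A short usage sketch for the validator above. The model file path is hypothetical, and the getters on ValidationResult (isValid(), getIssues()) are assumed from the builder fields used in the method body:

File modelFile = new File("/path/to/model.zip"); // hypothetical path
ValidationResult result = DL4JModelValidator.validateComputationGraph(modelFile);
if (!result.isValid()) {
    System.out.println(result.getIssues());
}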
Example 18
Source File: ScoreExamplesFunction.java (from deeplearning4j, Apache License 2.0)
@Override
public Iterator<Double> call(Iterator<MultiDataSet> iterator) throws Exception {
    if (!iterator.hasNext()) {
        return Collections.emptyIterator();
    }

    ComputationGraph network = new ComputationGraph(ComputationGraphConfiguration.fromJson(jsonConfig.getValue()));
    network.init();
    INDArray val = params.value().unsafeDuplication();
    if (val.length() != network.numParams(false))
        throw new IllegalStateException(
                "Network did not have same number of parameters as the broadcast set parameters");
    network.setParams(val);

    List<Double> ret = new ArrayList<>();

    List<MultiDataSet> collect = new ArrayList<>(batchSize);
    int totalCount = 0;
    while (iterator.hasNext()) {
        collect.clear();
        int nExamples = 0;
        while (iterator.hasNext() && nExamples < batchSize) {
            MultiDataSet ds = iterator.next();
            val n = ds.getFeatures(0).size(0);
            collect.add(ds);
            nExamples += n;
        }
        totalCount += nExamples;

        MultiDataSet data = org.nd4j.linalg.dataset.MultiDataSet.merge(collect);

        INDArray scores = network.scoreExamples(data, addRegularization);
        double[] doubleScores = scores.data().asDouble();

        for (double doubleScore : doubleScores) {
            ret.add(doubleScore);
        }
    }

    Nd4j.getExecutioner().commit();

    if (log.isDebugEnabled()) {
        log.debug("Scored {} examples ", totalCount);
    }

    return ret.iterator();
}