Java Code Examples for org.deeplearning4j.nn.multilayer.MultiLayerNetwork#init()
The following examples show how to use
org.deeplearning4j.nn.multilayer.MultiLayerNetwork#init().
The examples are taken from open source projects; the source file, project, and license are noted above each example.
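Before the examples, here is a minimal, self-contained sketch of the pattern they all share: build a MultiLayerConfiguration, construct a MultiLayerNetwork from it, and call init() to allocate and initialize the parameter arrays before any training or inference. The class names are standard DL4J; the layer sizes, activations, and class name InitExample are arbitrary illustrative choices, not taken from any example below.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class InitExample {
    public static void main(String[] args) {
        // Build a simple two-layer configuration (sizes are illustrative)
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                .layer(0, new DenseLayer.Builder().nIn(4).nOut(8).activation(Activation.RELU).build())
                .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(8).nOut(3).build())
                .build();

        // init() must be called once after construction: it allocates and initializes
        // the parameter arrays according to the configuration.
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        System.out.println("Number of parameters: " + net.numParams());
    }
}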
Example 1
Source File: LayerConfigTest.java From deeplearning4j with Apache License 2.0
@Test
public void testUpdaterAdamParamsLayerwiseOverride() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new Adam(1.0, 0.5, 0.5, 1e-8))
            .list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).updater(new Adam(1.0, 0.6, 0.7, 1e-8)).build())
            .build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    assertEquals(0.5, ((Adam) ((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getBeta1(), 0.0);
    assertEquals(0.6, ((Adam) ((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getBeta1(), 0.0);
    assertEquals(0.5, ((Adam) ((BaseLayer) conf.getConf(0).getLayer()).getIUpdater()).getBeta2(), 0.0);
    assertEquals(0.7, ((Adam) ((BaseLayer) conf.getConf(1).getLayer()).getIUpdater()).getBeta2(), 0.0);
}
Example 2
Source File: ScoringModelTest.java From deeplearning4j with Apache License 2.0
protected Model buildMultiLayerNetworkModel(int numFeatures) throws Exception {
    final MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .list(
                    new OutputLayer.Builder().nIn(numFeatures).nOut(1)
                            .lossFunction(LossFunctions.LossFunction.MSE)
                            .activation(Activation.IDENTITY).build()
            )
            .build();

    final MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    final float[] floats = new float[numFeatures + 1];
    float base = 1f;
    for (int ii = 0; ii < floats.length; ++ii) {
        base *= 2;
        floats[ii] = base;
    }

    final INDArray params = Nd4j.create(floats);
    model.setParams(params);

    return model;
}
Example 3
Source File: TestEarlyStopping.java From deeplearning4j with Apache License 2.0
@Test
public void testClassificationScoreFunctionSimple() throws Exception {
    for (Evaluation.Metric metric : Evaluation.Metric.values()) {
        log.info("Metric: " + metric);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                .layer(new DenseLayer.Builder().nIn(784).nOut(32).build())
                .layer(new OutputLayer.Builder().nIn(32).nOut(10).activation(Activation.SOFTMAX).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        DataSetIterator iter = new MnistDataSetIterator(32, false, 12345);

        List<DataSet> l = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            DataSet ds = iter.next();
            l.add(ds);
        }

        iter = new ExistingDataSetIterator(l);

        EarlyStoppingModelSaver<MultiLayerNetwork> saver = new InMemoryModelSaver<>();
        EarlyStoppingConfiguration<MultiLayerNetwork> esConf =
                new EarlyStoppingConfiguration.Builder<MultiLayerNetwork>()
                        .epochTerminationConditions(new MaxEpochsTerminationCondition(5))
                        .iterationTerminationConditions(
                                new MaxTimeIterationTerminationCondition(1, TimeUnit.MINUTES))
                        .scoreCalculator(new ClassificationScoreCalculator(metric, iter))
                        .modelSaver(saver)
                        .build();

        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, net, iter);
        EarlyStoppingResult<MultiLayerNetwork> result = trainer.fit();

        assertNotNull(result.getBestModel());
    }
}
Example 4
Source File: AlexNet.java From deeplearning4j with Apache License 2.0
@Override
public MultiLayerNetwork init() {
    MultiLayerConfiguration conf = conf();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();

    return network;
}
Example 5
Source File: SaveLoadMultiLayerNetwork.java From dl4j-tutorials with MIT License
public static void main(String[] args) throws Exception {
    //Define a simple MultiLayerNetwork:
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(0.01, 0.9))
            .list()
            .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
            .backprop(true).pretrain(false).build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    //Save the model
    File locationToSave = new File("model/MyMultiLayerNetwork.zip"); //Where to save the network. Note: the file is in .zip format - can be opened externally
    /**
     * Controls whether the model's updater state is saved along with the parameters.
     * If you plan to keep training the model after saving, set this to true so that
     * later training can continue incrementally from the saved updater state.
     * If the model is final and will not be trained again, set it to false.
     */
    boolean saveUpdater = true; //Updater: i.e., the state for Momentum, RMSProp, Adagrad etc. Save this if you want to train your network more in the future
    ModelSerializer.writeModel(net, locationToSave, saveUpdater);

    //Load the model
    MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(locationToSave);

    System.out.println("Saved and loaded parameters are equal: " + net.params().equals(restored.params()));
    System.out.println("Saved and loaded configurations are equal: " + net.getLayerWiseConfigurations().equals(restored.getLayerWiseConfigurations()));
}
Example 6
Source File: LayerConfigValidationTest.java From deeplearning4j with Apache License 2.0
@Test(expected = IllegalStateException.class)
@Ignore //Old assumption: throw exception on l1 but no regularization. Current design: warn, not exception
public void testRegNotSetL1Global() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new Sgd(0.3)).l1(0.5).list()
            .layer(0, new DenseLayer.Builder().nIn(2).nOut(2).build())
            .layer(1, new DenseLayer.Builder().nIn(2).nOut(2).build()).build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();
}
Example 7
Source File: TestFailureListener.java From deeplearning4j with Apache License 2.0
@Ignore
@Test
public void testFailureRandom_OR() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new Adam(1e-4))
            .list()
            .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    String username = System.getProperty("user.name");
    assertNotNull(username);
    assertFalse(username.isEmpty());

    net.setListeners(new FailureTestingListener(
            FailureTestingListener.FailureMode.SYSTEM_EXIT_1,
            new FailureTestingListener.Or(
                    new FailureTestingListener.IterationEpochTrigger(false, 10000),
                    new FailureTestingListener.RandomProb(FailureTestingListener.CallType.ANY, 0.02))
    ));

    DataSetIterator iter = new IrisDataSetIterator(5, 150);
    net.fit(iter);
}
Example 8
Source File: TransferLearningMLNTest.java From deeplearning4j with Apache License 2.0
@Test
public void testTransferLearningSameDiffLayers() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .dataType(DataType.DOUBLE)
            .activation(Activation.TANH)
            .updater(new Adam(0.01))
            .weightInit(WeightInit.XAVIER)
            .list()
            .layer(new LSTM.Builder().nOut(8).build())
            .layer(new SelfAttentionLayer.Builder().nOut(4).nHeads(2).projectInput(true).build())
            .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build())
            .layer(new OutputLayer.Builder().nOut(2).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .setInputType(InputType.recurrent(4))
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray in = Nd4j.rand(DataType.FLOAT, 3, 4, 5);
    INDArray out = net.output(in);

    MultiLayerNetwork net2 = new TransferLearning.Builder(net)
            .fineTuneConfiguration(FineTuneConfiguration.builder().updater(new Adam(0.01)).build())
            .removeLayersFromOutput(1)
            .addLayer(new OutputLayer.Builder().nIn(4).nOut(2).activation(Activation.SOFTMAX)
                    .lossFunction(LossFunctions.LossFunction.MCXENT).build())
            .build();

    net2.setParam("3_W", net.getParam("3_W"));
    net2.setParam("3_b", net.getParam("3_b"));

    Map<String, INDArray> p1 = net.paramTable();
    Map<String, INDArray> p2 = net2.paramTable();
    for (String s : p1.keySet()) {
        INDArray i1 = p1.get(s);
        INDArray i2 = p2.get(s);
        assertEquals(s, i1, i2);
    }

    INDArray out2 = net2.output(in);

    assertEquals(out, out2);
}
Example 9
Source File: CuDNNGradientChecks.java From deeplearning4j with Apache License 2.0
@Test
public void testLSTM() throws Exception {
    Nd4j.getRandom().setSeed(12345);
    int minibatch = 4;
    int inputSize = 3;
    int lstmLayerSize = 4;
    int timeSeriesLength = 3;
    int nOut = 4;
    INDArray input = Nd4j.rand(new int[] {minibatch, inputSize, timeSeriesLength});
    INDArray labels = Nd4j.zeros(minibatch, nOut, timeSeriesLength);
    Random r = new Random(12345);
    for (int i = 0; i < minibatch; i++) {
        for (int j = 0; j < timeSeriesLength; j++) {
            labels.putScalar(i, r.nextInt(nOut), j, 1.0);
        }
    }

    MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
            .dataType(DataType.DOUBLE)
            .updater(new NoOp()).seed(12345L)
            .dist(new NormalDistribution(0, 2)).list()
            .layer(0, new LSTM.Builder().nIn(input.size(1)).nOut(lstmLayerSize)
                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
            .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize)
                    .gateActivationFunction(Activation.SIGMOID).activation(Activation.TANH).build())
            .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX).nIn(lstmLayerSize).nOut(nOut).build());

    MultiLayerNetwork mln = new MultiLayerNetwork(builder.build());
    mln.init();

    Field f = org.deeplearning4j.nn.layers.recurrent.LSTM.class.getDeclaredField("helper");
    f.setAccessible(true);

    org.deeplearning4j.nn.layers.recurrent.LSTM l =
            (org.deeplearning4j.nn.layers.recurrent.LSTM) mln.getLayer(1);
    LSTMHelper helper = (LSTMHelper) f.get(l);
    assertTrue(helper instanceof CudnnLSTMHelper);

    //-------------------------------
    //For debugging/comparison to no-cudnn case: set helper field to null
    //        f.set(l, null);
    //        assertNull(f.get(l));
    //-------------------------------

    if (PRINT_RESULTS) {
        for (int j = 0; j < mln.getnLayers(); j++)
            System.out.println("Layer " + j + " # params: " + mln.getLayer(j).numParams());
    }

    boolean gradOK = GradientCheckUtil.checkGradients(mln, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
            DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels, null, null,
            true, 32, null, null);

    assertTrue(gradOK);
}
Example 10
Source File: LearnDigitsDropout.java From aifh with Apache License 2.0
/**
 * The main method.
 * @param args Not used.
 */
public static void main(String[] args) {
    try {
        int seed = 43;
        double learningRate = 1e-2;
        int nEpochs = 50;
        int batchSize = 500;

        // Setup training data.
        System.out.println("Please wait, reading MNIST training data.");
        String dir = System.getProperty("user.dir");
        MNISTReader trainingReader = MNIST.loadMNIST(dir, true);
        MNISTReader validationReader = MNIST.loadMNIST(dir, false);

        DataSet trainingSet = trainingReader.getData();
        DataSet validationSet = validationReader.getData();

        DataSetIterator trainSetIterator = new ListDataSetIterator(trainingSet.asList(), batchSize);
        DataSetIterator validationSetIterator = new ListDataSetIterator(validationSet.asList(),
                validationReader.getNumRows());

        System.out.println("Training set size: " + trainingReader.getNumImages());
        System.out.println("Validation set size: " + validationReader.getNumImages());

        System.out.println(trainingSet.get(0).getFeatures().size(1));
        System.out.println(validationSet.get(0).getFeatures().size(1));

        int numInputs = trainingReader.getNumCols() * trainingReader.getNumRows();
        int numOutputs = 10;
        int numHiddenNodes = 100;

        // Create neural network.
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(1)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .learningRate(learningRate)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list(2)
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                        .weightInit(WeightInit.XAVIER)
                        .activation("relu")
                        .build())
                .layer(1, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                        .weightInit(WeightInit.XAVIER)
                        .activation("softmax")
                        .nIn(numHiddenNodes).nOut(numOutputs).build())
                .pretrain(false).backprop(true).build();

        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(new ScoreIterationListener(1));

        // Define when we want to stop training.
        EarlyStoppingModelSaver saver = new InMemoryModelSaver();
        EarlyStoppingConfiguration esConf = new EarlyStoppingConfiguration.Builder()
                //.epochTerminationConditions(new MaxEpochsTerminationCondition(10))
                .epochTerminationConditions(new ScoreImprovementEpochTerminationCondition(5))
                .evaluateEveryNEpochs(1)
                .scoreCalculator(new DataSetLossCalculator(validationSetIterator, true)) //Calculate test set score
                .modelSaver(saver)
                .build();
        EarlyStoppingTrainer trainer = new EarlyStoppingTrainer(esConf, conf, trainSetIterator);

        // Train and display result.
        EarlyStoppingResult result = trainer.fit();
        System.out.println("Termination reason: " + result.getTerminationReason());
        System.out.println("Termination details: " + result.getTerminationDetails());
        System.out.println("Total epochs: " + result.getTotalEpochs());
        System.out.println("Best epoch number: " + result.getBestModelEpoch());
        System.out.println("Score at best epoch: " + result.getBestModelScore());

        model = saver.getBestModel();

        // Evaluate
        Evaluation eval = new Evaluation(numOutputs);
        validationSetIterator.reset();

        for (int i = 0; i < validationSet.numExamples(); i++) {
            DataSet t = validationSet.get(i);
            INDArray features = t.getFeatureMatrix();
            INDArray labels = t.getLabels();
            INDArray predicted = model.output(features, false);
            eval.eval(labels, predicted);
        }

        //Print the evaluation statistics
        System.out.println(eval.stats());
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example 11
Source File: RegressionEvalTest.java From deeplearning4j with Apache License 2.0
@Test
public void testRegressionEvalMethods() {
    //Basic sanity check
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.ZERO).list()
            .layer(0, new OutputLayer.Builder().activation(Activation.TANH)
                    .lossFunction(LossFunctions.LossFunction.MSE).nIn(10).nOut(5).build())
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray f = Nd4j.zeros(4, 10);
    INDArray l = Nd4j.ones(4, 5);

    DataSet ds = new DataSet(f, l);
    DataSetIterator iter = new ExistingDataSetIterator(Collections.singletonList(ds));
    org.nd4j.evaluation.regression.RegressionEvaluation re = net.evaluateRegression(iter);

    for (int i = 0; i < 5; i++) {
        assertEquals(1.0, re.meanSquaredError(i), 1e-6);
        assertEquals(1.0, re.meanAbsoluteError(i), 1e-6);
    }

    ComputationGraphConfiguration graphConf =
            new NeuralNetConfiguration.Builder().weightInit(WeightInit.ZERO).graphBuilder()
                    .addInputs("in").addLayer("0", new OutputLayer.Builder()
                            .lossFunction(LossFunctions.LossFunction.MSE)
                            .activation(Activation.TANH).nIn(10).nOut(5).build(), "in")
                    .setOutputs("0").build();

    ComputationGraph cg = new ComputationGraph(graphConf);
    cg.init();

    RegressionEvaluation re2 = cg.evaluateRegression(iter);

    for (int i = 0; i < 5; i++) {
        assertEquals(1.0, re2.meanSquaredError(i), 1e-6);
        assertEquals(1.0, re2.meanAbsoluteError(i), 1e-6);
    }
}
Example 12
Source File: TestCompareParameterAveragingSparkVsSingleMachine.java From deeplearning4j with Apache License 2.0
@Test
public void testOneExecutor() {
    //Idea: single worker/executor on Spark should give identical results to a single machine

    int miniBatchSize = 10;
    int nWorkers = 1;

    for (boolean saveUpdater : new boolean[] {true, false}) {
        JavaSparkContext sc = getContext(nWorkers);

        try {
            //Do training locally, for 3 minibatches
            int[] seeds = {1, 2, 3};

            MultiLayerNetwork net = new MultiLayerNetwork(getConf(12345, new RmsProp(0.5)));
            net.init();
            INDArray initialParams = net.params().dup();

            for (int i = 0; i < seeds.length; i++) {
                DataSet ds = getOneDataSet(miniBatchSize, seeds[i]);
                if (!saveUpdater)
                    net.setUpdater(null);
                net.fit(ds);
            }
            INDArray finalParams = net.params().dup();

            //Do training on Spark with one executor, for 3 separate minibatches
            TrainingMaster tm = getTrainingMaster(1, miniBatchSize, saveUpdater);
            SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, getConf(12345, new RmsProp(0.5)), tm);
            sparkNet.setCollectTrainingStats(true);
            INDArray initialSparkParams = sparkNet.getNetwork().params().dup();

            for (int i = 0; i < seeds.length; i++) {
                List<DataSet> list = getOneDataSetAsIndividalExamples(miniBatchSize, seeds[i]);
                JavaRDD<DataSet> rdd = sc.parallelize(list);

                sparkNet.fit(rdd);
            }

            INDArray finalSparkParams = sparkNet.getNetwork().params().dup();

            assertEquals(initialParams, initialSparkParams);
            assertNotEquals(initialParams, finalParams);
            assertEquals(finalParams, finalSparkParams);
        } finally {
            sc.stop();
        }
    }
}
Example 13
Source File: GlobalPoolingMaskingTests.java From deeplearning4j with Apache License 2.0
@Test
public void testMaskingCnnDim2_SingleExample() {
    //Test masking, where mask is along dimension 2

    int minibatch = 1;
    int depthIn = 2;
    int depthOut = 2;
    int nOut = 2;
    int height = 6;
    int width = 3;

    PoolingType[] poolingTypes =
            new PoolingType[] {PoolingType.SUM, PoolingType.AVG, PoolingType.MAX, PoolingType.PNORM};

    for (PoolingType pt : poolingTypes) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.XAVIER)
                .convolutionMode(ConvolutionMode.Same).seed(12345L).list()
                .layer(0, new ConvolutionLayer.Builder().nIn(depthIn).nOut(depthOut).kernelSize(2, width)
                        .stride(1, width).activation(Activation.TANH).build())
                .layer(1, new org.deeplearning4j.nn.conf.layers.GlobalPoolingLayer.Builder().poolingType(pt)
                        .build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(depthOut).nOut(nOut).build())
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        INDArray inToBeMasked = Nd4j.rand(new int[] {minibatch, depthIn, height, width});

        //Shape for mask: [1, 1, height, 1] - masking along dimension 2 (height)
        INDArray maskArray = Nd4j.create(new double[] {1, 1, 1, 1, 1, 0}, new int[]{1, 1, height, 1});

        //Multiply the input by the mask array, to ensure the 0s in the mask correspond to 0s in the input vector
        // as would be the case in practice...
        Nd4j.getExecutioner().exec(new BroadcastMulOp(inToBeMasked, maskArray, inToBeMasked, 0, 2));

        net.setLayerMaskArrays(maskArray, null);
        INDArray outMasked = net.output(inToBeMasked);
        net.clearLayerMaskArrays();

        int numSteps = height - 1;
        INDArray subset = inToBeMasked.get(NDArrayIndex.interval(0, 0, true), NDArrayIndex.all(),
                NDArrayIndex.interval(0, numSteps), NDArrayIndex.all());
        assertArrayEquals(new long[] {1, depthIn, 5, width}, subset.shape());

        INDArray outSubset = net.output(subset);
        INDArray outMaskedSubset = outMasked.getRow(0);

        assertEquals(outSubset, outMaskedSubset);

        //Finally: check gradient calc for exceptions
        net.setLayerMaskArrays(maskArray, null);
        net.setInput(inToBeMasked);
        INDArray labels = Nd4j.create(new double[] {0, 1}, new long[]{1, 2});
        net.setLabels(labels);

        net.computeGradientAndScore();
    }
}
Example 14
Source File: CNN3DGradientCheckTest.java From deeplearning4j with Apache License 2.0
@Test
public void testDeconv3d() {
    Nd4j.getRandom().setSeed(12345);
    // Note: we checked this with a variety of parameters, but it takes a lot of time.
    int[] depths = {8, 8, 9};
    int[] heights = {8, 9, 9};
    int[] widths = {8, 8, 9};

    int[][] kernels = {{2, 2, 2}, {3, 3, 3}, {2, 3, 2}};
    int[][] strides = {{1, 1, 1}, {1, 1, 1}, {2, 2, 2}};

    Activation[] activations = {Activation.SIGMOID, Activation.TANH, Activation.IDENTITY};

    ConvolutionMode[] modes = {ConvolutionMode.Truncate, ConvolutionMode.Same, ConvolutionMode.Same};
    int[] mbs = {1, 3, 2};
    Convolution3D.DataFormat[] dataFormats = new Convolution3D.DataFormat[]{
            Convolution3D.DataFormat.NCDHW, Convolution3D.DataFormat.NDHWC, Convolution3D.DataFormat.NCDHW};

    int convNIn = 2;
    int finalNOut = 2;
    int[] deconvOut = {2, 3, 4};

    for (int i = 0; i < activations.length; i++) {
        Activation afn = activations[i];
        int miniBatchSize = mbs[i];
        int depth = depths[i];
        int height = heights[i];
        int width = widths[i];
        ConvolutionMode mode = modes[i];
        int[] kernel = kernels[i];
        int[] stride = strides[i];
        Convolution3D.DataFormat df = dataFormats[i];
        int dOut = deconvOut[i];

        INDArray input;
        if (df == Convolution3D.DataFormat.NDHWC) {
            input = Nd4j.rand(new int[]{miniBatchSize, depth, height, width, convNIn});
        } else {
            input = Nd4j.rand(new int[]{miniBatchSize, convNIn, depth, height, width});
        }
        INDArray labels = Nd4j.zeros(miniBatchSize, finalNOut);
        for (int j = 0; j < miniBatchSize; j++) {
            labels.putScalar(new int[]{j, j % finalNOut}, 1.0);
        }

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .dataType(DataType.DOUBLE)
                .updater(new NoOp())
                .weightInit(new NormalDistribution(0, 0.1))
                .list()
                .layer(0, new Convolution3D.Builder().activation(afn).kernelSize(kernel)
                        .stride(stride).nIn(convNIn).nOut(dOut).hasBias(false)
                        .convolutionMode(mode).dataFormat(df)
                        .build())
                .layer(1, new Deconvolution3D.Builder().activation(afn).kernelSize(kernel)
                        .stride(stride).nOut(dOut).hasBias(false)
                        .convolutionMode(mode).dataFormat(df)
                        .build())
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nOut(finalNOut).build())
                .setInputType(InputType.convolutional3D(df, depth, height, width, convNIn)).build();

        String json = conf.toJson();
        MultiLayerConfiguration c2 = MultiLayerConfiguration.fromJson(json);
        assertEquals(conf, c2);

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        String msg = "DataFormat = " + df + ", minibatch size = " + miniBatchSize + ", activationFn=" + afn
                + ", kernel = " + Arrays.toString(kernel) + ", stride = " + Arrays.toString(stride)
                + ", mode = " + mode.toString() + ", input depth " + depth + ", input height " + height
                + ", input width " + width;

        if (PRINT_RESULTS) {
            log.info(msg);
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net).input(input)
                .labels(labels).subset(true).maxPerParam(64));

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
Example 15
Source File: GlobalPoolingMaskingTests.java From deeplearning4j with Apache License 2.0
@Test
public void testMaskLayerDataTypes() {
    for (DataType dt : new DataType[]{DataType.FLOAT16, DataType.BFLOAT16, DataType.FLOAT, DataType.DOUBLE,
            DataType.INT8, DataType.INT16, DataType.INT32, DataType.INT64,
            DataType.UINT8, DataType.UINT16, DataType.UINT32, DataType.UINT64}) {
        INDArray mask = Nd4j.rand(DataType.FLOAT, 2, 10).addi(0.3).castTo(dt);

        for (DataType networkDtype : new DataType[]{DataType.FLOAT16, DataType.BFLOAT16, DataType.FLOAT,
                DataType.DOUBLE}) {

            INDArray in = Nd4j.rand(networkDtype, 2, 5, 10);
            INDArray label1 = Nd4j.rand(networkDtype, 2, 5);
            INDArray label2 = Nd4j.rand(networkDtype, 2, 5, 10);

            for (PoolingType pt : PoolingType.values()) {
                //System.out.println("Net: " + networkDtype + ", mask: " + dt + ", pt=" + pt);

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .list()
                        .layer(new GlobalPoolingLayer(pt))
                        .layer(new OutputLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH)
                                .lossFunction(LossFunctions.LossFunction.MSE).build())
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                net.output(in, false, mask, null);
                net.output(in, false, mask, null);

                MultiLayerConfiguration conf2 = new NeuralNetConfiguration.Builder()
                        .list()
                        .layer(new RnnOutputLayer.Builder().nIn(5).nOut(5).activation(Activation.TANH)
                                .lossFunction(LossFunctions.LossFunction.MSE).build())
                        .build();

                MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
                net2.init();

                net2.output(in, false, mask, mask);
                net2.output(in, false, mask, mask);

                net.fit(in, label1, mask, null);
                net2.fit(in, label2, mask, mask);
            }
        }
    }
}
Example 16
Source File: AttentionLayerTest.java From deeplearning4j with Apache License 2.0
@Test
public void testSelfAttentionLayer() {
    int nIn = 3;
    int nOut = 2;
    int tsLength = 4;
    int layerSize = 4;

    for (int mb : new int[]{1, 3}) {
        for (boolean inputMask : new boolean[]{false, true}) {
            for (boolean projectInput : new boolean[]{false, true}) {
                INDArray in = Nd4j.rand(DataType.DOUBLE, new int[]{mb, nIn, tsLength});
                INDArray labels = TestUtils.randomOneHot(mb, nOut);
                String maskType = (inputMask ? "inputMask" : "none");

                INDArray inMask = null;
                if (inputMask) {
                    inMask = Nd4j.ones(mb, tsLength);
                    for (int i = 0; i < mb; i++) {
                        int firstMaskedStep = tsLength - 1 - i;
                        if (firstMaskedStep == 0) {
                            firstMaskedStep = tsLength;
                        }
                        for (int j = firstMaskedStep; j < tsLength; j++) {
                            inMask.putScalar(i, j, 0.0);
                        }
                    }
                }

                String name = "testSelfAttentionLayer() - mb=" + mb + ", tsLength = " + tsLength
                        + ", maskType=" + maskType + ", projectInput = " + projectInput;
                System.out.println("Starting test: " + name);

                MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                        .dataType(DataType.DOUBLE)
                        .activation(Activation.TANH)
                        .updater(new NoOp())
                        .weightInit(WeightInit.XAVIER)
                        .list()
                        .layer(new LSTM.Builder().nOut(layerSize).build())
                        .layer(projectInput ?
                                new SelfAttentionLayer.Builder().nOut(4).nHeads(2).projectInput(true).build()
                                : new SelfAttentionLayer.Builder().nHeads(1).projectInput(false).build())
                        .layer(new GlobalPoolingLayer.Builder().poolingType(PoolingType.MAX).build())
                        .layer(new OutputLayer.Builder().nOut(nOut).activation(Activation.SOFTMAX)
                                .lossFunction(LossFunctions.LossFunction.MCXENT).build())
                        .setInputType(InputType.recurrent(nIn))
                        .build();

                MultiLayerNetwork net = new MultiLayerNetwork(conf);
                net.init();

                boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(net)
                        .input(in).labels(labels).inputMask(inMask).subset(true).maxPerParam(100));

                assertTrue(name, gradOK);
            }
        }
    }
}
Example 17
Source File: CNNGradientCheckTest.java From deeplearning4j with Apache License 2.0
@Test
public void testCnnZeroPaddingLayer() {
    Nd4j.getRandom().setSeed(12345);
    int nOut = 4;

    int width = 6;
    int height = 6;

    int[] kernel = {2, 2};
    int[] stride = {1, 1};
    int[] padding = {0, 0};

    int[] minibatchSizes = {1, 3, 2};
    int[] inputDepths = {1, 3, 2};
    int[][] zeroPadLayer = new int[][]{{0, 0, 0, 0}, {1, 1, 0, 0}, {2, 2, 2, 2}};

    boolean nchw = format == CNN2DFormat.NCHW;

    for (int i = 0; i < minibatchSizes.length; i++) {
        int minibatchSize = minibatchSizes[i];
        int inputDepth = inputDepths[i];
        int[] zeroPad = zeroPadLayer[i];

        long[] inShape = nchw ? new long[]{minibatchSize, inputDepth, height, width}
                : new long[]{minibatchSize, height, width, inputDepth};
        INDArray input = Nd4j.rand(DataType.DOUBLE, inShape);
        INDArray labels = TestUtils.randomOneHot(minibatchSize, nOut);

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                .dataType(DataType.DOUBLE)
                .dist(new NormalDistribution(0, 1)).list()
                .layer(0, new ConvolutionLayer.Builder(kernel, stride, padding)
                        .nIn(inputDepth).nOut(3).build()) //output: (6-2+0)/1+1 = 5
                .layer(1, new ZeroPaddingLayer.Builder(zeroPad).build())
                .layer(2, new ConvolutionLayer.Builder(kernel, stride, padding).nIn(3).nOut(3).build()) //output: (6-2+0)/1+1 = 5
                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nOut(4).build())
                .setInputType(InputType.convolutional(height, width, inputDepth, format))
                .build();

        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        //Check zero padding activation shape
        org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer zpl =
                (org.deeplearning4j.nn.layers.convolution.ZeroPaddingLayer) net.getLayer(1);
        long[] expShape;
        if (nchw) {
            expShape = new long[]{minibatchSize, inputDepth, height + zeroPad[0] + zeroPad[1],
                    width + zeroPad[2] + zeroPad[3]};
        } else {
            expShape = new long[]{minibatchSize, height + zeroPad[0] + zeroPad[1],
                    width + zeroPad[2] + zeroPad[3], inputDepth};
        }
        INDArray out = zpl.activate(input, false, LayerWorkspaceMgr.noWorkspaces());
        assertArrayEquals(expShape, out.shape());

        String msg = "minibatch=" + minibatchSize + ", channels=" + inputDepth + ", zeroPad = "
                + Arrays.toString(zeroPad);

        if (PRINT_RESULTS) {
            System.out.println(msg);
            //            for (int j = 0; j < net.getnLayers(); j++)
            //                System.out.println("Layer " + j + " # params: " + net.getLayer(j).numParams());
        }

        boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

        assertTrue(msg, gradOK);

        TestUtils.testModelSerialization(net);
    }
}
Example 18
Source File: NoBiasGradientCheckTests.java From deeplearning4j with Apache License 2.0
@Test
public void testCnnWithSubsamplingNoBias() {
    int nOut = 4;

    int[] minibatchSizes = {1, 3};
    int width = 5;
    int height = 5;
    int inputDepth = 1;

    int[] kernel = {2, 2};
    int[] stride = {1, 1};
    int[] padding = {0, 0};
    int pNorm = 3;

    for (int minibatchSize : minibatchSizes) {
        INDArray input = Nd4j.rand(minibatchSize, width * height * inputDepth);
        INDArray labels = Nd4j.zeros(minibatchSize, nOut);
        for (int i = 0; i < minibatchSize; i++) {
            labels.putScalar(new int[]{i, i % nOut}, 1.0);
        }

        for (boolean cnnHasBias : new boolean[]{true, false}) {

            MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder().updater(new NoOp())
                    .dataType(DataType.DOUBLE)
                    .dist(new NormalDistribution(0, 1))
                    .list()
                    .layer(new ConvolutionLayer.Builder(kernel, stride, padding).nIn(inputDepth)
                            .hasBias(false)
                            .nOut(3).build()) //output: (5-2+0)/1+1 = 4
                    .layer(new SubsamplingLayer.Builder(PoolingType.MAX)
                            .kernelSize(kernel).stride(stride).padding(padding)
                            .pnorm(pNorm).build()) //output: (4-2+0)/1+1 = 3 -> 3x3x3
                    .layer(new ConvolutionLayer.Builder(kernel, stride, padding)
                            .hasBias(cnnHasBias)
                            .nOut(2).build()) //Output: (3-2+0)/1+1 = 2
                    .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                            .activation(Activation.SOFTMAX)
                            .nOut(4).build())
                    .setInputType(InputType.convolutionalFlat(height, width, inputDepth))
                    .build();

            MultiLayerNetwork net = new MultiLayerNetwork(conf);
            net.init();

            if (cnnHasBias) {
                assertEquals(3 * 2 * kernel[0] * kernel[1] + 2, net.getLayer(2).numParams());
            } else {
                assertEquals(3 * 2 * kernel[0] * kernel[1], net.getLayer(2).numParams());
            }

            String msg = "testCnnWithSubsamplingNoBias(), minibatch = " + minibatchSize + ", cnnHasBias = "
                    + cnnHasBias;
            System.out.println(msg);

            boolean gradOK = GradientCheckUtil.checkGradients(net, DEFAULT_EPS, DEFAULT_MAX_REL_ERROR,
                    DEFAULT_MIN_ABS_ERROR, PRINT_RESULTS, RETURN_ON_FIRST_FAILURE, input, labels);

            assertTrue(msg, gradOK);

            TestUtils.testModelSerialization(net);
        }
    }
}
Example 19
Source File: GradientCheckTestsMasking.java From deeplearning4j with Apache License 2.0
@Test
public void testBidirectionalLSTMMasking() {
    Nd4j.getRandom().setSeed(12345L);

    int timeSeriesLength = 5;
    int nIn = 3;
    int layerSize = 3;
    int nOut = 2;

    int miniBatchSize = 2;

    INDArray[] masks = new INDArray[] {
            Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {1, 1, 1, 0, 0}}),
            Nd4j.create(new double[][] {{1, 1, 1, 1, 1}, {0, 1, 1, 1, 1}})};

    int testNum = 0;
    for (INDArray mask : masks) {

        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .updater(new NoOp())
                .dataType(DataType.DOUBLE)
                .dist(new NormalDistribution(0, 1.0)).seed(12345L).list()
                .layer(0, new SimpleRnn.Builder().nIn(nIn).nOut(2).activation(Activation.TANH).build())
                .layer(1, new GravesBidirectionalLSTM.Builder().nIn(2).nOut(layerSize)
                        .activation(Activation.TANH).build())
                .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .activation(Activation.SOFTMAX).nIn(layerSize).nOut(nOut).build())
                .build();

        MultiLayerNetwork mln = new MultiLayerNetwork(conf);
        mln.init();

        INDArray input = Nd4j.rand(new int[]{miniBatchSize, nIn, timeSeriesLength}, 'f').subi(0.5);

        INDArray labels = TestUtils.randomOneHotTimeSeries(miniBatchSize, nOut, timeSeriesLength);

        if (PRINT_RESULTS) {
            System.out.println("testBidirectionalLSTMMasking() - testNum = " + testNum++);
        }

        boolean gradOK = GradientCheckUtil.checkGradients(new GradientCheckUtil.MLNConfig().net(mln).input(input)
                .labels(labels).inputMask(mask).labelMask(mask).subset(true).maxPerParam(12));

        assertTrue(gradOK);
        TestUtils.testModelSerialization(mln);
    }
}
Example 20
Source File: Word2VecTestsSmall.java From deeplearning4j with Apache License 2.0
@Test(timeout = 300000)
public void testW2VEmbeddingLayerInit() throws Exception {
    Nd4j.setDefaultDataTypes(DataType.FLOAT, DataType.FLOAT);

    val inputFile = Resources.asFile("big/raw_sentences.txt");
    val iter = ParagraphVectorsTest.getIterator(isIntegrationTests(), inputFile);
    //        val iter = new BasicLineIterator(inputFile);
    val t = new DefaultTokenizerFactory();
    t.setTokenPreProcessor(new CommonPreprocessor());

    Word2Vec vec = new Word2Vec.Builder()
            .minWordFrequency(1)
            .epochs(1)
            .layerSize(300)
            .limitVocabularySize(1) // Limit the vocab size to 2 words
            .windowSize(5)
            .allowParallelTokenization(true)
            .batchSize(512)
            .learningRate(0.025)
            .minLearningRate(0.0001)
            .negativeSample(0.0)
            .sampling(0.0)
            .useAdaGrad(false)
            .useHierarchicSoftmax(true)
            .iterations(1)
            .useUnknown(true) // Using UNK with limited vocab size causes the issue
            .seed(42)
            .iterate(iter)
            .workers(4)
            .tokenizerFactory(t).build();

    vec.fit();

    INDArray w = vec.lookupTable().getWeights();
    System.out.println(w);

    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(12345).list()
            .layer(new EmbeddingLayer.Builder().weightInit(vec).build())
            .layer(new DenseLayer.Builder().activation(Activation.TANH).nIn(w.size(1)).nOut(3).build())
            .layer(new OutputLayer.Builder().lossFunction(LossFunctions.LossFunction.MSE).nIn(3)
                    .nOut(4).build())
            .build();

    final MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    INDArray w0 = net.getParam("0_W");
    assertEquals(w, w0);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    ModelSerializer.writeModel(net, baos, true);
    byte[] bytes = baos.toByteArray();

    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    final MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(bais, true);

    assertEquals(net.getLayerWiseConfigurations(), restored.getLayerWiseConfigurations());
    assertTrue(net.params().equalsWithEps(restored.params(), 2e-3));
}