Java Code Examples for org.deeplearning4j.nn.conf.ComputationGraphConfiguration#GraphBuilder
The following examples show how to use org.deeplearning4j.nn.conf.ComputationGraphConfiguration#GraphBuilder.
You can go to the original project or source file by following the links above each example.
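All of the examples below follow the same basic pattern, so here is a minimal, self-contained sketch of it first. This is an illustrative sketch, not code from any of the projects below; the layer names and sizes are invented for demonstration. The workflow: obtain a GraphBuilder via NeuralNetConfiguration.Builder().graphBuilder(), declare named inputs, add layers wired to their inputs by name, declare outputs, build the ComputationGraphConfiguration, and initialize a ComputationGraph from it.

// Minimal GraphBuilder sketch; names and sizes are illustrative, not from the examples below.
import org.deeplearning4j.nn.conf.ComputationGraphConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class GraphBuilderSketch {
    public static void main(String[] args) {
        ComputationGraphConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(123)
                .graphBuilder()
                .addInputs("in")                // declare the graph input by name
                .addLayer("dense", new DenseLayer.Builder()
                        .nIn(4).nOut(8).activation(Activation.RELU).build(), "in")
                .addLayer("out", new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                        .nIn(8).nOut(3).activation(Activation.SOFTMAX).build(), "dense")
                .setOutputs("out")              // declare which vertices are outputs
                .build();
        ComputationGraph model = new ComputationGraph(conf);
        model.init();
    }
}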
Example 1
Source File: Dl4jMlpClassifier.java From wekaDeeplearning4j with GNU General Public License v3.0
/**
 * Build the multilayer network defined by the network configuration and the list of layers.
 */
protected void createModel() throws Exception {
  final INDArray features = getFirstBatchFeatures(trainData);
  ComputationGraphConfiguration.GraphBuilder gb =
      netConfig.builder().seed(getSeed()).graphBuilder();

  // Set output size
  final Layer lastLayer = layers[layers.length - 1];
  final int nOut = trainData.numClasses();
  if (lastLayer instanceof FeedForwardLayer) {
    ((FeedForwardLayer) lastLayer).setNOut(nOut);
  }

  if (getInstanceIterator() instanceof CnnTextEmbeddingInstanceIterator) {
    makeCnnTextLayerSetup(gb);
  } else {
    makeDefaultLayerSetup(gb);
  }

  gb.setInputTypes(InputType.inferInputType(features));
  ComputationGraphConfiguration conf = gb.build();
  ComputationGraph model = new ComputationGraph(conf);
  model.init();
  this.model = model;
}
Example 2
Source File: DarknetHelper.java From deeplearning4j with Apache License 2.0
public static ComputationGraphConfiguration.GraphBuilder addLayers(
    ComputationGraphConfiguration.GraphBuilder graphBuilder, int layerNumber, String input,
    int filterSize, int nIn, int nOut, int poolSize, int poolStride) {
  graphBuilder
      .addLayer("convolution2d_" + layerNumber,
          new ConvolutionLayer.Builder(filterSize, filterSize)
              .nIn(nIn)
              .nOut(nOut)
              .weightInit(WeightInit.XAVIER)
              .convolutionMode(ConvolutionMode.Same)
              .hasBias(false)
              .stride(1, 1)
              .activation(Activation.IDENTITY)
              .build(),
          input)
      .addLayer("batchnormalization_" + layerNumber,
          new BatchNormalization.Builder()
              .nIn(nOut).nOut(nOut)
              .weightInit(WeightInit.XAVIER)
              .activation(Activation.IDENTITY)
              .build(),
          "convolution2d_" + layerNumber)
      .addLayer("activation_" + layerNumber,
          new ActivationLayer.Builder()
              .activation(new ActivationLReLU(0.1))
              .build(),
          "batchnormalization_" + layerNumber);
  if (poolSize > 0) {
    graphBuilder
        .addLayer("maxpooling2d_" + layerNumber,
            new SubsamplingLayer.Builder()
                .kernelSize(poolSize, poolSize)
                .stride(poolStride, poolStride)
                .convolutionMode(ConvolutionMode.Same)
                .build(),
            "activation_" + layerNumber);
  }
  return graphBuilder;
}
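As a quick illustration of how such a helper composes, the following hypothetical call adds one conv/batch-norm/LReLU block with 2x2 max pooling on top of the graph input. The builder setup and the concrete sizes here are assumptions for demonstration, not code from the original file:

// Hypothetical usage sketch; "input" and the sizes below are illustrative assumptions.
ComputationGraphConfiguration.GraphBuilder gb = new NeuralNetConfiguration.Builder()
    .graphBuilder()
    .addInputs("input");
// layerNumber=1, input="input", 3x3 kernel, 3 -> 32 channels, 2x2 max-pool with stride 2
DarknetHelper.addLayers(gb, 1, "input", 3, 3, 32, 2, 2);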
Example 3
Source File: FaceNetHelper.java From deeplearning4j with Apache License 2.0
public static ComputationGraphConfiguration.GraphBuilder appendGraph(
    ComputationGraphConfiguration.GraphBuilder graph, String moduleLayerName, int inputSize,
    int[] kernelSize, int[] kernelStride, int[] outputSize, int[] reduceSize,
    SubsamplingLayer.PoolingType poolingType, int poolSize, int poolStride,
    Activation transferFunction, String inputLayer) {
  return appendGraph(graph, moduleLayerName, inputSize, kernelSize, kernelStride, outputSize,
      reduceSize, poolingType, 0, poolSize, poolStride, transferFunction, inputLayer);
}
Example 4
Source File: RnnSequenceClassifier.java From wekaDeeplearning4j with GNU General Public License v3.0
@Override
protected void createModel() throws Exception {
  final INDArray features = getFirstBatchFeatures(trainData);
  log.info("Feature shape: {}", features.shape());
  ComputationGraphConfiguration.GraphBuilder gb =
      netConfig
          .builder()
          .seed(getSeed())
          .graphBuilder()
          .backpropType(BackpropType.TruncatedBPTT)
          .tBPTTBackwardLength(tBPTTbackwardLength)
          .tBPTTForwardLength(tBPTTforwardLength);

  // Set output size
  final Layer lastLayer = layers[layers.length - 1];
  final int nOut = trainData.numClasses();
  if (lastLayer.getBackend() instanceof RnnOutputLayer) {
    ((weka.dl4j.layers.RnnOutputLayer) lastLayer).setNOut(nOut);
  }

  String currentInput = "input";
  gb.addInputs(currentInput);

  // Collect layers
  for (Layer layer : layers) {
    String lName = layer.getLayerName();
    gb.addLayer(lName, layer.getBackend().clone(), currentInput);
    currentInput = lName;
  }
  gb.setOutputs(currentInput);
  gb.setInputTypes(InputType.inferInputType(features));
  ComputationGraphConfiguration conf = gb.build();
  ComputationGraph model = new ComputationGraph(conf);
  model.init();
  this.model = model;
}
Example 5
Source File: ComputationGraphSpace.java From deeplearning4j with Apache License 2.0
@Override
public GraphConfiguration getValue(double[] values) {
  // Create ComputationGraphConfiguration...
  NeuralNetConfiguration.Builder builder = randomGlobalConf(values);
  ComputationGraphConfiguration.GraphBuilder graphBuilder = builder.graphBuilder();
  graphBuilder.addInputs(this.networkInputs);
  graphBuilder.setOutputs(this.networkOutputs);

  if (inputTypes != null)
    graphBuilder.setInputTypes(inputTypes.getValue(values));

  // Build/add our layers and vertices:
  for (LayerConf c : layerSpaces) {
    org.deeplearning4j.nn.conf.layers.Layer l = c.layerSpace.getValue(values);
    graphBuilder.addLayer(c.getLayerName(), l, c.getPreProcessor(), c.getInputs());
  }
  for (VertexConf gv : vertices) {
    graphBuilder.addVertex(gv.getVertexName(), gv.getGraphVertex(), gv.getInputs());
  }

  if (backpropType != null)
    graphBuilder.backpropType(backpropType.getValue(values));
  if (tbpttFwdLength != null)
    graphBuilder.tBPTTForwardLength(tbpttFwdLength.getValue(values));
  if (tbpttBwdLength != null)
    graphBuilder.tBPTTBackwardLength(tbpttBwdLength.getValue(values));
  graphBuilder.validateOutputLayerConfig(validateOutputLayerConfig);

  ComputationGraphConfiguration configuration = graphBuilder.build();
  if (trainingWorkspaceMode != null)
    configuration.setTrainingWorkspaceMode(trainingWorkspaceMode);
  if (inferenceWorkspaceMode != null)
    configuration.setInferenceWorkspaceMode(inferenceWorkspaceMode);
  return new GraphConfiguration(configuration, earlyStoppingConfiguration, numEpochs);
}
Example 6
Source File: FaceNetHelper.java From deeplearning4j with Apache License 2.0
public static ComputationGraphConfiguration.GraphBuilder appendGraph(
    ComputationGraphConfiguration.GraphBuilder graph, String moduleLayerName, int inputSize,
    int[] kernelSize, int[] kernelStride, int[] outputSize, int[] reduceSize,
    SubsamplingLayer.PoolingType poolingType, Activation transferFunction, String inputLayer) {
  return appendGraph(graph, moduleLayerName, inputSize, kernelSize, kernelStride, outputSize,
      reduceSize, poolingType, 0, 3, 1, transferFunction, inputLayer);
}
Example 7
Source File: NASNetHelper.java From deeplearning4j with Apache License 2.0
public static String sepConvBlock(ComputationGraphConfiguration.GraphBuilder graphBuilder,
    int filters, int kernelSize, int stride, String blockId, String input) {
  String prefix = "sepConvBlock" + blockId;

  graphBuilder
      .addLayer(prefix + "_act", new ActivationLayer(Activation.RELU), input)
      .addLayer(prefix + "_sepconv1",
          new SeparableConvolution2D.Builder(kernelSize, kernelSize)
              .stride(stride, stride).nOut(filters).hasBias(false)
              .convolutionMode(ConvolutionMode.Same).build(),
          prefix + "_act")
      .addLayer(prefix + "_conv1_bn",
          new BatchNormalization.Builder().eps(1e-3).gamma(0.9997).build(),
          prefix + "_sepconv1")
      .addLayer(prefix + "_act2", new ActivationLayer(Activation.RELU), prefix + "_conv1_bn")
      .addLayer(prefix + "_sepconv2",
          new SeparableConvolution2D.Builder(kernelSize, kernelSize)
              .stride(stride, stride).nOut(filters).hasBias(false)
              .convolutionMode(ConvolutionMode.Same).build(),
          prefix + "_act2")
      .addLayer(prefix + "_conv2_bn",
          new BatchNormalization.Builder().eps(1e-3).gamma(0.9997).build(),
          prefix + "_sepconv2");

  return prefix + "_conv2_bn";
}
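Note the return value: the method hands back the name of its final batch-normalization layer so that callers can use it as the input name of the next block; reductionA in Example 17 below chains sepConvBlock outputs this way.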
Example 8
Source File: FaceNetHelper.java From deeplearning4j with Apache License 2.0
public static ComputationGraphConfiguration.GraphBuilder appendGraph(
    ComputationGraphConfiguration.GraphBuilder graph, String moduleLayerName, int inputSize,
    int[] kernelSize, int[] kernelStride, int[] outputSize, int[] reduceSize,
    SubsamplingLayer.PoolingType poolingType, int pNorm, Activation transferFunction,
    String inputLayer) {
  return appendGraph(graph, moduleLayerName, inputSize, kernelSize, kernelStride, outputSize,
      reduceSize, poolingType, pNorm, 3, 1, transferFunction, inputLayer);
}
Example 9
Source File: DarknetHelper.java From deeplearning4j with Apache License 2.0
public static ComputationGraphConfiguration.GraphBuilder addLayers(
    ComputationGraphConfiguration.GraphBuilder graphBuilder, int layerNumber, int filterSize,
    int nIn, int nOut, int poolSize, int poolStride) {
  String input = "maxpooling2d_" + (layerNumber - 1);
  if (!graphBuilder.getVertices().containsKey(input)) {
    input = "activation_" + (layerNumber - 1);
  }
  if (!graphBuilder.getVertices().containsKey(input)) {
    input = "concatenate_" + (layerNumber - 1);
  }
  if (!graphBuilder.getVertices().containsKey(input)) {
    input = "input";
  }
  return addLayers(graphBuilder, layerNumber, input, filterSize, nIn, nOut, poolSize, poolStride);
}
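This overload infers its input vertex rather than taking it as a parameter: it probes the builder's vertex map for the previous layer's max-pooling output, then its activation output, then a concatenate vertex, and finally falls back to the graph input "input", before delegating to the overload shown in Example 2.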
Example 10
Source File: InceptionResNetHelper.java From deeplearning4j with Apache License 2.0
/**
 * Append Inception-ResNet B to a computation graph.
 * @param graph the graph builder to append to
 * @param blockName the name prefix for the block's layers
 * @param scale the number of residual sub-blocks to chain
 * @param activationScale the scaling factor applied before the residual addition
 * @param input the name of the input vertex
 * @return the graph builder with the block appended
 */
public static ComputationGraphConfiguration.GraphBuilder inceptionV1ResB(
    ComputationGraphConfiguration.GraphBuilder graph, String blockName, int scale,
    double activationScale, String input) {
  // first add the activation layer
  graph.addLayer(nameLayer(blockName, "activation1", 0),
      new ActivationLayer.Builder().activation(Activation.TANH).build(), input);

  // loop and add each subsequent resnet block
  String previousBlock = nameLayer(blockName, "activation1", 0);
  for (int i = 1; i <= scale; i++) {
    graph
        // 1x1
        .addLayer(nameLayer(blockName, "cnn1", i),
            new ConvolutionLayer.Builder(new int[] {1, 1})
                .convolutionMode(ConvolutionMode.Same).nIn(576).nOut(128)
                .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
                .build(),
            previousBlock)
        .addLayer(nameLayer(blockName, "batch1", i),
            new BatchNormalization.Builder(false).decay(0.995).eps(0.001).nIn(128)
                .nOut(128).build(),
            nameLayer(blockName, "cnn1", i))
        // 1x1 -> 3x3 -> 3x3
        .addLayer(nameLayer(blockName, "cnn2", i),
            new ConvolutionLayer.Builder(new int[] {1, 1})
                .convolutionMode(ConvolutionMode.Same).nIn(576).nOut(128)
                .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
                .build(),
            previousBlock)
        .addLayer(nameLayer(blockName, "batch2", i),
            new BatchNormalization.Builder(false).decay(0.995).eps(0.001).nIn(128)
                .nOut(128).build(),
            nameLayer(blockName, "cnn2", i))
        .addLayer(nameLayer(blockName, "cnn3", i),
            new ConvolutionLayer.Builder(new int[] {1, 3})
                .convolutionMode(ConvolutionMode.Same).nIn(128).nOut(128)
                .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
                .build(),
            nameLayer(blockName, "batch2", i))
        .addLayer(nameLayer(blockName, "batch3", i),
            new BatchNormalization.Builder(false).decay(0.995).eps(0.001).nIn(128)
                .nOut(128).build(),
            nameLayer(blockName, "cnn3", i))
        .addLayer(nameLayer(blockName, "cnn4", i),
            new ConvolutionLayer.Builder(new int[] {3, 1})
                .convolutionMode(ConvolutionMode.Same).nIn(128).nOut(128)
                .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
                .build(),
            nameLayer(blockName, "batch3", i))
        .addLayer(nameLayer(blockName, "batch4", i),
            new BatchNormalization.Builder(false).decay(0.995).eps(0.001).nIn(128)
                .nOut(128).build(),
            nameLayer(blockName, "cnn4", i))
        // --> 1x1 --> scaling -->
        .addVertex(nameLayer(blockName, "merge1", i), new MergeVertex(),
            nameLayer(blockName, "batch1", i), nameLayer(blockName, "batch4", i))
        .addLayer(nameLayer(blockName, "cnn5", i),
            new ConvolutionLayer.Builder(new int[] {1, 1})
                .convolutionMode(ConvolutionMode.Same).nIn(256).nOut(576)
                .cudnnAlgoMode(ConvolutionLayer.AlgoMode.NO_WORKSPACE)
                .build(),
            nameLayer(blockName, "merge1", i))
        .addLayer(nameLayer(blockName, "batch5", i),
            new BatchNormalization.Builder(false).decay(0.995).eps(0.001).nIn(576)
                .nOut(576).build(),
            nameLayer(blockName, "cnn5", i))
        .addVertex(nameLayer(blockName, "scaling", i), new ScaleVertex(activationScale),
            nameLayer(blockName, "batch5", i))
        // -->
        .addLayer(nameLayer(blockName, "shortcut-identity", i),
            new ActivationLayer.Builder().activation(Activation.IDENTITY).build(),
            previousBlock)
        .addVertex(nameLayer(blockName, "shortcut", i),
            new ElementWiseVertex(ElementWiseVertex.Op.Add),
            nameLayer(blockName, "scaling", i),
            nameLayer(blockName, "shortcut-identity", i));

    // leave the last vertex as the block name for convenience
    if (i == scale)
      graph.addLayer(blockName,
          new ActivationLayer.Builder().activation(Activation.TANH).build(),
          nameLayer(blockName, "shortcut", i));
    else
      graph.addLayer(nameLayer(blockName, "activation", i),
          new ActivationLayer.Builder().activation(Activation.TANH).build(),
          nameLayer(blockName, "shortcut", i));

    previousBlock = nameLayer(blockName, "activation", i);
  }
  return graph;
}
Example 11
Source File: DarknetHelper.java From deeplearning4j with Apache License 2.0
public static ComputationGraphConfiguration.GraphBuilder addLayers(
    ComputationGraphConfiguration.GraphBuilder graphBuilder, int layerNumber, int filterSize,
    int nIn, int nOut, int poolSize) {
  return addLayers(graphBuilder, layerNumber, filterSize, nIn, nOut, poolSize, poolSize);
}
Example 12
Source File: LocallyConnectedLayerTest.java From deeplearning4j with Apache License 2.0
@Test
public void testLocallyConnected() {
  for (DataType globalDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
    Nd4j.setDefaultDataTypes(globalDtype, globalDtype);
    for (DataType networkDtype : new DataType[]{DataType.DOUBLE, DataType.FLOAT, DataType.HALF}) {
      assertEquals(globalDtype, Nd4j.dataType());
      assertEquals(globalDtype, Nd4j.defaultFloatingPointType());

      for (int test = 0; test < 2; test++) {
        String msg = "Global dtype: " + globalDtype + ", network dtype: " + networkDtype
            + ", test=" + test;

        ComputationGraphConfiguration.GraphBuilder b = new NeuralNetConfiguration.Builder()
            .dataType(networkDtype)
            .seed(123)
            .updater(new NoOp())
            .weightInit(WeightInit.XAVIER)
            .convolutionMode(ConvolutionMode.Same)
            .graphBuilder();

        INDArray[] in;
        INDArray label;
        switch (test) {
          case 0:
            b.addInputs("in")
                .addLayer("1", new LSTM.Builder().nOut(5).build(), "in")
                .addLayer("2", new LocallyConnected1D.Builder().kernelSize(2).nOut(4).build(), "1")
                .addLayer("out", new RnnOutputLayer.Builder().nOut(10).build(), "2")
                .setOutputs("out")
                .setInputTypes(InputType.recurrent(5, 4));
            in = new INDArray[]{Nd4j.rand(networkDtype, 2, 5, 4)};
            label = TestUtils.randomOneHotTimeSeries(2, 10, 4).castTo(networkDtype);
            break;
          case 1:
            b.addInputs("in")
                .addLayer("1", new ConvolutionLayer.Builder().kernelSize(2, 2).nOut(5)
                    .convolutionMode(ConvolutionMode.Same).build(), "in")
                .addLayer("2", new LocallyConnected2D.Builder().kernelSize(2, 2).nOut(5).build(), "1")
                .addLayer("out", new OutputLayer.Builder().nOut(10).build(), "2")
                .setOutputs("out")
                // .setInputTypes(InputType.convolutional(28, 28, 1));
                // in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 28, 28)};
                .setInputTypes(InputType.convolutional(8, 8, 1));
            in = new INDArray[]{Nd4j.rand(networkDtype, 2, 1, 8, 8)};
            label = TestUtils.randomOneHot(2, 10).castTo(networkDtype);
            break;
          default:
            throw new RuntimeException();
        }

        ComputationGraph net = new ComputationGraph(b.build());
        net.init();

        INDArray out = net.outputSingle(in);
        assertEquals(msg, networkDtype, out.dataType());

        Map<String, INDArray> ff = net.feedForward(in, false);
        for (Map.Entry<String, INDArray> e : ff.entrySet()) {
          if (e.getKey().equals("in"))
            continue;
          String s = msg + " - layer: " + e.getKey();
          assertEquals(s, networkDtype, e.getValue().dataType());
        }

        net.setInputs(in);
        net.setLabels(label);
        net.computeGradientAndScore();

        net.fit(new MultiDataSet(in, new INDArray[]{label}));
      }
    }
  }
}
Example 13
Source File: FaceNetSmallV2Model.java From Java-Machine-Learning-for-Computer-Vision with MIT License
public ComputationGraphConfiguration conf() {
  ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder().seed(seed)
      .activation(Activation.IDENTITY)
      .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
      .updater(updater)
      .weightInit(WeightInit.RELU)
      .l2(5e-5)
      .miniBatch(true)
      .graphBuilder();

  graph.addInputs("input1")
      .addLayer("pad1", zeroPadding(3), "input1")
      .addLayer("conv1", convolution(7, inputShape[0], 64, 2), "pad1")
      .addLayer("bn1", batchNorm(64), "conv1")
      .addLayer(nextReluId(), relu(), "bn1")
      .addLayer("pad2", zeroPadding(1), lastReluId())
      // pool -> norm
      .addLayer("pool1", new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX,
              new int[]{3, 3}, new int[]{2, 2})
              .convolutionMode(ConvolutionMode.Truncate)
              .build(), "pad2")
      // Inception 2
      .addLayer("conv2", convolution(1, 64, 64), "pool1")
      .addLayer("bn2", batchNorm(64), "conv2")
      .addLayer(nextReluId(), relu(), "bn2")
      .addLayer("pad3", zeroPadding(1), lastReluId())
      .addLayer("conv3", convolution(3, 64, 192), "pad3")
      .addLayer("bn3", batchNorm(192), "conv3")
      .addLayer(nextReluId(), relu(), "bn3")
      .addLayer("pad4", zeroPadding(1), lastReluId())
      .addLayer("pool2", new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX,
              new int[]{3, 3}, new int[]{2, 2})
              .convolutionMode(ConvolutionMode.Truncate)
              .build(), "pad4");

  buildBlock3a(graph);
  buildBlock3b(graph);
  buildBlock3c(graph);
  buildBlock4a(graph);
  buildBlock4e(graph);
  buildBlock5a(graph);
  buildBlock5b(graph);

  graph.addLayer("avgpool", new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.AVG,
          new int[]{3, 3}, new int[]{1, 1})
          .convolutionMode(ConvolutionMode.Truncate)
          .build(), "inception_5b")
      .addLayer("dense", new DenseLayer.Builder().nIn(736).nOut(encodings)
          .activation(Activation.IDENTITY).build(), "avgpool")
      .addVertex("encodings", new L2NormalizeVertex(new int[]{}, 1e-12), "dense")
      .setInputTypes(InputType.convolutional(96, 96, inputShape[0])).pretrain(true);

  /* Uncomment when training the network; graph.setOutputs should then be "lossLayer":
  .addLayer("lossLayer", new CenterLossOutputLayer.Builder()
      .lossFunction(LossFunctions.LossFunction.SQUARED_LOSS)
      .activation(Activation.SOFTMAX).nIn(128).nOut(numClasses).lambda(1e-4).alpha(0.9)
      .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer).build(), "embeddings")
  */
  graph.setOutputs("encodings");

  return graph.build();
}
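A note on the encoding head above: the L2NormalizeVertex constrains the "encodings" output to unit L2 norm, which matches the usual FaceNet-style arrangement in which embeddings are compared by distance on the unit hypersphere.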
Example 14
Source File: ActorCriticFactoryCompGraphStdConv.java From deeplearning4j with Apache License 2.0
public ActorCriticCompGraph buildActorCritic(int[] shapeInputs, int numOutputs) {
  if (shapeInputs.length == 1)
    throw new AssertionError("Impossible to apply convolutional layer on a shape == 1");

  int h = (((shapeInputs[1] - 8) / 4 + 1) - 4) / 2 + 1;
  int w = (((shapeInputs[2] - 8) / 4 + 1) - 4) / 2 + 1;

  ComputationGraphConfiguration.GraphBuilder confB =
      new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED)
          .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
          .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam())
          .weightInit(WeightInit.XAVIER)
          .l2(conf.getL2()).graphBuilder()
          .addInputs("input")
          .addLayer("0", new ConvolutionLayer.Builder(8, 8).nIn(shapeInputs[0]).nOut(16)
              .stride(4, 4).activation(Activation.RELU).build(), "input");

  confB.addLayer("1", new ConvolutionLayer.Builder(4, 4).nIn(16).nOut(32).stride(2, 2)
      .activation(Activation.RELU).build(), "0");
  confB.addLayer("2", new DenseLayer.Builder().nIn(w * h * 32).nOut(256)
      .activation(Activation.RELU).build(), "1");

  if (conf.isUseLSTM()) {
    confB.addLayer("3", new LSTM.Builder().nIn(256).nOut(256)
        .activation(Activation.TANH).build(), "2");
    confB.addLayer("value", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
        .activation(Activation.IDENTITY).nIn(256).nOut(1).build(), "3");
    confB.addLayer("softmax", new RnnOutputLayer.Builder(new ActorCriticLoss())
        .activation(Activation.SOFTMAX).nIn(256).nOut(numOutputs).build(), "3");
  } else {
    confB.addLayer("value", new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
        .activation(Activation.IDENTITY).nIn(256).nOut(1).build(), "2");
    confB.addLayer("softmax", new OutputLayer.Builder(new ActorCriticLoss())
        .activation(Activation.SOFTMAX).nIn(256).nOut(numOutputs).build(), "2");
  }
  confB.setOutputs("value", "softmax");

  if (conf.isUseLSTM()) {
    confB.inputPreProcessor("0", new RnnToCnnPreProcessor(shapeInputs[1], shapeInputs[2], shapeInputs[0]));
    confB.inputPreProcessor("2", new CnnToFeedForwardPreProcessor(h, w, 32));
    confB.inputPreProcessor("3", new FeedForwardToRnnPreProcessor());
  } else {
    confB.setInputTypes(InputType.convolutional(shapeInputs[1], shapeInputs[2], shapeInputs[0]));
  }

  ComputationGraphConfiguration cgconf = confB.build();
  ComputationGraph model = new ComputationGraph(cgconf);
  model.init();
  if (conf.getListeners() != null) {
    model.setListeners(conf.getListeners());
  } else {
    model.setListeners(new ScoreIterationListener(Constants.NEURAL_NET_ITERATION_LISTENER));
  }
  return new ActorCriticCompGraph(model);
}
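A note on the arithmetic above: h and w apply the standard convolution output-size formula (size - kernel) / stride + 1 twice, first for the 8x8 stride-4 convolution and then for the 4x4 stride-2 convolution, so that the dense layer's nIn of w * h * 32 matches the flattened size of the last convolutional output.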
Example 15
Source File: ActorCriticFactoryCompGraphStdDense.java From deeplearning4j with Apache License 2.0
public ActorCriticCompGraph buildActorCritic(int[] numInputs, int numOutputs) {
  int nIn = 1;
  for (int i : numInputs) {
    nIn *= i;
  }
  ComputationGraphConfiguration.GraphBuilder confB =
      new NeuralNetConfiguration.Builder().seed(Constants.NEURAL_NET_SEED)
          .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
          .updater(conf.getUpdater() != null ? conf.getUpdater() : new Adam())
          .weightInit(WeightInit.XAVIER)
          .l2(conf.getL2()).graphBuilder()
          .setInputTypes(conf.isUseLSTM() ? InputType.recurrent(nIn) : InputType.feedForward(nIn))
          .addInputs("input")
          .addLayer("0", new DenseLayer.Builder().nIn(nIn)
              .nOut(conf.getNumHiddenNodes()).activation(Activation.RELU).build(), "input");

  for (int i = 1; i < conf.getNumLayers(); i++) {
    confB.addLayer(i + "", new DenseLayer.Builder().nIn(conf.getNumHiddenNodes())
        .nOut(conf.getNumHiddenNodes()).activation(Activation.RELU).build(), (i - 1) + "");
  }

  if (conf.isUseLSTM()) {
    confB.addLayer(getConf().getNumLayers() + "", new LSTM.Builder().activation(Activation.TANH)
        .nOut(conf.getNumHiddenNodes()).build(), (getConf().getNumLayers() - 1) + "");
    confB.addLayer("value", new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE)
        .activation(Activation.IDENTITY).nOut(1).build(), getConf().getNumLayers() + "");
    confB.addLayer("softmax", new RnnOutputLayer.Builder(new ActorCriticLoss())
        .activation(Activation.SOFTMAX).nOut(numOutputs).build(), getConf().getNumLayers() + "");
  } else {
    confB.addLayer("value", new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
        .activation(Activation.IDENTITY).nOut(1).build(), (getConf().getNumLayers() - 1) + "");
    confB.addLayer("softmax", new OutputLayer.Builder(new ActorCriticLoss())
        .activation(Activation.SOFTMAX).nOut(numOutputs).build(), (getConf().getNumLayers() - 1) + "");
  }
  confB.setOutputs("value", "softmax");

  ComputationGraphConfiguration cgconf = confB.build();
  ComputationGraph model = new ComputationGraph(cgconf);
  model.init();
  if (conf.getListeners() != null) {
    model.setListeners(conf.getListeners());
  } else {
    model.setListeners(new ScoreIterationListener(Constants.NEURAL_NET_ITERATION_LISTENER));
  }
  return new ActorCriticCompGraph(model);
}
Example 16
Source File: NASNetHelper.java From deeplearning4j with Apache License 2.0
public static String adjustBlock(ComputationGraphConfiguration.GraphBuilder graphBuilder,
    int filters, String blockId, String input) {
  return adjustBlock(graphBuilder, filters, blockId, input, null);
}
Example 17
Source File: NASNetHelper.java From deeplearning4j with Apache License 2.0
public static Pair<String, String> reductionA(ComputationGraphConfiguration.GraphBuilder graphBuilder,
    int filters, String blockId, String inputX, String inputP) {
  String prefix = "reductionA" + blockId;
  String topAdjust = adjustBlock(graphBuilder, filters, prefix, inputP, inputX);

  // top block
  graphBuilder
      .addLayer(prefix + "_relu1", new ActivationLayer(Activation.RELU), topAdjust)
      .addLayer(prefix + "_conv1", new ConvolutionLayer.Builder(1, 1).stride(1, 1).nOut(filters)
          .hasBias(false).convolutionMode(ConvolutionMode.Same).build(), prefix + "_relu1")
      .addLayer(prefix + "_bn1", new BatchNormalization.Builder().eps(1e-3).gamma(0.9997)
          .build(), prefix + "_conv1");

  // block 1
  String left1 = sepConvBlock(graphBuilder, filters, 5, 2, prefix + "_left1", prefix + "_bn1");
  String right1 = sepConvBlock(graphBuilder, filters, 7, 2, prefix + "_right1", topAdjust);
  graphBuilder.addVertex(prefix + "_add1", new ElementWiseVertex(ElementWiseVertex.Op.Add),
      left1, right1);

  // block 2
  graphBuilder.addLayer(prefix + "_left2",
      new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX).kernelSize(3, 3).stride(2, 2)
          .convolutionMode(ConvolutionMode.Same).build(), prefix + "_bn1");
  String right2 = sepConvBlock(graphBuilder, filters, 3, 1, prefix + "_right2", topAdjust);
  graphBuilder.addVertex(prefix + "_add2", new ElementWiseVertex(ElementWiseVertex.Op.Add),
      prefix + "_left2", right2);

  // block 3
  graphBuilder.addLayer(prefix + "_left3",
      new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.AVG).kernelSize(3, 3).stride(2, 2)
          .convolutionMode(ConvolutionMode.Same).build(), prefix + "_bn1");
  String right3 = sepConvBlock(graphBuilder, filters, 5, 2, prefix + "_right3", topAdjust);
  graphBuilder.addVertex(prefix + "_add3", new ElementWiseVertex(ElementWiseVertex.Op.Add),
      prefix + "_left3", right3);

  // block 4
  graphBuilder
      .addLayer(prefix + "_left4", new SubsamplingLayer.Builder(PoolingType.AVG).kernelSize(3, 3)
          .stride(1, 1).convolutionMode(ConvolutionMode.Same).build(), prefix + "_add1")
      .addVertex(prefix + "_add4", new ElementWiseVertex(ElementWiseVertex.Op.Add),
          prefix + "_add2", prefix + "_left4");

  // block 5
  String left5 = sepConvBlock(graphBuilder, filters, 3, 2, prefix + "_left5", prefix + "_add1");
  graphBuilder
      .addLayer(prefix + "_right5", new SubsamplingLayer.Builder(PoolingType.MAX).kernelSize(3, 3)
          .stride(2, 2).convolutionMode(ConvolutionMode.Same).build(), prefix + "_bn1")
      .addVertex(prefix + "_add5", new ElementWiseVertex(ElementWiseVertex.Op.Add),
          left5, prefix + "_right5");

  // output
  graphBuilder.addVertex(prefix, new MergeVertex(),
      prefix + "_add2", prefix + "_add3", prefix + "_add4", prefix + "_add5");

  return new Pair<>(prefix, inputX);
}
Example 18
Source File: ResNetwork.java From FancyBing with GNU General Public License v3.0
public ComputationGraph init() {
  ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder()
      .seed(seed)
      .iterations(iterations)
      .activation(Activation.LEAKYRELU)
      .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
      .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
      .lrPolicyDecayRate(0.5)
      .learningRateDecayPolicy(LearningRatePolicy.Score)
      .updater(Adam.builder().build())
      .weightInit(WeightInit.XAVIER)
      .learningRate(0.02)
      .miniBatch(miniBatch)
      .convolutionMode(ConvolutionMode.Truncate)
      .trainingWorkspaceMode(WorkspaceMode.SINGLE)
      .inferenceWorkspaceMode(WorkspaceMode.SINGLE)
      .graphBuilder();

  // set input & output
  graph.addInputs("input").setInputTypes(InputType.convolutionalFlat(height, width, channels))
      .addLayer("policy", new OutputLayer.Builder()
          .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
          .activation(Activation.SOFTMAX)
          .nOut(numClasses).build(), "embeddings_c")
      .addLayer("value1", new OutputLayer.Builder()
          .lossFunction(LossFunctions.LossFunction.MSE)
          .activation(Activation.TANH)
          .nOut(1).build(), "embeddings_r1")
      // .addLayer("value2", new OutputLayer.Builder()
      //     .lossFunction(LossFunctions.LossFunction.MSE)
      //     .activation(Activation.TANH)
      //     .nOut(1).build(), "embeddings_r2")
      .setOutputs("policy", "value1") // add "value2" here if the second value head is re-enabled
      .backprop(true).pretrain(false);

  int kernelSize = 128;
  graph.addLayer("c-layer0", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1},
      new int[]{1, 1}).activation(Activation.LEAKYRELU).nOut(kernelSize).build(), "input");

  int blockNum = 8;
  String prevLayer = "c-layer0";
  for (int i = 1; i <= blockNum; i++) {
    String layerName = "c-block" + i + "-";
    graph.addLayer(layerName + "1", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1},
        new int[]{1, 1}).activation(Activation.LEAKYRELU).nOut(kernelSize).build(), prevLayer);
    graph.addLayer(layerName + "2", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1},
        new int[]{1, 1}).activation(Activation.IDENTITY).nOut(kernelSize).build(), layerName + "1");
    graph.addVertex("shortcut" + i, new ElementWiseVertex(ElementWiseVertex.Op.Add),
        layerName + "2", prevLayer);
    graph.addLayer(layerName + "3", new ActivationLayer.Builder()
        .activation(Activation.LEAKYRELU).build(), "shortcut" + i);
    prevLayer = layerName + "3";
  }

  // for classification
  graph.addLayer("embeddings_c", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1},
      new int[]{1, 1}).activation(Activation.IDENTITY).nOut(2).build(), prevLayer);

  // for value regression
  graph.addLayer("reg-c-layer1", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1},
      new int[]{1, 1}).activation(Activation.IDENTITY).nOut(1).build(), prevLayer);
  graph.addLayer("embeddings_r1", new DenseLayer.Builder().activation(Activation.IDENTITY)
      .nOut(256).build(), "reg-c-layer1");
  // graph.addLayer("reg-c-layer2", new ConvolutionLayer.Builder(new int[]{3, 3}, new int[]{1, 1},
  //     new int[]{1, 1}).activation(Activation.IDENTITY).nOut(1).build(), prevLayer);
  // graph.addLayer("embeddings_r2", new DenseLayer.Builder().activation(Activation.IDENTITY)
  //     .nOut(256).build(), "reg-c-layer2");

  ComputationGraphConfiguration conf = graph.build();
  ComputationGraph model = new ComputationGraph(conf);
  model.init();
  log.info("\nNumber of params: " + model.numParams() + "\n");
  return model;
}
Example 19
Source File: TestLastTimeStepLayer.java From deeplearning4j with Apache License 2.0
@Test
public void testMaskingAndAllMasked() {
  ComputationGraphConfiguration.GraphBuilder builder = new NeuralNetConfiguration.Builder()
      .optimizationAlgo(STOCHASTIC_GRADIENT_DESCENT)
      .weightInit(XAVIER_UNIFORM)
      .activation(TANH)
      .updater(new AdaGrad(0.01))
      .l2(0.0001)
      .seed(1234)
      .graphBuilder()
      .addInputs("in")
      .setInputTypes(InputType.recurrent(1, rnnDataFormat))
      .addLayer("RNN", new LastTimeStep(new LSTM.Builder()
          .nOut(10).dataFormat(rnnDataFormat)
          .build()), "in")
      .addLayer("dense", new DenseLayer.Builder()
          .nOut(10)
          .build(), "RNN")
      .addLayer("out", new OutputLayer.Builder()
          .activation(IDENTITY)
          .lossFunction(MSE)
          .nOut(10)
          .build(), "dense")
      .setOutputs("out");
  ComputationGraphConfiguration conf = builder.build();

  ComputationGraph cg = new ComputationGraph(conf);
  cg.init();

  INDArray f = Nd4j.rand(new long[]{1, 1, 24});
  INDArray fm1 = Nd4j.ones(1, 24);
  INDArray fm2 = Nd4j.zeros(1, 24);
  INDArray fm3 = Nd4j.zeros(1, 24);
  fm3.get(NDArrayIndex.point(0), NDArrayIndex.interval(0, 5)).assign(1);
  if (rnnDataFormat == RNNFormat.NWC) {
    f = f.permute(0, 2, 1);
  }

  INDArray[] out1 = cg.output(false, new INDArray[]{f}, new INDArray[]{fm1});
  try {
    cg.output(false, new INDArray[]{f}, new INDArray[]{fm2});
    fail("Expected exception");
  } catch (Exception e) {
    assertTrue(e.getMessage().contains("mask is all 0s"));
  }

  INDArray[] out3 = cg.output(false, new INDArray[]{f}, new INDArray[]{fm3});
  System.out.println(out1[0]);
  System.out.println(out3[0]);

  assertNotEquals(out1[0], out3[0]);
}