Java Code Examples for org.deeplearning4j.nn.workspace.LayerWorkspaceMgr#getWorkspaceName()

The following examples show how to use org.deeplearning4j.nn.workspace.LayerWorkspaceMgr#getWorkspaceName(). All examples are taken from the deeplearning4j project; the source file is noted above each snippet.
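Before the individual examples, here is a minimal sketch of the pattern they all share: ask the LayerWorkspaceMgr for the workspace name and configuration of each ArrayType involved, check whether the activations are scoped out, and pass the names and configurations to a DL4JSameDiffMemoryMgr. This is a sketch distilled from the examples below, not code from the deeplearning4j source; the workspaceMgr variable is assumed to be the LayerWorkspaceMgr passed into the layer's method, as in the examples.

    // Sketch only: workspaceMgr is assumed to be the LayerWorkspaceMgr supplied to the layer method
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);   //may be null if the type is scoped out
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    //An array type must either belong to a named workspace or be explicitly scoped out
    Preconditions.checkState(actScopedOut || wsNameOutput != null,
            "Activations must have a workspace or must be scoped out");
    //The workspace names and configurations drive the SameDiff session's memory manager
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

The backprop examples follow the same pattern with ArrayType.BP_WORKING_MEM and ArrayType.ACTIVATION_GRAD in place of the forward-pass types.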
Example 1
Source File: SameDiffLayer.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray activate(boolean training, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(false);

    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
    }

    org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf();
    bl.validateInput(input);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    if(maskArray != null){
        phMap.put(MASK_KEY, maskArray);
    } else {
        phMap.put(MASK_KEY, layerConf().onesMaskForInput(input));
    }

    //Configure memory management for SameDiff instance - use DL4J workspaces
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    Preconditions.checkState(actScopedOut || wsNameOutput != null, "Activations must have a workspace or must be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

    InferenceSession is = sameDiff.getSessions().get(Thread.currentThread().getId());
    if(is == null){
        is = new InferenceSession(sameDiff);
        sameDiff.getSessions().put(Thread.currentThread().getId(), is);
    }
    is.setMmgr(mmgr);

    Map<String,INDArray> out = sameDiff.output(phMap, outputKey);
    INDArray result = out.get(outputKey);

    //Edge case - identity activation
    //TODO there may be a cleaner way to do this...
    if(!actScopedOut && !result.data().getParentWorkspace().getId().equals(wsNameOutput)){
        result = workspaceMgr.dup(ArrayType.ACTIVATIONS, result);
    } else if(actScopedOut && result.isAttached()){
        result = result.detach();
    }


    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    return result;
}
 
Example 2
Source File: SameDiffLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);

    Gradient g = new DefaultGradient();

    INDArray dLdIn;

    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
        if (!sameDiff.hasGradientFunction()) {
            //Create when scoped out, to ensure any arrays are not in WS
            sameDiff.createGradFunction(INPUT_KEY);
        }
    }
    //Configure memory management for SameDiff instance - use DL4J workspaces
    Map<Long,InferenceSession> sessionMap = sameDiff.getFunction("grad").getSessions();
    if(!sessionMap.containsKey(Thread.currentThread().getId())){
        sessionMap.put(Thread.currentThread().getId(), new InferenceSession(sameDiff.getFunction("grad")));
    }
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.BP_WORKING_MEM);
    String wsNameActGrad = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATION_GRAD);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.BP_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATION_GRAD);

    boolean actGradScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATION_GRAD);
    Preconditions.checkState(actGradScopedOut || wsNameActGrad != null, "Activation gradients must have a workspace or be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameActGrad, confWorking, confOutput);
    sessionMap.get(Thread.currentThread().getId()).setMmgr(mmgr);


    org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer bl = (org.deeplearning4j.nn.conf.layers.samediff.SameDiffLayer) layerConf();
    bl.validateInput(input);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    phMap.put(fn.getGradPlaceholderName(), epsilon);
    if(maskArray != null){
        phMap.put(MASK_KEY, maskArray);
    } else {
        phMap.put(MASK_KEY, layerConf().onesMaskForInput(input));
    }

    List<String> requiredGrads = new ArrayList<>(paramTable.size() + 1);
    requiredGrads.add(INPUT_KEY);
    requiredGrads.addAll(paramTable.keySet());

    Map<String,INDArray> m = sameDiff.calculateGradients(phMap, requiredGrads);
    for(String s : paramTable.keySet() ){
        INDArray sdGrad = m.get(s);
        INDArray dl4jGrad = gradTable.get(s);
        dl4jGrad.assign(sdGrad);                                            //TODO OPTIMIZE THIS
        g.gradientForVariable().put(s, dl4jGrad);
    }

    dLdIn = m.get(INPUT_KEY);


    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    Pair<Gradient, INDArray> ret = new Pair<>(g, workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, dLdIn));   //TODO OPTIMIZE THIS
    return ret;
}
 
Example 3
Source File: SameDiffOutputLayer.java    From deeplearning4j with Apache License 2.0
private INDArray activateHelper(boolean activations, LayerWorkspaceMgr workspaceMgr){
    assertInputSet(false);

    //Check where the output occurs. If it's a simple loss layer (no params) this could
    // just be the input!
    if(activations && INPUT_KEY.equals(layerConf().activationsVertexName())){
        return workspaceMgr.leverageTo(ArrayType.ACTIVATIONS, input);
    }

    //TODO optimize
    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
    }

    //Configure memory management for SameDiff instance - use DL4J workspaces
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    Preconditions.checkState(actScopedOut || wsNameOutput != null, "Activations must have a workspace or must be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

    InferenceSession is = sameDiff.getSessions().get(Thread.currentThread().getId());
    if(is == null){
        is = new InferenceSession(sameDiff);
        sameDiff.getSessions().put(Thread.currentThread().getId(), is);
    }
    is.setMmgr(mmgr);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    if(!activations && layerConf().labelsRequired() && labels != null) {
        phMap.put(LABELS_KEY, labels);
    }

    String s = activations ? layerConf().activationsVertexName() : outputVar.name();

    INDArray out = sameDiff.outputSingle(phMap, s);

    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    //Edge case: vertex is just an Identity function, for example
    //TODO there may be a cleaner way to do this...
    if(!actScopedOut && !out.data().getParentWorkspace().getId().equals(wsNameOutput)){
        out = workspaceMgr.dup(ArrayType.ACTIVATIONS, out);
    } else if(actScopedOut && out.isAttached()){
        out = out.detach();
    }

    return out;
}
 
Example 4
Source File: SameDiffOutputLayer.java    From deeplearning4j with Apache License 2.0
@Override
public Pair<Gradient, INDArray> backpropGradient(INDArray epsilon, LayerWorkspaceMgr workspaceMgr) {
    assertInputSet(true);
    Preconditions.checkState(!layerConf().labelsRequired() || labels != null, "Cannot execute backprop: Labels are not set. " +
            "If labels are not required for this SameDiff output layer, override SameDiffOutputLayer.labelsRequired()" +
            " to return false instead");
    Gradient g = new DefaultGradient();

    INDArray dLdIn;
    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            //Usually doInit will be called in forward pass; not necessarily the case in output layers
            // (for efficiency, we skip output layer forward pass in MultiLayerNetwork/ComputationGraph)
            doInit();
        }
        if(sameDiff.getFunction("grad") == null)
            sameDiff.createGradFunction(INPUT_KEY);
    }

    //Configure memory management for SameDiff instance - use DL4J workspaces
    Map<Long,InferenceSession> sessionMap = sameDiff.getFunction("grad").getSessions();
    if(!sessionMap.containsKey(Thread.currentThread().getId())){
        sessionMap.put(Thread.currentThread().getId(), new InferenceSession(sameDiff.getFunction("grad")));
    }
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.BP_WORKING_MEM);
    String wsNameActGrad = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATION_GRAD);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.BP_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATION_GRAD);

    boolean actGradScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATION_GRAD);
    Preconditions.checkState(actGradScopedOut || wsNameActGrad != null, "Activation gradients must have a workspace or be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameActGrad, confWorking, confOutput);
    sessionMap.get(Thread.currentThread().getId()).setMmgr(mmgr);

    if(!sameDiff.hasGradientFunction()) {
        //Create when scoped out, to ensure any arrays are not in WS
        sameDiff.createGradFunction(INPUT_KEY);
    }

    List<String> gradVarNames = new ArrayList<>();
    gradVarNames.addAll(paramTable.keySet());
    gradVarNames.add(INPUT_KEY);

    Map<String,INDArray> phMap = new HashMap<>();
    phMap.put(INPUT_KEY, input);
    phMap.put(LABELS_KEY, labels);

    Map<String,INDArray> grads = sameDiff.calculateGradients(phMap, gradVarNames);
    for(String s : paramTable.keySet() ){
        INDArray sdGrad = grads.get(s);
        INDArray dl4jGrad = gradTable.get(s);
        dl4jGrad.assign(sdGrad);                                            //TODO OPTIMIZE THIS
        g.gradientForVariable().put(s, dl4jGrad);
        if(sdGrad.closeable()){
            sdGrad.close();
        }
    }

    dLdIn = grads.get(INPUT_KEY);

    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();

    //TODO there may be a cleaner way to do this...
    if(!actGradScopedOut && !dLdIn.data().getParentWorkspace().getId().equals(wsNameActGrad)){
        dLdIn = workspaceMgr.dup(ArrayType.ACTIVATION_GRAD, dLdIn);
    } else if(actGradScopedOut && dLdIn.isAttached()){
        dLdIn = dLdIn.detach();
    }

    return new Pair<>(g, dLdIn);
}
 
Example 5
Source File: SameDiffGraphVertex.java    From deeplearning4j with Apache License 2.0
@Override
public INDArray doForward(boolean training, LayerWorkspaceMgr workspaceMgr) {
    try(MemoryWorkspace ws = Nd4j.getWorkspaceManager().scopeOutOfWorkspaces()) {
        if (sameDiff == null) {
            doInit();
        }
    }

    Map<String,INDArray> phMap = new HashMap<>();
    config.validateInput(inputs);
    for(int i=0; i<inputs.length; i++){
        String name = config.getVertexParams().getInputs().get(i);
        final String maskName = name + "_mask";
        phMap.put(name, inputs[i]);
        if(maskArrays != null && maskArrays[i] != null) {
            phMap.put(maskName, maskArrays[i]);
        }else{
            phMap.put(maskName, createMask(dataType, inputs[i].shape()));
        }
    }


    //Configure memory management for SameDiff instance - use DL4J workspaces
    String wsNameWorking = workspaceMgr.getWorkspaceName(ArrayType.FF_WORKING_MEM);
    String wsNameOutput = workspaceMgr.getWorkspaceName(ArrayType.ACTIVATIONS);
    WorkspaceConfiguration confWorking = workspaceMgr.getConfiguration(ArrayType.FF_WORKING_MEM);
    WorkspaceConfiguration confOutput = workspaceMgr.getConfiguration(ArrayType.ACTIVATIONS);
    boolean actScopedOut = workspaceMgr.isScopedOut(ArrayType.ACTIVATIONS);
    Preconditions.checkState(actScopedOut || wsNameOutput != null, "Activations must have a workspace or must be scoped out");
    SessionMemMgr mmgr = new DL4JSameDiffMemoryMgr(wsNameWorking, wsNameOutput, confWorking, confOutput);

    InferenceSession is = sameDiff.getSessions().get(Thread.currentThread().getId());
    if(is == null){
        is = new InferenceSession(sameDiff);
        sameDiff.getSessions().put(Thread.currentThread().getId(), is);
    }
    is.setMmgr(mmgr);

    INDArray result = sameDiff.outputSingle(phMap, outputKey);

    //Edge case: "vertex" is just an identity activation, for example
    //TODO there may be a cleaner way to do this...
    if(!actScopedOut && !result.data().getParentWorkspace().getId().equals(wsNameOutput)){
        result = workspaceMgr.dup(ArrayType.ACTIVATIONS, result);
    } else if(actScopedOut && result.isAttached()){
        result = result.detach();
    }

    //Clear placeholders and op inputs to ensure no out-of-scope arrays are still referenced anywhere
    sameDiff.clearPlaceholders(true);
    sameDiff.clearOpInputs();
    return workspaceMgr.dup(ArrayType.ACTIVATIONS, result);
}