Python rnn.RNN Examples

The following are 3 code examples of rnn.RNN(), drawn from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the module rnn, or try the search function.
Example #1
Source File: main.py    From rnn-from-scratch with MIT License (5 votes)
import numpy as np

def softmax(xs):
  # Applies the Softmax function to the input array.
  return np.exp(xs) / sum(np.exp(xs))

# Initialize our RNN! 
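The example stops at the comment where the RNN would be constructed. As a quick sanity check of the softmax helper (hypothetical usage, not part of the original file), its output should be positive and sum to 1:

# Hypothetical usage of the softmax helper above -- a sanity check, not original project code.
scores = np.array([2.0, 1.0, 0.1])
probs = softmax(scores)
print(probs)        # approximately [0.659, 0.242, 0.099]
print(probs.sum())  # 1.0, up to floating-point error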
Example #2
Source File: main.py    From rnn-from-scratch with MIT License (5 votes)
def processData(data, backprop=True):
  '''
  Returns the RNN's loss and accuracy for the given data.
  - data is a dictionary mapping text to True or False.
  - backprop determines if the backward phase should be run.
  '''
  items = list(data.items())
  random.shuffle(items)

  loss = 0
  num_correct = 0

  for x, y in items:
    inputs = createInputs(x)
    target = int(y)

    # Forward
    out, _ = rnn.forward(inputs)
    probs = softmax(out)

    # Calculate loss / accuracy
    loss -= np.log(probs[target])
    num_correct += int(np.argmax(probs) == target)

    if backprop:
      # Build dL/dy
      d_L_d_y = probs
      d_L_d_y[target] -= 1

      # Backward
      rnn.backprop(d_L_d_y)

  return loss / len(data), num_correct / len(data)

# Training loop 
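The snippet ends at the comment that introduces the training loop. A minimal sketch of what such a loop might look like, assuming train_data and test_data are dictionaries in the same text-to-bool format (these names are assumptions, not taken from the original file):

# Hypothetical training loop built on processData() above -- a sketch, not the original code.
for epoch in range(1000):
  train_loss, train_acc = processData(train_data)  # forward + backward passes
  if epoch % 100 == 99:
    print('--- Epoch %d' % (epoch + 1))
    print('Train: Loss %.3f | Accuracy: %.3f' % (train_loss, train_acc))
    test_loss, test_acc = processData(test_data, backprop=False)  # evaluation only
    print('Test:  Loss %.3f | Accuracy: %.3f' % (test_loss, test_acc))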
Example #3
Source File: runNNet.py    From cs224d with MIT License (4 votes)
def test(netFile,dataSet, model='RNN', trees=None):
    if trees is None:
        trees = tr.loadTrees(dataSet)
    assert netFile is not None, "Must give model to test"
    print "Testing netFile %s"%netFile
    with open(netFile,'r') as fid:
        opts = pickle.load(fid)
        _ = pickle.load(fid)
        
        if (model=='RNTN'):
            nn = RNTN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN'):
            nn = RNN(opts.wvecDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN2'):
            nn = RNN2(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN2Drop'):
            nn = RNN2Drop(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN2DropMaxout'):
            nn = RNN2DropMaxout(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='RNN3'):
            nn = RNN3(opts.wvecDim,opts.middleDim,opts.outputDim,opts.numWords,opts.minibatch)
        elif(model=='DCNN'):
            nn = DCNN(opts.wvecDim,opts.ktop,opts.m1,opts.m2, opts.n1, opts.n2,0, opts.outputDim,opts.numWords, 2, opts.minibatch,rho=1e-4)
            trees = cnn.tree2matrix(trees)
        else:
            raise ValueError('%s is not a valid model; supported models are RNTN, RNN, RNN2, RNN2Drop, RNN2DropMaxout, RNN3, and DCNN' % model)
        
        nn.initParams()
        nn.fromFile(fid)

    print "Testing %s..."%model

    cost,correct, guess, total = nn.costAndGrad(trees,test=True)
    correct_sum = 0
    for i in xrange(0,len(correct)):        
        correct_sum+=(guess[i]==correct[i])
    
    # TODO
    # Plot the confusion matrix?

    confuse_matrix = np.zeros((5,5))
                              
    for i in range(len(correct)):
        confuse_matrix[correct[i]][guess[i]] += 1
    print "Cost %f, Acc %f"%(cost,correct_sum/float(total))
    makeconf(confuse_matrix,model)
    return cost, correct_sum/float(total)
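makeconf() is defined elsewhere in runNNet.py. A rough sketch of how the 5x5 confusion matrix could be rendered with matplotlib is shown below (an assumption for illustration, not the project's actual helper):

# Hypothetical stand-in for the project's makeconf() helper -- illustration only.
import numpy as np
import matplotlib.pyplot as plt

def plot_confusion(conf, name):
    fig, ax = plt.subplots()
    im = ax.imshow(conf, interpolation='nearest', cmap=plt.cm.Blues)
    fig.colorbar(im)
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    ax.set_title('Confusion matrix (%s)' % name)
    for i in range(conf.shape[0]):
        for j in range(conf.shape[1]):
            ax.text(j, i, int(conf[i, j]), ha='center', va='center')
    fig.savefig('confusion_%s.png' % name)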