Python autograd.numpy.full() Examples

The following are 18 code examples of autograd.numpy.full(), collected from open-source projects. Each example lists its source file, the project it comes from, and that project's license. You may also want to look at the other functions and classes available in the autograd.numpy module.
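Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what autograd.numpy.full does: it returns an array of the requested shape filled with a constant value, and, like the other wrapped NumPy functions, it can be used inside functions differentiated with autograd.grad.

import autograd.numpy as np
from autograd import grad

def weighted_sum(x):
    # np.full(shape, fill_value): an array of 0.5s with the same shape as x
    weights = np.full(x.shape, 0.5)
    return np.sum(weights * x)

print(np.full(3, 2.0))                              # [2. 2. 2.]
print(grad(weighted_sum)(np.array([1., 2., 3.])))   # [0.5 0.5 0.5]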
Example #1
Source File: rnn.py    From MLAlgorithms with MIT License
def setup(self, x_shape):
        """
        Parameters
        ----------
        x_shape : tuple of (batch size, time steps, input shape)
        """
        self.input_dim = x_shape[2]

        # Input -> Hidden
        self._params["W"] = self._params.init((self.input_dim, self.hidden_dim))
        # Bias
        self._params["b"] = np.full((self.hidden_dim,), self._params.initial_bias)
        # Hidden -> Hidden layer
        self._params["U"] = self.inner_init((self.hidden_dim, self.hidden_dim))

        # Init gradient arrays
        self._params.init_grad()

        self.hprev = np.zeros((x_shape[0], self.hidden_dim)) 
Example #2
Source File: kernel.py    From kernel-gof with MIT License
def __init__(self, sigma2s, wts=None):
        """
        Mixture of isotropic Gaussian kernels:
          sum wts[i] * exp(- ||x - y||^2 / (2 * sigma2s[i]))

        sigma2s: a list/array of squared bandwidths
        wts: a list/array of weights. Defaults to equal weights summing to 1.
        """
        self.sigma2s = sigma2s = np.asarray(sigma2s)
        assert len(sigma2s) > 0

        if wts is None:
            self.wts = wts = np.full(len(sigma2s), 1/len(sigma2s))
        else:
            self.wts = wts = np.asarray(wts)
            assert len(wts) == len(sigma2s)
            assert all(w >= 0 for w in wts) 
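A tiny standalone sketch (hypothetical, not the kernel-gof class itself) of the mixture the docstring above describes, using np.full for the equal-weight default:

import autograd.numpy as np

def mixture_kernel_value(x, y, sigma2s, wts):
    # sum_i wts[i] * exp(-||x - y||^2 / (2 * sigma2s[i]))
    sqdist = np.sum((x - y) ** 2)
    return np.sum(wts * np.exp(-sqdist / (2.0 * sigma2s)))

sigma2s = np.array([1.0, 10.0])
wts = np.full(len(sigma2s), 1.0 / len(sigma2s))  # equal weights, as in __init__
x, y = np.array([0.0, 0.0]), np.array([1.0, 1.0])
print(mixture_kernel_value(x, y, sigma2s, wts))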
Example #3
Source File: data.py    From autograd with MIT License
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary, vmin=None, vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = (N_images - 1) // ims_per_row + 1
    pad_value = np.min(images.ravel())
    concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
                             (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start: row_start + digit_dimensions[0],
                      col_start: col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax 
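A hypothetical usage of plot_images above (the variable names and random data are illustrative only): tile ten fake 28x28 digits into one padded grid.

import matplotlib
import matplotlib.pyplot as plt
import autograd.numpy as np

fake_digits = np.random.rand(10, 28 * 28)  # (N_images x pixels), as the docstring requires
fig, ax = plt.subplots()
plot_images(fake_digits, ax, ims_per_row=5)
plt.show()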
Example #4
Source File: lstm.py    From MLAlgorithms with MIT License
def setup(self, x_shape):
        """
        Naming convention:
        i : input gate
        f : forget gate
        c : cell
        o : output gate

        Parameters
        ----------
        x_shape : tuple of (batch size, time steps, input shape)
        """
        self.input_dim = x_shape[2]
        # Input -> Hidden
        W_params = ["W_i", "W_f", "W_o", "W_c"]
        # Hidden -> Hidden
        U_params = ["U_i", "U_f", "U_o", "U_c"]
        # Bias terms
        b_params = ["b_i", "b_f", "b_o", "b_c"]

        # Initialize params
        for param in W_params:
            self._params[param] = self._params.init((self.input_dim, self.hidden_dim))

        for param in U_params:
            self._params[param] = self.inner_init((self.hidden_dim, self.hidden_dim))

        for param in b_params:
            self._params[param] = np.full((self.hidden_dim,), self._params.initial_bias)

        # Combine weights for simplicity
        self.W = [self._params[param] for param in W_params]
        self.U = [self._params[param] for param in U_params]

        # Init gradient arrays for all weights
        self._params.init_grad()

        self.hprev = np.zeros((x_shape[0], self.hidden_dim))
        self.oprev = np.zeros((x_shape[0], self.hidden_dim)) 
Example #5
Source File: dtlz.py    From pymoo with Apache License 2.0
def get_scale(n, scale_factor):
        return anp.power(anp.full(n, scale_factor), anp.arange(n)) 
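As a quick illustration with made-up values (not part of the pymoo source), anp.power(anp.full(n, s), anp.arange(n)) produces the geometric sequence [1, s, s^2, ...]:

import autograd.numpy as anp

n, scale_factor = 4, 10.0
print(anp.power(anp.full(n, scale_factor), anp.arange(n)))  # [   1.   10.  100. 1000.]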
Example #6
Source File: dtlz.py    From pymop with Apache License 2.0
def get_power(n):
        p = anp.full(n, 4.0)
        p[-1] = 2.0
        return p 
Example #7
Source File: dtlz.py    From pymop with Apache License 2.0
def get_scale(n, scale_factor):
        return anp.power(anp.full(n, scale_factor), anp.arange(n)) 
Example #8
Source File: util.py    From pymop with Apache License 2.0
def uniform_reference_directions(self, n_partitions, n_dim):
        ref_dirs = []
        ref_dir = anp.full(n_dim, anp.inf)
        self.__uniform_reference_directions(ref_dirs, ref_dir, n_partitions, n_partitions, 0)
        return anp.concatenate(ref_dirs, axis=0) 
Example #9
Source File: kernel.py    From kernel-gof with MIT License
def __init__(self, ks, wts=None):
        self.ks = ks
        if wts is None:
            self.wts = np.full(len(ks), 1/len(ks))
        else:
            self.wts = np.asarray(wts) 
Example #10
Source File: define_gradient.py    From autograd with MIT License
def logsumexp_vjp(ans, x):
    # If you want to be able to take higher-order derivatives, then all the
    # code inside this function must be itself differentiable by Autograd.
    # This closure multiplies g with the Jacobian of logsumexp (d_ans/d_x).
    # Because Autograd uses reverse-mode differentiation, g contains
    # the gradient of the objective w.r.t. ans, the output of logsumexp.
    # The returned lambda closes over `x`, `ans`, and `x_shape`, so those
    # values stay alive for as long as the VJP function itself does.
    x_shape = x.shape
    return lambda g: np.full(x_shape, g) * np.exp(x - np.full(x_shape, ans))

# Now we tell Autograd that logsumexp has a gradient-making function.
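The snippet ends at that comment; the registration step it refers to, sketched here with autograd's extend API (autograd >= 1.2; the original file may use an older registration form), looks roughly like this:

from autograd.extend import primitive, defvjp
from autograd import grad
import autograd.numpy as np

@primitive
def logsumexp(x):
    """Numerically stable log(sum(exp(x)))."""
    max_x = np.max(x)
    return max_x + np.log(np.sum(np.exp(x - max_x)))

defvjp(logsumexp, logsumexp_vjp)  # logsumexp_vjp as defined in Example #10

print(grad(logsumexp)(np.array([0.0, 1.0, 2.0])))  # the softmax of the input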
Example #11
Source File: model.py    From tree-regularization-public with MIT License
def make_grad_softplus(ans, x):
    x = np.asarray(x)
    def gradient_product(g):
        return np.full(x.shape, g) * np.exp(x - ans)
    return gradient_product 
Example #12
Source File: schwefel.py    From pymoo with Apache License 2.0
def _calc_pareto_set(self):
        return np.full(self.n_var, 420.9687) 
Example #13
Source File: sphere.py    From pymoo with Apache License 2.0
def _calc_pareto_set(self):
        return anp.full(self.n_var, 0.5) 
Example #14
Source File: rastrigin.py    From pymoo with Apache License 2.0
def _calc_pareto_set(self):
        return anp.full(self.n_var, 0) 
Example #15
Source File: zakharov.py    From pymoo with Apache License 2.0
def _calc_pareto_set(self):
        return anp.full(self.n_var, 0) 
Example #16
Source File: ackley.py    From pymoo with Apache License 2.0
def _calc_pareto_set(self):
        return anp.full(self.n_var, 0) 
Example #17
Source File: griewank.py    From pymoo with Apache License 2.0
def _calc_pareto_set(self):
        return np.full(self.n_var, 0) 
Example #18
Source File: dtlz.py    From pymoo with Apache License 2.0
def get_power(self, n):
        p = anp.full(n, 4.0)
        p[-1] = 2.0
        return p