Python tensorflow.placeholders() Examples

The following are 13 code examples of tensorflow.placeholders(). Note that the actual TensorFlow 1.x API is tf.placeholder() (singular); tf.placeholders() does not exist, so code that calls it raises an AttributeError. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
Example #1
Source File: utils.py    From btgym with GNU Lesser General Public License v3.0 6 votes
def rnn_placeholders(state):
    """
    Given a nested [multilayer] RNN state tensor, infers and returns matching state placeholders.

    Args:
        state:  LSTM zero-state tuple (e.g. tf.contrib.rnn.LSTMStateTuple).

    Returns:    tuple of placeholders with an unspecified (None) batch dimension
    """
    if isinstance(state, tf.contrib.rnn.LSTMStateTuple):
        c, h = state
        c = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(c.get_shape()[1:]), c.op.name + '_c_pl')
        h = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(h.get_shape()[1:]), h.op.name + '_h_pl')
        return tf.contrib.rnn.LSTMStateTuple(c, h)
    elif isinstance(state, tf.Tensor):
        h = state
        h = tf.placeholder(tf.float32, tf.TensorShape([None]).concatenate(h.get_shape()[1:]), h.op.name + '_h_pl')
        return h
    else:
        structure = [rnn_placeholders(x) for x in state]
        return tuple(structure) 
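For context, here is a minimal usage sketch (not from btgym; the cell size and batch handling are illustrative):

import tensorflow as tf

cell = tf.contrib.rnn.BasicLSTMCell(num_units=64)
zero_state = cell.zero_state(batch_size=1, dtype=tf.float32)   # LSTMStateTuple(c, h)
state_ph = rnn_placeholders(zero_state)
# state_ph is an LSTMStateTuple of placeholders shaped [None, 64],
# so any batch size can be fed at run time.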
Example #2
Source File: utils.py    From btgym with GNU Lesser General Public License v3.0 6 votes
def nested_placeholders(ob_space, batch_dim=None, name='nested'):
    """
    Given a nested observation space as a dictionary of shape tuples,
    returns a matching nested structure of batch-wise placeholders.

    Args:
        ob_space:   [nested] dict of shapes
        name:       name scope
        batch_dim:  batch dimension
    Returns:
        nested dictionary of placeholders
    """
    if isinstance(ob_space, dict):
        out = {key: nested_placeholders(value, batch_dim, name + '_' + key) for key, value in ob_space.items()}
        return out
    else:
        out = tf.placeholder(tf.float32, [batch_dim] + list(ob_space), name + '_pl')
        return out 
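A usage sketch (the keys and shapes here are made up for illustration):

ob_space = {'external': (10, 4), 'internal': {'value': (2,)}}
obs_ph = nested_placeholders(ob_space, batch_dim=None, name='observation')
# obs_ph['external'] is a tf.placeholder shaped [None, 10, 4];
# obs_ph['internal']['value'] is shaped [None, 2].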
Example #3
Source File: conjugate_gradient_optimizer.py    From ProMP with MIT License 6 votes
def build_graph(self, constraint_obj, target, input_val_dict, reg_coeff):
        """
        Sets the objective function and target weights for the optimize function

        Args:
            constraint_obj (tf_op) : constraint objective
            target (Policy) : Policy whose values we are optimizing over
            input_val_dict (dict) : dict of tf.placeholders for input data, which may be subsampled; the first dimension corresponds to the number of data points
            reg_coeff (float): regularization coefficient
        """
        self._target = target
        self.reg_coeff = reg_coeff
        self._input_ph_dict = input_val_dict

        params = list(target.get_params().values())
        constraint_grads = tf.gradients(constraint_obj, xs=params)

        for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
            if grad is None:
                constraint_grads[idx] = tf.zeros_like(param)

        constraint_gradient = tf.concat([tf.reshape(grad, [-1]) for grad in constraint_grads], axis=0)

        self._constraint_gradient = constraint_gradient 
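The flattened gradient above typically feeds a Hessian-vector product built with the standard double-backprop trick. A sketch of that downstream step (assumed context, not part of this snippet):

vector_ph = tf.placeholder(tf.float32, shape=constraint_gradient.get_shape(), name='hvp_vector')
grad_vector_product = tf.reduce_sum(constraint_gradient * vector_ph)
# differentiating (gradient . v) w.r.t. the params yields H v without forming H
hessian_vector_product = tf.gradients(grad_vector_product, xs=params)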
Example #4
Source File: base.py    From ProMP with MIT License 6 votes
def likelihood_ratio_sym(self, obs, action, dist_info_old, policy_params):
        """
        Computes the likelihood ratio p_new(act|obs) / p_old(act|obs) between the new and the old policy

        Args:
            obs (tf.Tensor): symbolic variable for observations
            action (tf.Tensor): symbolic variable for actions
            dist_info_old (dict): dictionary of tf.placeholders with old policy information
            policy_params (dict): dictionary of the policy parameters (each value is a tf.Tensor)

        Returns:
            (tf.Tensor) : likelihood ratio
        """

        distribution_info_new = self.distribution_info_sym(obs, params=policy_params)
        likelihood_ratio = self._dist.likelihood_ratio_sym(action, dist_info_old, distribution_info_new)
        return likelihood_ratio 
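For a diagonal Gaussian policy, the distribution's likelihood_ratio_sym typically reduces to exp(log p_new - log p_old). A self-contained sketch of that computation (the 'mean'/'log_std' keys are an assumption about dist_info's layout):

import numpy as np
import tensorflow as tf

def gaussian_likelihood_ratio_sym(action, dist_info_old, dist_info_new):
    def log_prob(mean, log_std):
        z = (action - mean) / tf.exp(log_std)
        # log-density of a diagonal Gaussian, summed over action dimensions
        return -0.5 * tf.reduce_sum(tf.square(z) + 2. * log_std + np.log(2. * np.pi), axis=-1)
    return tf.exp(log_prob(dist_info_new['mean'], dist_info_new['log_std'])
                  - log_prob(dist_info_old['mean'], dist_info_old['log_std']))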
Example #5
Source File: utils.py    From btgym with GNU Lesser General Public License v3.0 5 votes
def flat_placeholders(ob_space, batch_dim=None, name='flt'):
    """
    Given a nested observation space as a dictionary of shape tuples,
    returns a flattened dictionary of batch-wise placeholders.

    Args:
        ob_space:   [nested dict] of tuples
        name:       name_scope
        batch_dim:  batch dimension
    Returns:
        flat dictionary of tf.placeholders
    """
    return flatten_nested(nested_placeholders(ob_space, batch_dim=batch_dim, name=name)) 
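flatten_nested is not shown in this snippet; a minimal sketch of what such a helper can look like (the key-joining scheme is an assumption):

def flatten_nested(nested, parent_key='', sep='/'):
    """Flattens a nested dict into a single-level dict with joined keys."""
    flat = {}
    for key, value in nested.items():
        full_key = parent_key + sep + str(key) if parent_key else str(key)
        if isinstance(value, dict):
            flat.update(flatten_nested(value, full_key, sep))
        else:
            flat[full_key] = value
    return flat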
Example #6
Source File: utils.py    From btgym with GNU Lesser General Public License v3.0 5 votes
def feed_dict_from_nested(placeholder, value, expand_batch=False):
    """
    Zips a flat feed dictionary from nested dictionaries of placeholders and values.

    Args:
        placeholder:    nested dictionary of placeholders
        value:          nested dictionary of values
        expand_batch:   if True, adds a fake (leading) batch dimension to values

    Returns:
        flat feed_dict
    """
    assert_same_structure(placeholder, value, check_types=True)
    return _flat_from_nested(placeholder, value, expand_batch) 
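assert_same_structure presumably comes from TensorFlow's nest utilities, and _flat_from_nested is not shown here. A plausible sketch of the recursion under the same conventions (an assumption, not btgym's actual helper):

import numpy as np

def _flat_from_nested(placeholder, value, expand_batch):
    feed_dict = {}
    if isinstance(placeholder, dict):
        for key in placeholder:
            feed_dict.update(_flat_from_nested(placeholder[key], value[key], expand_batch))
    else:
        # optionally prepend a fake batch dimension to the leaf value
        feed_dict[placeholder] = np.asarray(value)[None, ...] if expand_batch else value
    return feed_dict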
Example #7
Source File: utils.py    From btgym with GNU Lesser General Public License v3.0 5 votes
def feed_dict_rnn_context(placeholders, values):
    """
    Creates a feed dictionary from flat placeholders and nested values.

    Args:
        placeholders:       flat structure of placeholders
        values:             nested structure of values

    Returns:
        flat feed dictionary
    """
    return {key: value for key, value in zip(placeholders, flatten_nested(values))} 
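A usage sketch (all names illustrative; this assumes flatten_nested also flattens tuples, so that placeholder order matches value order):

import numpy as np
import tensorflow as tf

c_ph = tf.placeholder(tf.float32, [None, 64], name='c_pl')
h_ph = tf.placeholder(tf.float32, [None, 64], name='h_pl')
nested_state = (np.zeros([1, 64]), np.zeros([1, 64]))   # e.g. obtained from session.run

feed = feed_dict_rnn_context((c_ph, h_ph), nested_state)
# feed == {c_ph: nested_state[0], h_ph: nested_state[1]}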
Example #8
Source File: legacy_models.py    From DeepChatModels with MIT License 5 votes
def _get_placeholder_list(name, length, dtype=tf.int32):
        """
        Args:
            name: prefix for the name of each tf.placeholder list item, where the i'th item is named [name]i.
            length: number of items (tf.placeholders) in the returned list.
        Returns:
            list of tf.placeholders of the given dtype (default tf.int32) and shape [None].
        """
        return [tf.placeholder(dtype, shape=[None], name=name+str(i)) for i in range(length)] 
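A usage sketch in the style of the legacy bucketed seq2seq tutorials, which build one placeholder per timestep (names illustrative):

encoder_inputs = _get_placeholder_list('encoder', 10)   # encoder0 ... encoder9
decoder_inputs = _get_placeholder_list('decoder', 10)   # decoder0 ... decoder9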
Example #9
Source File: train.py    From darkflow with GNU General Public License v3.0 5 votes
def loss(self, net_out):
    m = self.meta
    loss_type = self.meta['type']
    assert loss_type in _LOSS_TYPE, \
        'Loss type {} not implemented'.format(loss_type)

    out = net_out
    out_shape = out.get_shape()
    out_dtype = out.dtype.base_dtype
    # The original source calls tf.placeholders(), which does not exist;
    # the working TensorFlow 1.x API is tf.placeholder().
    _truth = tf.placeholder(out_dtype, out_shape)

    self.placeholders = dict({
        'truth': _truth
    })

    diff = _truth - out
    if loss_type in ['sse', 'l2']:
        loss = tf.nn.l2_loss(diff)

    elif loss_type == 'smooth':
        # smooth-L1: quadratic penalty for small residuals, linear for large ones
        small = tf.cast(tf.abs(diff) < 1, tf.float32)
        large = 1. - small
        # tf.nn.l1_loss does not exist; a sum of absolute values carries the L1 intent
        l1_loss = tf.reduce_sum(tf.abs(tf.multiply(diff, large)))
        l2_loss = tf.nn.l2_loss(tf.multiply(diff, small))
        loss = l1_loss + l2_loss

    elif loss_type in ['sparse', 'l1']:
        loss = tf.reduce_sum(tf.abs(diff))

    elif loss_type == 'softmax':
        # the original references the undefined names `logits` and `y`;
        # the network output and the truth placeholder are the natural operands
        loss = tf.nn.softmax_cross_entropy_with_logits(labels=_truth, logits=out)
        loss = tf.reduce_mean(loss)

    elif loss_type == 'svm':
        assert 'train_size' in m, \
            'Must specify train_size'
        size = m['train_size']
        # num_classes is undefined in the snippet; assumed to come from the meta dict
        num_classes = m['classes']
        self.nu = tf.Variable(tf.ones([size, num_classes]))
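For reference, the 'smooth' branch above hand-rolls a smooth-L1 (Huber-style) penalty; TF 1.x also ships a built-in that covers the same intent, modulo its default averaging reduction (a possible drop-in, not darkflow's code):

loss = tf.losses.huber_loss(labels=_truth, predictions=out, delta=1.0)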
Example #10
Source File: train.py    From VideoRecognition-realtime-autotrainer-alerts with GNU General Public License v3.0 5 votes
The loss() function in this file is identical, line for line, to the one shown in Example #9 (the project vendors darkflow's train.py); see the corrected listing there.
Example #11
Source File: conjugate_gradient_optimizer.py    From ProMP with MIT License 5 votes
def build_graph(self, loss, target, input_ph_dict, leq_constraint):
        """
        Sets the objective function and target weights for the optimize function

        Args:
            loss (tf_op) : minimization objective
            target (Policy) : Policy whose values we are optimizing over
            input_ph_dict (dict) : dict of tf.placeholders for input data, which may be subsampled; the first dimension corresponds to the number of data points
            leq_constraint (tuple) : A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        """
        assert isinstance(loss, tf.Tensor)
        assert hasattr(target, 'get_params')
        assert isinstance(input_ph_dict, dict)
        
        constraint_objective, constraint_value = leq_constraint

        self._target = target
        self._constraint_objective = constraint_objective
        self._max_constraint_val = constraint_value
        self._input_ph_dict = input_ph_dict
        self._loss = loss

        # build the graph of the hessian vector product (hvp)
        self._hvp_approach.build_graph(constraint_objective, target, self._input_ph_dict, self._reg_coeff)

        # build the graph of the gradients
        params = list(target.get_params().values())
        grads = tf.gradients(loss, xs=params)
        for idx, (grad, param) in enumerate(zip(grads, params)):
            if grad is None:
                grads[idx] = tf.zeros_like(param)
        gradient = tf.concat([tf.reshape(grad, [-1]) for grad in grads], axis=0)

        self._gradient = gradient 
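Downstream of this graph (not shown in the snippet), TRPO-style optimizers solve H x = g using only Hessian-vector products; a NumPy sketch of that conjugate-gradient step:

import numpy as np

def conjugate_gradient(hvp_fn, g, iters=10, residual_tol=1e-10):
    """Solves H x = g given a function hvp_fn computing H v."""
    x = np.zeros_like(g)
    r = g.copy()          # residual; with x = 0, r = g - H x = g
    p = g.copy()          # search direction
    rdotr = r.dot(r)
    for _ in range(iters):
        Ap = hvp_fn(p)
        alpha = rdotr / (p.dot(Ap) + 1e-8)
        x += alpha * p
        r -= alpha * Ap
        new_rdotr = r.dot(r)
        if new_rdotr < residual_tol:
            break
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
    return x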
Example #12
Source File: base.py    From ProMP with MIT License 5 votes
def distribution_info_sym(self, obs_var, params=None):
        """
        Return the symbolic distribution information about the actions.

        Args:
            obs_var (placeholder) : symbolic variable for observations
            params (None or dict) : a dictionary of placeholders that contains information about the
            state of the policy at the time it received the observation

        Returns:
            (dict) : a dictionary of symbolic tf.Tensors describing the policy output distribution (e.g. its parameters)
        """
        raise NotImplementedError 
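A concrete subclass typically returns the distribution parameters computed from the observation; e.g., for a Gaussian MLP policy (a sketch with a hypothetical forward_mlp helper, not ProMP's actual code):

def distribution_info_sym(self, obs_var, params=None):
    mean_var = forward_mlp(obs_var, params)      # hypothetical network forward pass
    log_std_var = params['log_std'] if params is not None else self.log_std_var
    return dict(mean=mean_var, log_std=log_std_var)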
Example #13
Source File: base.py    From ProMP with MIT License 5 votes
def distribution_info_keys(self, obs, state_infos):
        """
        Args:
            obs (placeholder) : symbolic variable for observations
            state_infos (dict) : a dictionary of placeholders that contains information about the
            state of the policy at the time it received the observation

        Returns:
            (dict) : a dictionary of tf placeholders for the policy output distribution
        """
        raise NotImplementedError