Python tensorflow.Tensors() Examples

The following are 25 code examples of tensorflow.Tensors(), drawn from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out all available functions/classes of the module tensorflow.
Example #1
Source File: categorical_actor.py    From tf2rl with MIT License
def call(self, states, test=False):
        """
        Compute actions and log probability of the selected action

        :return action (tf.Tensors): Tensor of actions
        :return log_probs (tf.Tensor): Tensors of log probabilities of selected actions
        """
        param = self._compute_dist(states)
        if test:
            action = tf.math.argmax(param["prob"], axis=1)  # (size,)
        else:
            action = tf.squeeze(self.dist.sample(param), axis=1)  # (size,)
        log_prob = self.dist.log_likelihood(
            tf.one_hot(indices=action, depth=self.action_dim), param)

        return action, log_prob, param 
Example #2
Source File: categorical_actor.py    From tf2rl with MIT License
def compute_log_probs(self, states, actions):
        """Compute log probabilities of inputted actions

        :param states (tf.Tensor): Tensors of inputs to NN
        :param actions (tf.Tensor): Tensors of actions that are NOT one-hot vectors.
            They will be converted to one-hot vectors inside this function.
        """
        param = self._compute_dist(states)
        actions = tf.one_hot(
            indices=tf.squeeze(actions),
            depth=self.action_dim)
        param["prob"] = tf.cond(
            tf.math.greater(tf.rank(actions), tf.rank(param["prob"])),
            lambda: tf.expand_dims(param["prob"], axis=0),
            lambda: param["prob"])
        actions = tf.cond(
            tf.math.greater(tf.rank(param["prob"]), tf.rank(actions)),
            lambda: tf.expand_dims(actions, axis=0),
            lambda: actions)
        log_prob = self.dist.log_likelihood(actions, param)
        return log_prob 
Example #3
Source File: tensor_utils.py    From garage with MIT License
def pad_tensor_dict(tensor_dict, max_len):
    """Pad dictionary of tensors with zeros.

    Args:
        tensor_dict (dict[numpy.ndarray]): Tensors to be padded.
        max_len (int): Maximum length.

    Returns:
        dict[numpy.ndarray]: Padded tensors.
    """
    keys = list(tensor_dict.keys())
    ret = dict()
    for k in keys:
        if isinstance(tensor_dict[k], dict):
            ret[k] = pad_tensor_dict(tensor_dict[k], max_len)
        else:
            ret[k] = pad_tensor(tensor_dict[k], max_len)
    return ret 
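
A minimal usage sketch for pad_tensor_dict above (hypothetical keys; assumes pad_tensor from the same garage module, shown in Example #21, is available):

import numpy as np

path = {'observations': np.ones((3, 4)),
        'agent_infos': {'mean': np.zeros((3, 2))}}
padded = pad_tensor_dict(path, max_len=5)
# padded['observations'].shape == (5, 4)
# padded['agent_infos']['mean'].shape == (5, 2)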
Example #4
Source File: utils.py    From RFHO with MIT License
def var_list(self, mode=VlMode.RAW):
        """
        Get the chunks that define this variable.

        :param mode: (optional, default VlMode.RAW) VlMode.RAW: simply returns var_list, which may contain tf.Variables
                         or MergedVariables
                     VlMode.BASE: returns a list of tf.Variables that are the "base" variables for this
                     MergedVariable
                     VlMode.TENSOR: returns a list of tf.Variables or tf.Tensor from the MergedVariables
        :return: A list that may contain tf.Tensors, tf.Variables and/or MergedVariables
        """
        if mode == VlMode.RAW:
            return self._var_list
        elif mode == VlMode.BASE:
            return self._get_base_variable_list()
        elif mode == VlMode.TENSOR:
            return self._var_list_as_tensors()  # return w as a unique tensor + augmented copies
        else:
            raise NotImplementedError('mode %d does not exist' % mode)
Example #5
Source File: cvxpylayer.py    From cvxpylayers with Apache License 2.0
def __call__(self, *parameters, solver_args={}):
        """Solve problem (or a batch of problems) corresponding to `parameters`

        Args:
          parameters: a sequence of tf.Tensors; the n-th Tensor specifies
                      the value for the n-th CVXPY Parameter. These Tensors
                      can be batched: if a Tensor has 3 dimensions, then its
                      first dimension is interpreted as the batch size.
          solver_args: a dict of optional arguments, to send to `diffcp`. Keys
                       should be the names of keyword arguments.

        Returns:
          a list of optimal variable values, one for each CVXPY Variable
          supplied to the constructor.
        """
        if len(parameters) != len(self.params):
            raise ValueError('A tensor must be provided for each CVXPY '
                             'parameter; received %d tensors, expected %d' % (
                                 len(parameters), len(self.params)))
        compute = tf.custom_gradient(
            lambda *parameters: self._compute(parameters, solver_args))
        return compute(*parameters) 
Example #6
Source File: unet.py    From batchflow with Apache License 2.0
def head(cls, inputs, targets, name='head', **kwargs):
        """ The last network layers which produce predictions. Process all output from body.

        Parameters
        ----------
        inputs : list of tf.Tensors
            Input tensors.
        targets : tf.Tensor
            Target tensor.
        name : str
            Scope name.

        Returns
        -------
        list of tf.Tensors
        """
        res = []
        for i, x in enumerate(inputs):
            res.append(super().head(x, targets, name=name+'-'+str(i), **kwargs))
        return res 
Example #7
Source File: tf_mnist_example.py    From ray with Apache License 2.0
def step(self):
        self.train_loss.reset_states()
        self.train_accuracy.reset_states()
        self.test_loss.reset_states()
        self.test_accuracy.reset_states()

        for idx, (images, labels) in enumerate(self.train_ds):
            if idx > MAX_TRAIN_BATCH:  # This is optional and can be removed.
                break
            self.tf_train_step(images, labels)

        for test_images, test_labels in self.test_ds:
            self.tf_test_step(test_images, test_labels)

        # It is important to return tf.Tensors as numpy objects.
        return {
            "epoch": self.iteration,
            "loss": self.train_loss.result().numpy(),
            "accuracy": self.train_accuracy.result().numpy() * 100,
            "test_loss": self.test_loss.result().numpy(),
            "mean_accuracy": self.test_accuracy.result().numpy() * 100
        } 
Example #8
Source File: base.py    From ProMP with MIT License
def _adapt_sym(self, surr_obj, params_var):
        """
        Creates the symbolic representation of the tf policy after one gradient step towards the surr_obj

        Args:
            surr_obj (tf_op) : tensorflow op for task specific (inner) objective
            params_var (dict) : dict of tf.Tensors for current policy params

        Returns:
            (dict):  dict of tf.Tensors for adapted policy params
        """
        # TODO: Fix this if we want to learn the learning rate (it isn't supported right now).
        update_param_keys = list(params_var.keys())

        grads = tf.gradients(surr_obj, [params_var[key] for key in update_param_keys])
        gradients = dict(zip(update_param_keys, grads))

        # gradient descent
        adapted_policy_params = [params_var[key] - tf.multiply(self.step_sizes[key], gradients[key])
                          for key in update_param_keys]

        adapted_policy_params_dict = OrderedDict(zip(update_param_keys, adapted_policy_params))

        return adapted_policy_params_dict 
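
A standalone sketch of the same one-gradient-step construction on a hypothetical toy objective with fixed step sizes (TF1-style graph mode, since tf.gradients is used above):

from collections import OrderedDict

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

params_var = {'w': tf.Variable([1.0, 2.0]), 'b': tf.Variable(0.5)}
step_sizes = {'w': 0.1, 'b': 0.1}
surr_obj = tf.reduce_sum(tf.square(params_var['w'])) + tf.square(params_var['b'])

keys = list(params_var.keys())
grads = dict(zip(keys, tf.gradients(surr_obj, [params_var[k] for k in keys])))
# one symbolic SGD step; the adapted params are new tf.Tensors
adapted = OrderedDict((k, params_var[k] - step_sizes[k] * grads[k]) for k in keys)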
Example #9
Source File: weighted_sparse_categorical_crossentropy_test.py    From models with Apache License 2.0
def test_tf_tensor_inputs(self):
    """Test that tf.Tensors can be used as inputs to the loss function."""
    batch_size = 3
    output_data = tf.convert_to_tensor(
        np.random.random_sample((batch_size, 10, 15)))
    labels = tf.convert_to_tensor(np.random.randint(10, size=(batch_size, 10)))
    weights = tf.convert_to_tensor(np.random.randint(2, size=(batch_size, 10)))

    # We're not trying to validate numerical correctness, just ensure that
    # we can in fact pass tensors to these functions without causing runtime
    # errors from the shape checking code.
    _ = weighted_sparse_categorical_crossentropy.loss(
        predictions=output_data, labels=labels, weights=weights) 
Example #10
Source File: gather_encoder_test.py    From model-optimization with Apache License 2.0
def test_python_constants_not_exposed(self):
    """Tests that only TensorFlow values are exposed to users."""
    x_fn = lambda: tf.constant(1.0)
    tensorspec = tf.TensorSpec.from_tensor(x_fn())
    encoder_py = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.SimpleLinearEncodingStage(2.0, 3.0)).add_parent(
                test_utils.PlusOneEncodingStage(), P1_VALS).add_parent(
                    test_utils.SimpleLinearEncodingStage(2.0, 3.0),
                    SL_VALS).make(), tensorspec)
    a_var = tf.compat.v1.get_variable('a_var', initializer=2.0)
    b_var = tf.compat.v1.get_variable('b_var', initializer=3.0)
    encoder_tf = gather_encoder.GatherEncoder.from_encoder(
        core_encoder.EncoderComposer(
            test_utils.SimpleLinearEncodingStage(a_var, b_var)).add_parent(
                test_utils.PlusOneEncodingStage(), P1_VALS).add_parent(
                    test_utils.SimpleLinearEncodingStage(a_var, b_var),
                    SL_VALS).make(), tensorspec)

    (encode_params_py, decode_before_sum_params_py,
     decode_after_sum_params_py) = encoder_py.get_params()
    (encode_params_tf, decode_before_sum_params_tf,
     decode_after_sum_params_tf) = encoder_tf.get_params()

    # Params that are Python constants -- not tf.Tensors -- should be hidden
    # from the user, and made statically available at appropriate locations.
    self.assertLen(encode_params_py, 1)
    self.assertLen(encode_params_tf, 5)
    self.assertLen(decode_before_sum_params_py, 1)
    self.assertLen(decode_before_sum_params_tf, 3)
    self.assertEmpty(decode_after_sum_params_py)
    self.assertLen(decode_after_sum_params_tf, 2) 
Example #11
Source File: BNN.py    From handful-of-trials with MIT License
def _compile_outputs(self, inputs, ret_log_var=False):
        """Compiles the output of the network at the given inputs.

        If inputs is 2D, returns a 3D tensor where output[i] is the output of the ith network in the ensemble.
        If inputs is 3D, returns a 3D tensor where output[i] is the output of the ith network on the ith input matrix.

        Arguments:
            inputs: (tf.Tensor) A tensor representing the inputs to the network
            ret_log_var: (bool) If True, returns the log variance instead of the variance.

        Returns: (tf.Tensors) The mean and variance/log variance predictions at inputs for each network
            in the ensemble.
        """
        dim_output = self.layers[-1].get_output_dim()
        cur_out = self.scaler.transform(inputs)
        for layer in self.layers:
            cur_out = layer.compute_output_tensor(cur_out)

        mean = cur_out[:, :, :dim_output//2]
        if self.end_act is not None:
            mean = self.end_act(mean)

        logvar = self.max_logvar - tf.nn.softplus(self.max_logvar - cur_out[:, :, dim_output//2:])
        logvar = self.min_logvar + tf.nn.softplus(logvar - self.min_logvar)

        if ret_log_var:
            return mean, logvar
        else:
            return mean, tf.exp(logvar) 
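
A standalone sketch of the log-variance soft-clamping used above, with toy bound values:

import tensorflow as tf

max_logvar = tf.constant(0.5)
min_logvar = tf.constant(-10.0)
raw = tf.constant([-20.0, 0.0, 5.0])

logvar = max_logvar - tf.nn.softplus(max_logvar - raw)     # soft upper bound at max_logvar
logvar = min_logvar + tf.nn.softplus(logvar - min_logvar)  # soft lower bound at min_logvar
variance = tf.exp(logvar)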
Example #12
Source File: bnn.py    From mbpo with MIT License
def _compile_outputs(self, inputs, ret_log_var=False):
        """Compiles the output of the network at the given inputs.

        If inputs is 2D, returns a 3D tensor where output[i] is the output of the ith network in the ensemble.
        If inputs is 3D, returns a 3D tensor where output[i] is the output of the ith network on the ith input matrix.

        Arguments:
            inputs: (tf.Tensor) A tensor representing the inputs to the network
            ret_log_var: (bool) If True, returns the log variance instead of the variance.

        Returns: (tf.Tensors) The mean and variance/log variance predictions at inputs for each network
            in the ensemble.
        """
        dim_output = self.layers[-1].get_output_dim()
        cur_out = self.scaler.transform(inputs)
        for layer in self.layers:
            cur_out = layer.compute_output_tensor(cur_out)

        mean = cur_out[:, :, :dim_output//2]
        if self.end_act is not None:
            mean = self.end_act(mean)

        logvar = self.max_logvar - tf.nn.softplus(self.max_logvar - cur_out[:, :, dim_output//2:])
        logvar = self.min_logvar + tf.nn.softplus(logvar - self.min_logvar)

        if ret_log_var:
            return mean, logvar
        else:
            return mean, tf.exp(logvar) 
Example #13
Source File: pretrained_models.py    From dhSegment with GNU General Public License v3.0
def vgg_16_fn(input_tensor: tf.Tensor, scope='vgg_16', blocks=5, weight_decay=0.0005) \
        -> (tf.Tensor, list):  # list of tf.Tensors (layers)
    intermediate_levels = []
    # intermediate_levels.append(input_tensor)
    with slim.arg_scope(nets.vgg.vgg_arg_scope(weight_decay=weight_decay)):
        with tf.variable_scope(scope, 'vgg_16', [input_tensor]) as sc:
            input_tensor = mean_substraction(input_tensor)
            intermediate_levels.append(input_tensor)
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope(
                    [layers.conv2d, layers.fully_connected, layers.max_pool2d],
                    outputs_collections=end_points_collection):
                net = layers.repeat(
                    input_tensor, 2, layers.conv2d, 64, [3, 3], scope='conv1')
                intermediate_levels.append(net)
                net = layers.max_pool2d(net, [2, 2], scope='pool1')
                if blocks >= 2:
                    net = layers.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool2')
                if blocks >= 3:
                    net = layers.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool3')
                if blocks >= 4:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool4')
                if blocks >= 5:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool5')

                return net, intermediate_levels 
Example #14
Source File: tensorcheck.py    From in-silico-labeling with Apache License 2.0
def well_defined():
  """A decorator which checks function argument tensors.

  Checked tensors must have the same shape at graph runtime as they had at graph
  construction time.
  Checked tensors must contain only finite values.

  This calls either tf.verify_tensor_all_finite or lt.verify_tensor_all_finite
  on all input tf.Tensors and lt.LabeledTensors.

  Returns:
    A function to use as a decorator.
  """

  def check(f):
    """Check the inputs."""

    # TODO(ericmc): Should we also check kwds?
    @functools.wraps(f)
    def new_f(*args, **kwds):
      """A helper function."""
      new_args = []
      for a in args:
        float_types = [tf.float16, tf.float32, tf.float64]
        if isinstance(a, tf.Tensor):
          new_a = shape_unlabeled(a)
          if a.dtype in float_types:
            new_a = tf.verify_tensor_all_finite(new_a, msg='')
        elif isinstance(a, lt.LabeledTensor):
          new_a = shape(a)
          if a.tensor.dtype in float_types:
            new_a = lt.verify_tensor_all_finite(new_a, message='')
        else:
          new_a = a
        new_args.append(new_a)

      return f(*new_args, **kwds)

    return new_f

  return check 
Example #15
Source File: base.py    From ProMP with MIT License
def _adapt_sym(self, surr_obj, params_var):
        """
        Creates the symbolic representation of the tf policy after one gradient step towards the surr_obj

        Args:
            surr_obj (tf_op) : tensorflow op for task specific (inner) objective
            params_var (dict) : dict of placeholders for current policy params

        Returns:
            (dict):  dict of tf.Tensors for adapted policy params
        """
        raise NotImplementedError 
Example #16
Source File: model.py    From ebonite with Apache License 2.0
def must_process(self, obj) -> bool:
        """
        Returns `True` if object is a tf.Tensor or list of tf.Tensors

        :param obj: obj to check
        :return: `True` or `False`
        """
        return isinstance(obj, tf.Tensor) or (isinstance(obj, list) and all(isinstance(o, tf.Tensor) for o in obj)) 
Example #17
Source File: pond.py    From tf-encrypted with Apache License 2.0
def feed(self, value):
        """
    Feed `value` to placeholder
    """
        assert isinstance(value, np.ndarray), type(value)
        enc = self.prot._encode(value, self.is_scaled)
        assert isinstance(enc, np.ndarray)

        # x0, x1 = self.prot._share(enc)
        # assert isinstance(x0, np.ndarray), type(x0)
        # assert isinstance(x1, np.ndarray), type(x1)

        # TODO(Morten)
        #
        # This is a huge hack and it would be better to use `_share` as above.
        # However, _share currently expects its inputs to be TFE tensors backed
        # by tf.Tensors in order to have extra information attached, and not sure
        # we should change this until we've least considered what will happen with
        # TF2 and eager mode.
        #
        # So, to ensure that feeding can be done locally *outside* the TF graph,
        # in the meantime we manually share values here, avoiding a call to
        # `factory.tensor` as that's where tensors are converted to tf.Tensors.
        shape = self.shape
        minval = self.backing_dtype.min
        maxval = self.backing_dtype.max
        # TODO(Morten) not using secure randomness here; reconsider after TF2
        x0 = np.array(
            [random.randrange(minval, maxval) for _ in range(np.product(shape))]
        ).reshape(shape)
        x1 = enc - x0
        assert isinstance(x0, np.ndarray)
        assert isinstance(x1, np.ndarray)

        feed0 = self.placeholder0.feed(x0)
        feed1 = self.placeholder1.feed(x1)
        return {**feed0, **feed1} 
Example #18
Source File: roi.py    From batchflow with Apache License 2.0
def _filter_tensor(inputs, cond, *args):
    """ Create indixes and elements of inputs which consists for which cond is True.

    Parameters
    ----------
        inputs: tf.Tensor
            input tensor
        cond: callable or float
            condition to choose elements. If float, elements greater than cond will be chosen
        *args: tf.Tensors:
            tensors with the same shape as inputs. Corresponding elements of them will be returned.

    Returns
    -------
        indices: tf.Tensor
            indices of elements of inputs for which cond is True
        tf.Tensors:
            filtered inputs and tensors from args.
    """
    with tf.variable_scope('filter_tensor'):
        if not callable(cond):
            callable_cond = lambda x: x > cond
        else:
            callable_cond = cond
        indices = tf.where(callable_cond(inputs))
        output = (indices, *[tf.gather_nd(x, indices) for x in [inputs, *args]])
    return output 
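
A standalone TF2-style sketch of the same filtering idea with a float threshold (the default branch above):

import tensorflow as tf

scores = tf.constant([0.1, 0.7, 0.3, 0.9])
boxes = tf.constant([[0, 0], [1, 1], [2, 2], [3, 3]])

indices = tf.where(scores > 0.5)             # indices where the condition holds
kept_scores = tf.gather_nd(scores, indices)  # [0.7, 0.9]
kept_boxes = tf.gather_nd(boxes, indices)    # [[1, 1], [3, 3]]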
Example #19
Source File: optimizers.py    From kfac with Apache License 2.0
def _set_hyper(self, name, value):
    """Set hyper `name` to value. value must be numeric."""
    if self._hypers_created:
      if not isinstance(self._hyper[name], tf.Variable):
        raise AttributeError("Can't set attribute: {}".format(name))
      if not isinstance(value, numbers.Number):
        raise ValueError('Dynamic reassignment only supports setting with a '
                         'number. tf.Tensors and tf.Variables can only be used '
                         'before the internal kfac optimizer is created.')
      backend.set_value(self._hyper[name], value)
    else:
      super(Kfac, self)._set_hyper(name, value) 
Example #20
Source File: tensor_utils.py    From garage with MIT License
def pad_tensor_n(xs, max_len):
    """Pad array of tensors.

    Args:
        xs (numpy.ndarray): Tensors to be padded.
        max_len (int): Maximum length.

    Returns:
        numpy.ndarray: Padded tensor.
    """
    ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
    for idx, x in enumerate(xs):
        ret[idx][:len(x)] = x
    return ret 
Example #21
Source File: tensor_utils.py    From garage with MIT License
def pad_tensor(x, max_len):
    """Pad tensors with zeros.

    Args:
        x (numpy.ndarray): Tensors to be padded.
        max_len (int): Maximum length.

    Returns:
        numpy.ndarray: Padded tensor.
    """
    return np.concatenate([
        x,
        np.tile(np.zeros_like(x[0]),
                (max_len - len(x), ) + (1, ) * np.ndim(x[0]))
    ]) 
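
A quick usage sketch for pad_tensor above:

import numpy as np

x = np.arange(6).reshape(3, 2)
padded = pad_tensor(x, max_len=5)
# padded.shape == (5, 2); the last two rows are zeros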
Example #22
Source File: tensor_utils.py    From garage with MIT License
def flatten_tensor_variables(ts):
    """Flattens a list of tensors into a single, 1-dimensional tensor.

    Args:
        ts (Iterable): Iterable containing either tf.Tensors or arrays.

    Returns:
        tf.Tensor: Flattened Tensor.
    """
    return tf.concat(axis=0,
                     values=[tf.reshape(x, [-1]) for x in ts],
                     name='flatten_tensor_variables') 
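
The same flattening written out as a standalone sketch:

import tensorflow as tf

ts = [tf.ones((2, 3)), tf.zeros((4,))]
flat = tf.concat([tf.reshape(x, [-1]) for x in ts], axis=0)
# flat.shape == (10,)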
Example #23
Source File: tensor_utils.py    From garage with MIT License
def flatten_batch_dict(d, name='flatten_batch_dict'):
    """Flatten a batch of observations represented as a dict.

    Args:
        d (dict[tf.Tensor]): A dict of Tensors to flatten.
        name (string): The name of the operation ('flatten_batch_dict' by default).

    Returns:
        dict[tf.Tensor]: A dict with flattened tensors.
    """
    with tf.name_scope(name):
        return {k: flatten_batch(v) for k, v in d.items()} 
Example #24
Source File: tensorflow_frontend.py    From kymatio with BSD 3-Clause "New" or "Revised" License
def scattering(self, input):
        with tf.name_scope('scattering') as scope:
            try:
                input = tf.convert_to_tensor(input)
            except ValueError:
                raise TypeError('The input should be convertible to a '
                                'TensorFlow Tensor.')

            if len(input.shape) < 2:
                raise RuntimeError('Input tensor should have at least two '
                                   'dimensions.')

            if (input.shape[-1] != self.N or input.shape[-2] != self.M) and not self.pre_pad:
                raise RuntimeError('Tensor must be of spatial size (%i,%i).' % (self.M, self.N))

            if (input.shape[-1] != self.N_padded or input.shape[-2] != self.M_padded) and self.pre_pad:
                raise RuntimeError('Padded tensor must be of spatial size (%i,%i).' % (self.M_padded, self.N_padded))
            if self.out_type not in ('array', 'list'):
                raise RuntimeError("The out_type must be one of 'array' or 'list'.")

            # Use tf.shape to get the dynamic shape of the tf.Tensors at
            # execution time.
            batch_shape = tf.shape(input)[:-2]
            signal_shape = tf.shape(input)[-2:]

            # NOTE: Cannot simply concatenate these using + since they are
            # tf.Tensors and that would add their values.
            input = tf.reshape(input, tf.concat(((-1,), signal_shape), 0))

            S = scattering2d(input, self.pad, self.unpad, self.backend, self.J, self.L, self.phi, self.psi,
                             self.max_order, self.out_type)

            if self.out_type == 'array':
                scattering_shape = tf.shape(S)[-3:]
                new_shape = tf.concat((batch_shape, scattering_shape), 0)

                S = tf.reshape(S, new_shape)
            else:
                scattering_shape = tf.shape(S[0]['coef'])[-2:]
                new_shape = tf.concat((batch_shape, scattering_shape), 0)

                for x in S:
                    x['coef'] = tf.reshape(x['coef'], new_shape)

            return S 
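
A small standalone sketch of the dynamic-shape handling above: the shape slices are tf.Tensors, so they must be combined with tf.concat rather than +, which would add them elementwise:

import tensorflow as tf

x = tf.zeros((4, 5, 28, 28))
batch_shape = tf.shape(x)[:-2]   # [4, 5]
signal_shape = tf.shape(x)[-2:]  # [28, 28]
flat = tf.reshape(x, tf.concat(((-1,), signal_shape), 0))  # shape (20, 28, 28)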
Example #25
Source File: parameters.py    From strawberryfields with Apache License 2.0
def par_evaluate(params, dtype=None):
    """Evaluate an Operation parameter sequence.

    Any parameters descending from :class:`sympy.Basic` are evaluated, others are returned as-is.
    Evaluation means that free and measured parameters are replaced by their numeric values.
    NumPy object arrays are evaluated elementwise.

    Alternatively, evaluates a single parameter and returns its value.

    Args:
        params (Sequence[Any]): parameters to evaluate
        dtype (None, np.dtype, tf.dtype): NumPy or TensorFlow datatype to optionally cast atomic symbols
            to *before* evaluating the parameter expression. Note that if the atom
            is a TensorFlow tensor, a NumPy datatype can still be passed; ``tf.as_dtype()``
            is used to determine the corresponding TensorFlow dtype internally.

    Returns:
        list[Any]: evaluated parameters
    """
    scalar = False
    if not isinstance(params, collections.abc.Sequence):
        scalar = True
        params = [params]

    def do_evaluate(p):
        """Evaluates a single parameter."""
        if is_object_array(p):
            return np.array([do_evaluate(k) for k in p])

        if not par_is_symbolic(p):
            return p

        # using lambdify we can also substitute np.ndarrays and tf.Tensors for the atoms
        atoms = list(p.atoms(MeasuredParameter, FreeParameter))
        # evaluate the atoms of the expression
        vals = [k._eval_evalf(None) for k in atoms]
        # use the tensorflow printer if any of the symbolic parameter values are TF objects
        # (we do it like this to avoid importing tensorflow if it's not needed)
        is_tf = (type(v).__module__.startswith("tensorflow") for v in vals)
        printer = "tensorflow" if any(is_tf) else "numpy"
        func = sympy.lambdify(atoms, p, printer)

        if dtype is not None:
            # cast the input values
            if printer == "tensorflow":
                import tensorflow as tf

                tfdtype = tf.as_dtype(dtype)
                vals = [tf.cast(v, dtype=tfdtype) for v in vals]
            else:
                vals = [dtype(v) for v in vals]

        return func(*vals)

    ret = list(map(do_evaluate, params))
    if scalar:
        return ret[0]
    return ret
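
A standalone sketch of the lambdify-with-TensorFlow-printer mechanism used above, with a plain SymPy symbol instead of a Strawberry Fields parameter (assumes a recent SymPy with TF2 support):

import sympy
import tensorflow as tf

x = sympy.Symbol('x')
expr = sympy.sin(x) + x ** 2
func = sympy.lambdify([x], expr, "tensorflow")
result = func(tf.constant(0.5))  # evaluated with TF ops, returns a tf.Tensor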