Python predict generator

Below are 20 Python code examples related to "predict generator", collected from open-source projects. The source file, project, and license are noted above each example.
Example 1
Source File: multinetwork.py    From timeserio with MIT License
def predict_generator(self, generator, model: str = None, **kwargs):
        """Return predictions from a batch generator.

        Args:
            generator : generator or keras.utils.Sequence
                predict batches as in Sequential.predict_generator
            **kwargs: dictionary arguments
                Legal arguments are the arguments of
                `Sequential.predict_generator`

        Returns:
            preds: array-like, shape `(n_samples, ...)`
                Predictions.

        """
        self.check_model_name(model)
        pred_kwargs = self._filter_hyperparams(
            keras.models.Sequential.predict_generator, kwargs
        )
        with self._prediction_context():
            predictions = (
                self.model[model].predict_generator(generator, **pred_kwargs)
            )
        return predictions 
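A minimal usage sketch, assuming a fitted timeserio MultiNetwork instance `multinet` with a model named "forecaster"; the `ArraySequence` helper below is hypothetical, not part of timeserio:

import numpy as np
import keras

class ArraySequence(keras.utils.Sequence):
    """Serves fixed-size batches of a NumPy array for prediction."""
    def __init__(self, x, batch_size=32):
        self.x, self.batch_size = x, batch_size

    def __len__(self):
        # Number of batches, counting the final partial batch
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        return self.x[idx * self.batch_size:(idx + 1) * self.batch_size]

# preds = multinet.predict_generator(ArraySequence(x_test), model="forecaster")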
Example 2
Source File: petroskisuch.py    From deepchem with MIT License
def predict_proba_on_generator(self, generator, transformers=[]):
    if not self.built:
      self.build()
    with self._get_tf("Graph").as_default():
      out_tensors = [x.out_tensor for x in self.outputs]
      results = []
      for feed_dict in generator:
        feed_dict = {
            self.layers[k.name].out_tensor: v
            for k, v in six.iteritems(feed_dict)
        }
        feed_dict[self._training_placeholder] = 1.0
        result = np.array(self.session.run(out_tensors, feed_dict=feed_dict))
        if len(result.shape) == 3:
          result = np.transpose(result, axes=[1, 0, 2])
        if len(transformers) > 0:
          result = undo_transforms(result, transformers)
        results.append(result)
      return np.concatenate(results, axis=0) 
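For context, the generator consumed here yields plain dictionaries mapping input layers to batch arrays, which the loop above rekeys by layer name. A schematic sketch of such a generator (all names hypothetical, not the deepchem API):

import numpy as np

def batch_feed_dicts(X, batch_size, features_layer, weights_layer):
    """Yield one feed dictionary per batch, keyed by input layer objects."""
    for start in range(0, len(X), batch_size):
        xb = X[start:start + batch_size]
        yield {features_layer: xb,
               weights_layer: np.ones((len(xb), 1))}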
Example 3
Source File: tensor_graph.py    From PADME with MIT License
def predict_on_generator(self, generator, transformers=[], outputs=None):
    """
    Parameters
    ----------
    generator: Generator
      Generator that constructs feed dictionaries for TensorGraph.
    transformers: list
      List of dc.trans.Transformers.
    outputs: object
      If outputs is None, then will assume outputs = self.outputs.
      If outputs is a Layer/Tensor, then will evaluate and return as a
      single ndarray. If outputs is a list of Layers/Tensors, will return a list
      of ndarrays.
    Returns
    -------
    y_pred: numpy ndarray of shape (n_samples, n_classes*n_tasks)
    """
    return self._predict(generator, transformers, outputs, False) 
Example 4
Source File: graph_models.py    From PADME with MIT License
def predict_proba_on_generator(self, generator, transformers=[]):
    """
            Returns:
              y_pred: numpy ndarray of shape (n_samples, n_classes*n_tasks)
            """
    if not self.built:
      self.build()
    with self._get_tf("Graph").as_default():
      out_tensors = [x.out_tensor for x in self.outputs]
      results = []
      for feed_dict in generator:
        # Extract number of unique samples in the batch from w_b
        n_valid_samples = len(np.nonzero(np.sum(feed_dict[self.weights], 1))[0])
        feed_dict = {
            self.layers[k.name].out_tensor: v
            for k, v in six.iteritems(feed_dict)
        }
        feed_dict[self._training_placeholder] = 0.0
        result = np.array(self.session.run(out_tensors, feed_dict=feed_dict))
        if len(result.shape) == 3:
          result = np.transpose(result, axes=[1, 0, 2])
        result = undo_transforms(result, transformers)
        # Only fetch the first set of unique samples
        results.append(result[:n_valid_samples])
      return np.concatenate(results, axis=0) 
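The n_valid_samples line counts rows whose weight vector is not all zero, which drops the padding rows used to fill the final batch. A standalone illustration of the same computation:

import numpy as np

w_b = np.array([[1., 1.],
                [1., 0.],
                [0., 0.]])  # last row is padding with all-zero weights
n_valid_samples = len(np.nonzero(np.sum(w_b, 1))[0])
print(n_valid_samples)  # 2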
Example 5
Source File: keras_model.py    From deepchem with MIT License
def predict_on_generator(self,
                           generator,
                           transformers=[],
                           outputs=None,
                           output_types=None):
    """
    Parameters
    ----------
    generator: generator
      this should generate batches, each represented as a tuple of the form
      (inputs, labels, weights).
    transformers: list of dc.trans.Transformers
      Transformers that the input data has been transformed by.  The output
      is passed through these transformers to undo the transformations.
    outputs: Tensor or list of Tensors
      The outputs to return.  If this is None, the model's
      standard prediction outputs will be returned.
      Alternatively one or more Tensors within the model may be
      specified, in which case the output of those Tensors will
      be returned. If outputs is specified, output_types must be
      None.
    output_types: String or list of Strings
      If specified, all outputs of this type will be retrieved
      from the model. If output_types is specified, outputs must
      be None.
    Returns
    -------
    a NumPy array if the model produces a single output, or a list of arrays
    if it produces multiple outputs
    """
    return self._predict(generator, transformers, outputs, False, output_types) 
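A sketch of the generator contract this method expects, yielding (inputs, labels, weights) tuples; `model` is assumed to be a fitted dc.models.KerasModel and the helper is hypothetical:

def batches(X, batch_size=32):
    for start in range(0, len(X), batch_size):
        xb = X[start:start + batch_size]
        # labels and weights are not needed at prediction time
        yield ([xb], [], [])

# preds = model.predict_on_generator(batches(X_test))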
Example 6
Source File: models.py    From keras-lambda with MIT License
def predict_generator(self, generator, steps,
                          max_queue_size=10, workers=1,
                          use_multiprocessing=False, verbose=0):
        """Generates predictions for the input samples from a data generator.

        The generator should return the same kind of data as accepted by
        `predict_on_batch`.

        # Arguments
            generator: generator yielding batches of input samples.
            steps: Total number of steps (batches of samples)
                to yield from `generator` before stopping.
            max_queue_size: maximum size for the generator queue
            workers: maximum number of processes to spin up
            use_multiprocessing: if True, use process based threading.
                Note that because this implementation
                relies on multiprocessing, you should not pass
                non picklable arguments to the generator
                as they can't be passed easily to children processes.
            verbose: verbosity mode, 0 or 1.

        # Returns
            A Numpy array of predictions.
        """
        if self.model is None:
            self.build()
        return self.model.predict_generator(generator, steps,
                                            max_queue_size=max_queue_size,
                                            workers=workers,
                                            use_multiprocessing=use_multiprocessing,
                                            verbose=verbose) 
Example 7
Source File: model.py    From camera_identification with MIT License
def predict_generator(self, generator):
        self.enable_predict_mode()
        result = []
        start_time = time.time()
        for step_no, X in enumerate(generator):
            if isinstance(X, (tuple, list)):
                X = X[0]

            y_pred = self.predict_on_batch(X)
            result.append(y_pred)
            print("[{1} s] Predict step {0}".format(step_no, time.time() - start_time))

        return np.concatenate(result) 
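The isinstance guard lets the same loop consume generators that yield bare inputs or (X, y) tuples; both of the following shapes work (hypothetical sketch):

def inputs_only(X, batch_size=8):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size]

def inputs_and_targets(X, y, batch_size=8):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size], y[i:i + batch_size]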
Example 8
Source File: models.py    From KerasNeuralFingerprint with MIT License
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
        '''Generates predictions for the input samples from a data generator.
        The generator should return the same kind of data as accepted by
        `predict_on_batch`.

        # Arguments
            generator: generator yielding batches of input samples.
            val_samples: total number of samples to generate from `generator`
                before returning.
            max_q_size: maximum size for the generator queue
            nb_worker: maximum number of processes to spin up
            pickle_safe: if True, use process based threading. Note that because
                this implementation relies on multiprocessing, you should not pass
                non picklable arguments to the generator as they can't be passed
                easily to children processes.

        # Returns
            A Numpy array of predictions.
        '''
        if self.model is None:
            self.build()
        if nb_worker > 1 and not pickle_safe:
            warnings.warn('The "nb_worker" argument is deprecated when pickle_safe is False')
            nb_worker = 1  # For backward compatibility
        return self.model.predict_generator(generator, val_samples,
                                            max_q_size=max_q_size,
                                            nb_worker=nb_worker,
                                            pickle_safe=pickle_safe) 
Example 9
Source File: predict.py    From sign-language with MIT License
def predict_onfeature_generator(sFeatureDir:str, sModelPath:str, oClasses:VideoClasses, nBatchSize:int = 16):    
    """ Predict labels for all features in given directory on saved model 
    
    Returns 
        fAccuracy
        arPredictions (dim = nSamples)
        arProbabilities (dim = (nSamples, nClasses)) 
        list of labels (groundtruth)
    """
        
    # load the I3D top network
    print("Load model %s ..." % sModelPath)
    keModel = keras.models.load_model(sModelPath)

    # Load video features
    genFeatures = FeaturesGenerator(sFeatureDir, nBatchSize,
        keModel.input_shape[1:], oClasses.liClasses, bShuffle = False)
    if genFeatures.nSamples == 0: raise ValueError("No feature files detected, prediction stopped")

    # predict
    print("Predict with generator on %s ..." % sFeatureDir)
    arProba = keModel.predict_generator(
        generator = genFeatures, 
        workers = 1,                 
        use_multiprocessing = False,
        verbose = 1)
    if arProba.shape[0] != genFeatures.nSamples: raise ValueError("Unexpected number of predictions")

    arPred = arProba.argmax(axis=1)
    liLabels = list(genFeatures.dfSamples.sLabel)
    fAcc = np.mean(liLabels == oClasses.dfClass.loc[arPred, "sClass"])
    
    return fAcc, arPred, arProba, liLabels 
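The final accuracy line maps each predicted class index back to a class name through the class table. A tiny self-contained illustration of the same pattern, with made-up data:

import numpy as np
import pandas as pd

dfClass = pd.DataFrame({"sClass": ["hello", "thanks", "yes"]})
arPred = np.array([0, 2, 2])       # argmax indices from the model
liLabels = ["hello", "yes", "no"]  # ground-truth labels
fAcc = np.mean(liLabels == dfClass.loc[arPred, "sClass"])
print(fAcc)  # 0.666...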
Example 10
Source File: models.py    From deepQuest with BSD 3-Clause "New" or "Revised" License
def predict_generator(self, generator, steps,
                          max_queue_size=10, workers=1,
                          use_multiprocessing=False, verbose=0):
        """Generates predictions for the input samples from a data generator.

        The generator should return the same kind of data as accepted by
        `predict_on_batch`.

        # Arguments
            generator: generator yielding batches of input samples.
            steps: Total number of steps (batches of samples)
                to yield from `generator` before stopping.
            max_queue_size: maximum size for the generator queue
            workers: maximum number of processes to spin up
            use_multiprocessing: if True, use process based threading.
                Note that because this implementation
                relies on multiprocessing, you should not pass
                non picklable arguments to the generator
                as they can't be passed easily to children processes.
            verbose: verbosity mode, 0 or 1.

        # Returns
            A Numpy array of predictions.
        """
        if not self.built:
            self.build()
        return self.model.predict_generator(generator, steps,
                                            max_queue_size=max_queue_size,
                                            workers=workers,
                                            use_multiprocessing=use_multiprocessing,
                                            verbose=verbose) 
Example 11
Source File: image_classifier.py    From imageatm with Apache License 2.0
def predict_generator(self, data_generator: DataGenerator, **kwargs) -> np.ndarray:
        """
        Generates predictions for the input samples from a data generator.

        Args:
            data_generator: Input samples from a data generator.
            workers: Maximum number of processes.
            use_multiprocessing: Use process based threading.
            verbose: Verbosity mode.

        Returns:
            A Numpy array (or list of arrays) of predictions.
        """
        return self.model.predict_generator(data_generator, **kwargs)
Example 12
Source File: models.py    From DeepLearning_Wavelet-LSTM with MIT License
def predict_generator(self, generator, steps=None,
                          max_queue_size=10, workers=1,
                          use_multiprocessing=False, verbose=0):
        """Generates predictions for the input samples from a data generator.

        The generator should return the same kind of data as accepted by
        `predict_on_batch`.

        # Arguments
            generator: generator yielding batches of input samples.
            steps: Total number of steps (batches of samples)
                to yield from `generator` before stopping.
                Optional for `Sequence`: if unspecified, will use
                the `len(generator)` as a number of steps.
            max_queue_size: maximum size for the generator queue
            workers: maximum number of processes to spin up
            use_multiprocessing: if True, use process based threading.
                Note that because this implementation
                relies on multiprocessing, you should not pass
                non picklable arguments to the generator
                as they can't be passed easily to children processes.
            verbose: verbosity mode, 0 or 1.

        # Returns
            A Numpy array of predictions.
        """
        if not self.built:
            self.build()
        return self.model.predict_generator(generator, steps,
                                            max_queue_size=max_queue_size,
                                            workers=workers,
                                            use_multiprocessing=use_multiprocessing,
                                            verbose=verbose) 
Example 13
Source File: predict.py    From U-Time with MIT License
def predict_on_generator(model, generator, argmax=False):
    """
    Takes a tf.keras model and uses it to predict on all batches in a generator.
    Stacks the predictions over all batches on axis 0 (vstack).

    Args:
        model:      A tf.keras model instance. Should accept batches as output
                    from 'generator'
        generator:  A generator object yielding one or more batches of data to
                    predict on
        argmax:     Whether to return argmax values or model output values

    Returns:
        If argmax is true, returns integer predictions of shape [-1, 1].
        Otherwise, returns floating values of shape [-1, n_classes]
    """
    pred = []
    end_of_data = False
    while not end_of_data:
        try:
            X_batch, _ = next(generator)
        except StopIteration:
            end_of_data = True
        else:
            # Predict
            pred_batch = model.predict_on_batch(X_batch)
            if argmax:
                pred_batch = pred_batch.argmax(-1).reshape(-1, 1)
            pred.append(pred_batch)
    return np.vstack(pred) 
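A quick usage sketch with a throwaway (X, y) batch generator; `model` is assumed to be a compiled tf.keras model and the helper is hypothetical:

def xy_batches(X, y, batch_size=16):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size], y[i:i + batch_size]

# probs = predict_on_generator(model, xy_batches(X_val, y_val))                 # [-1, n_classes]
# labels = predict_on_generator(model, xy_batches(X_val, y_val), argmax=True)  # [-1, 1]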
Example 14
Source File: modelwrapper.py    From baal with Apache License 2.0
def predict_on_dataset_generator(self, dataset: Dataset, batch_size: int, iterations: int,
                                     use_cuda: bool, workers: int = 4,
                                     collate_fn: Optional[Callable] = None, half=False):
        """
        Use the model to predict on a dataset `iterations` time.

        Args:
            dataset (Dataset): Dataset to predict on.
            batch_size (int):  Batch size to use during prediction.
            iterations (int): Number of iterations per sample.
            use_cuda (bool): Use CUDA or not.
            workers (int): Number of workers to use.
            collate_fn (Optional[Callable]): The collate function to use.
            half (bool): If True use half precision.

        Notes:
            The "batch" is made of `batch_size` * `iterations` samples.

        Returns:
            A generator yielding arrays of shape
            [batch_size, n_classes, ..., n_iterations], one batch at a time.
        """
        self.eval()
        if len(dataset) == 0:
            return None

        log.info("Start Predict", dataset=len(dataset))
        collate_fn = collate_fn or default_collate
        loader = DataLoader(dataset,
                            batch_size,
                            False, num_workers=workers,
                            collate_fn=collate_fn)
        for idx, (data, _) in enumerate(tqdm(loader, total=len(loader), file=sys.stdout)):

            pred = self.predict_on_batch(data, iterations, use_cuda)
            pred = map_on_tensor(lambda x: x.detach(), pred)
            if half:
                pred = map_on_tensor(lambda x: x.half(), pred)
            yield map_on_tensor(lambda x: x.cpu().numpy(), pred) 
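The yielded arrays stack the `iterations` Monte-Carlo passes on the last axis; a runnable shape illustration with a stand-in generator (hypothetical, not the baal API):

import numpy as np

def fake_mc_generator(n_batches=3, batch_size=32, n_classes=10, iterations=20):
    """Stand-in for predict_on_dataset_generator, for shape illustration only."""
    for _ in range(n_batches):
        yield np.random.rand(batch_size, n_classes, iterations)

# Averaging over the last axis collapses the MC iterations into a mean prediction.
mc_means = [batch.mean(axis=-1) for batch in fake_mc_generator()]
print(mc_means[0].shape)  # (32, 10)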
Example 15
Source File: models.py    From lambda-packs with MIT License
def predict_generator(self,
                        generator,
                        steps,
                        max_q_size=10,
                        workers=1,
                        pickle_safe=False,
                        verbose=0):
    """Generates predictions for the input samples from a data generator.

    The generator should return the same kind of data as accepted by
    `predict_on_batch`.

    Arguments:
        generator: generator yielding batches of input samples.
        steps: Total number of steps (batches of samples)
            to yield from `generator` before stopping.
        max_q_size: maximum size for the generator queue
        workers: maximum number of processes to spin up
        pickle_safe: if True, use process based threading.
            Note that because this implementation
            relies on multiprocessing, you should not pass
            non picklable arguments to the generator
            as they can't be passed easily to children processes.
        verbose: verbosity mode, 0 or 1.

    Returns:
        A Numpy array of predictions.
    """
    if self.model is None:
      self.build()
    return self.model.predict_generator(
        generator,
        steps,
        max_q_size=max_q_size,
        workers=workers,
        pickle_safe=pickle_safe,
        verbose=verbose) 
Example 16
Source File: model.py    From poutyne with GNU Lesser General Public License v3.0
def predict_generator(self, generator, *, steps=None, concatenate_returns=None):
        """
        Returns the predictions of the network given batches of samples ``x``, where the tensors are
        converted into Numpy arrays.

        Args:
            generator: Generator-like object for the dataset. The generator must yield a batch of
                samples. See the :func:`fit_generator()` method for details on the types of generators
                supported. This should only yield input data ``x`` and not the target ``y``.
            steps (int, optional): Number of iterations done on ``generator``.
                (Defaults to the number of steps needed to see the entire dataset)
            concatenate_returns (bool, optional): Whether to concatenate the predictions
                or the ground truths when returning them. Currently defaults to False but
                will default to True in the next version. A warning is raised if this argument
                is not set in the current version, but the warning will be removed in the next
                version. Disabling the warning as instructed in its message switches to the new
                behavior when ``concatenate_returns`` is not set.

        Returns:
            List of the predictions of each batch with tensors converted into Numpy arrays.
        """
        if concatenate_returns is None and warning_settings['concatenate_returns'] == 'warn':
            warnings.warn("In the next version of Poutyne, the argument 'concatenate_returns' "
                          "of 'predict_generator' will default to True. To avoid this warning, "
                          "set 'concatenate_returns' to an appropriate boolean value in the "
                          "keyword arguments or get the new behavior by disabling this warning with\n"
                          "from poutyne.framework import warning_settings\n"
                          "warning_settings['concatenate_returns'] = 'ignore'\n"
                          "This warning will be removed in the next version.")
            concatenate_returns = False
        elif concatenate_returns is None:
            concatenate_returns = True

        if steps is None and hasattr(generator, '__len__'):
            steps = len(generator)
        pred_y = []
        with self._set_training_mode(False):
            for _, x in _get_step_iterator(steps, generator):
                x = self._process_input(x)
                x = x if isinstance(x, (tuple, list)) else (x, )
                pred_y.append(torch_to_numpy(self.network(*x)))
        if concatenate_returns:
            return _concat(pred_y)
        return pred_y 
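Usage sketch, assuming a fitted poutyne Model named `model`; per the docstring, the generator yields inputs only:

from math import ceil

def x_batches(X, batch_size=32):
    for i in range(0, len(X), batch_size):
        yield X[i:i + batch_size]

# preds = model.predict_generator(x_batches(X_test), steps=ceil(len(X_test) / 32),
#                                 concatenate_returns=True)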
Example 17
Source File: utils.py    From deepcpg with MIT License
def predict_generator(model, generator, nb_sample=None):
    """Predict model outputs using generator.

    Calls `model.predict` for at most `nb_sample` samples from `generator`.

    Parameters
    ----------
    model: Keras model
        Model to be evaluated.
    generator: generator
        Data generator.
    nb_sample: int
        Maximum number of samples.

    Returns
    -------
    list
        list [`inputs`, `outputs`, `predictions`].
    """
    data = None
    nb_seen = 0
    for data_batch in generator:
        if not isinstance(data_batch, list):
            data_batch = list(data_batch)

        if nb_sample:
            # Reduce batch size if needed
            nb_left = nb_sample - nb_seen
            for data_item in data_batch:
                for key, value in data_item.items():
                    data_item[key] = data_item[key][:nb_left]

        preds = model.predict(data_batch[0])
        if not isinstance(preds, list):
            preds = [preds]
        preds = {name: pred for name, pred in zip(model.output_names, preds)}

        if not data:
            data = [dict() for i in range(len(data_batch))]
        dat.add_to_dict(preds, data[0])
        for i in range(1, len(data_batch)):
            dat.add_to_dict(data_batch[i], data[i])

        nb_seen += len(list(preds.values())[0])
        if nb_sample and nb_seen >= nb_sample:
            break

    for i in range(len(data)):
        data[i] = dat.stack_dict(data[i])
    return data 
Example 18
Source File: training.py    From GraphicDesignPatternByPython with MIT License
def predict_generator(self, generator,
                          steps=None,
                          max_queue_size=10,
                          workers=1,
                          use_multiprocessing=False,
                          verbose=0):
        """Generates predictions for the input samples from a data generator.

        The generator should return the same kind of data as accepted by
        `predict_on_batch`.

        # Arguments
            generator: Generator yielding batches of input samples
                or an instance of Sequence (keras.utils.Sequence)
                object in order to avoid duplicate data
                when using multiprocessing.
            steps: Total number of steps (batches of samples)
                to yield from `generator` before stopping.
                Optional for `Sequence`: if unspecified, will use
                the `len(generator)` as a number of steps.
            max_queue_size: Maximum size for the generator queue.
            workers: Integer. Maximum number of processes to spin up
                when using process based threading.
                If unspecified, `workers` will default to 1. If 0, will
                execute the generator on the main thread.
            use_multiprocessing: If `True`, use process based threading.
                Note that because
                this implementation relies on multiprocessing,
                you should not pass
                non picklable arguments to the generator
                as they can't be passed
                easily to children processes.
            verbose: verbosity mode, 0 or 1.

        # Returns
            Numpy array(s) of predictions.

        # Raises
            ValueError: In case the generator yields
                data in an invalid format.
        """
        return training_generator.predict_generator(
            self, generator,
            steps=steps,
            max_queue_size=max_queue_size,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            verbose=verbose) 
Example 19
Source File: models.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def predict_generator(self,
                        generator,
                        steps,
                        max_queue_size=10,
                        workers=1,
                        use_multiprocessing=False,
                        verbose=0,
                        **kwargs):
    """Generates predictions for the input samples from a data generator.

    The generator should return the same kind of data as accepted by
    `predict_on_batch`.

    Arguments:
        generator: generator yielding batches of input samples.
        steps: Total number of steps (batches of samples)
            to yield from `generator` before stopping.
        max_queue_size: maximum size for the generator queue
        workers: maximum number of processes to spin up
        use_multiprocessing: if True, use process based threading.
            Note that because this implementation
            relies on multiprocessing, you should not pass
            non picklable arguments to the generator
            as they can't be passed easily to children processes.
        verbose: verbosity mode, 0 or 1.
        **kwargs: support for legacy arguments.

    Returns:
        A Numpy array of predictions.

    Raises:
        ValueError: In case the generator yields
            data in an invalid format.
    """
    # Legacy support
    if 'max_q_size' in kwargs:
      max_queue_size = kwargs.pop('max_q_size')
      logging.warning('The argument `max_q_size` has been renamed '
                      '`max_queue_size`. Update your method calls accordingly.')
    if 'pickle_safe' in kwargs:
      use_multiprocessing = kwargs.pop('pickle_safe')
      logging.warning('The argument `pickle_safe` has been renamed '
                      '`use_multiprocessing`. '
                      'Update your method calls accordingly.')
    if kwargs:
      raise ValueError('Unrecognized keyword arguments: ' + str(kwargs))

    if not self.built:
      self.build()
    return self.model.predict_generator(
        generator,
        steps,
        max_queue_size=max_queue_size,
        workers=workers,
        use_multiprocessing=use_multiprocessing,
        verbose=verbose) 
Example 20
Source File: gan.py    From deepchem with MIT License
def predict_gan_generator(self,
                            batch_size=1,
                            noise_input=None,
                            conditional_inputs=[],
                            generator_index=0):
    """Use the GAN to generate a batch of samples.

    Parameters
    ----------
    batch_size: int
      the number of samples to generate.  If either noise_input or
      conditional_inputs is specified, this argument is ignored since the batch
      size is then determined by the size of that argument.
    noise_input: array
      the value to use for the generator's noise input.  If None (the default),
      get_noise_batch() is called to generate a random input, so each call will
      produce a new set of samples.
    conditional_inputs: list of arrays
      the values to use for all conditional inputs.  This must be specified if
      the GAN has any conditional inputs.
    generator_index: int
      the index of the generator (between 0 and n_generators-1) to use for
      generating the samples.

    Returns
    -------
    An array (if the generator has only one output) or list of arrays (if it has
    multiple outputs) containing the generated samples.
    """
    if noise_input is not None:
      batch_size = len(noise_input)
    elif len(conditional_inputs) > 0:
      batch_size = len(conditional_inputs[0])
    if noise_input is None:
      noise_input = self.get_noise_batch(batch_size)
    inputs = [noise_input]
    inputs += conditional_inputs
    inputs = [i.astype(np.float32) for i in inputs]
    pred = self.generators[generator_index](
        _list_or_tensor(inputs), training=False)
    pred = pred.numpy()
    return pred
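Usage sketch, assuming `gan` is a trained deepchem GAN; when noise_input is None, get_noise_batch draws a fresh random input on every call:

# Ten unconditioned samples from the first generator:
# samples = gan.predict_gan_generator(batch_size=10)

# Fixing the noise makes generation repeatable; batch_size is then inferred:
# noise = gan.get_noise_batch(10)
# samples = gan.predict_gan_generator(noise_input=noise)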