Python mxnet.ndarray.argmax() Examples

The following are 17 code examples of mxnet.ndarray.argmax(), taken from open-source projects. The original project and source file are listed above each example. You may also want to check out all available functions/classes of the module mxnet.ndarray, or try the search function.
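Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what nd.argmax does: it returns the position of the maximum value along the requested axis, as a float NDArray that usually needs an explicit cast before it can be compared with integer labels.

# Minimal sketch of basic nd.argmax behaviour; the array values are made up.
import mxnet.ndarray as nd

scores = nd.array([[0.1, 0.7, 0.2],
                   [0.6, 0.3, 0.1]])
idx = nd.argmax(scores, axis=1)        # index of the per-row maximum
print(idx.asnumpy())                   # [1. 0.] -- note the float dtype
print(idx.astype('int32').asnumpy())   # [1 0]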
Example #1
Source File: accuracy.py    From gluon-cv with Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.
        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            labels, preds = check_label_shapes(label, pred_label)

            valid = (labels.reshape(-1, 1) != self.ignore_labels).all(axis=-1)

            self.sum_metric += np.logical_and(pred_label.flat == label.flat, valid).sum()
            self.num_inst += np.sum(valid) 
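The valid mask above relies on NumPy broadcasting: reshaping the labels to a column and comparing them against the list of ignored class indices keeps only the samples whose label matches none of the ignored values. A hedged sketch of just that masking step, with made-up labels and a hypothetical ignore list:

# Sketch of the ignore-label masking used in update() above; labels,
# predictions and the ignore list are invented for illustration.
import numpy as np

label = np.array([0, 2, 255, 1, 255], dtype='int32')    # hypothetical labels
pred_label = np.array([0, 1, 3, 1, 2], dtype='int32')   # hypothetical predictions
ignore_labels = np.array([255])                          # e.g. a "void" class

# (N, 1) != (K,) broadcasts to (N, K); all(axis=-1) is True for samples
# whose label is not in ignore_labels.
valid = (label.reshape(-1, 1) != ignore_labels).all(axis=-1)
correct = np.logical_and(pred_label == label, valid).sum()   # 2 correct, valid samples
num_inst = valid.sum()                                       # 3 valid samples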
Example #2
Source File: accuracy.py    From panoptic-fpn-gluon with Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.
        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            labels, preds = check_label_shapes(label, pred_label)

            valid = (labels.reshape(-1, 1) != self.ignore_labels).all(axis=-1)

            self.sum_metric += np.logical_and(pred_label.flat == label.flat, valid).sum()
            self.num_inst += np.sum(valid) 
Example #3
Source File: test_gluon_model_export.py    From mlflow with Apache License 2.0
def test_model_save_load(gluon_model, model_data, model_path):
    _, _, test_data = model_data
    expected = nd.argmax(gluon_model(test_data), axis=1)

    mlflow.gluon.save_model(gluon_model, model_path)
    # Loading Gluon model
    model_loaded = mlflow.gluon.load_model(model_path, ctx.cpu())
    actual = nd.argmax(model_loaded(test_data), axis=1)
    assert all(expected == actual)
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
    test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
    pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
    assert all(
        np.argmax(pyfunc_preds.values, axis=1)
        == expected.asnumpy()) 
Example #4
Source File: test_gluon_model_export.py    From mlflow with Apache License 2.0
def test_model_log_load(gluon_model, model_data, model_path):
    _, _, test_data = model_data
    expected = nd.argmax(gluon_model(test_data), axis=1)

    artifact_path = "model"
    with mlflow.start_run():
        mlflow.gluon.log_model(gluon_model, artifact_path=artifact_path)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)

    # Loading Gluon model
    model_loaded = mlflow.gluon.load_model(model_uri, ctx.cpu())
    actual = nd.argmax(model_loaded(test_data), axis=1)
    assert all(expected == actual)
    # Loading pyfunc model
    pyfunc_loaded = mlflow.pyfunc.load_model(model_uri)
    test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
    pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
    assert all(
        np.argmax(pyfunc_preds.values, axis=1)
        == expected.asnumpy()) 
Example #5
Source File: test_gluon_model_export.py    From mlflow with Apache License 2.0
def test_gluon_model_serving_and_scoring_as_pyfunc(gluon_model, model_data):
    _, _, test_data = model_data
    expected = nd.argmax(gluon_model(test_data), axis=1)

    artifact_path = "model"
    with mlflow.start_run():
        mlflow.gluon.log_model(gluon_model, artifact_path=artifact_path)
        model_uri = "runs:/{run_id}/{artifact_path}".format(
            run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path)

    scoring_response = pyfunc_serve_and_score_model(
        model_uri=model_uri,
        data=pd.DataFrame(test_data.asnumpy()),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
    response_values = \
        pd.read_json(scoring_response.content, orient="records").values.astype(np.float32)
    assert all(
        np.argmax(response_values, axis=1)
        == expected.asnumpy()) 
Example #6
Source File: accuracy.py    From cascade_rcnn_gluon with Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.
        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data with class indices as values, one per sample.
        preds : list of `NDArray`
            Prediction values for samples. Each prediction value can either be the class index,
            or a vector of likelihoods for all classes.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred_label in zip(labels, preds):
            if pred_label.shape != label.shape:
                pred_label = ndarray.argmax(pred_label, axis=self.axis)
            pred_label = pred_label.asnumpy().astype('int32')
            label = label.asnumpy().astype('int32')

            labels, preds = check_label_shapes(label, pred_label)

            valid = (labels.reshape(-1, 1) != self.ignore_labels).all(axis=-1)

            self.sum_metric += np.logical_and(pred_label.flat == label.flat, valid).sum()
            self.num_inst += np.sum(valid) 
Example #7
Source File: voc_segmentation.py    From cascade_rcnn_gluon with Apache License 2.0
def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are NDarray, output 4D, target 3D
    predict = F.argmax(output, 1)
    target = target.astype(predict.dtype)
    mini = 1
    maxi = nclass
    nbins = nclass
    predict = predict.asnumpy() + 1
    target = target.asnumpy() + 1

    predict = predict * (target > 0).astype(predict.dtype)
    #intersection = predict * (F.equal(predict, target)).astype(predict.dtype)
    intersection = predict * (predict == target)
    # areas of intersection and union
    area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))
    area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))
    area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))
    area_union = area_pred + area_lab - area_inter
    return area_inter, area_union 
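Here the network output is a 4D score tensor (batch, nclass, height, width), so argmax over axis 1 collapses the class dimension into a 3D map of predicted class indices that matches the 3D target. A short sketch of just that step, assuming F is mxnet.ndarray as in the source file, with dummy shapes:

# Sketch of the argmax step in batch_intersection_union; shapes are made up.
import mxnet.ndarray as F

output = F.random.uniform(shape=(2, 21, 4, 4))   # (batch, nclass, H, W) scores
predict = F.argmax(output, 1)                    # (batch, H, W) class indices
print(predict.shape)                             # (2, 4, 4)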
Example #8
Source File: lstm_crf.py    From SNIPER-mxnet with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        vvars = nd.full((1, self.tagset_size), -10000.)
        vvars[0, self.tag2idx[START_TAG]] = 0

        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = vvars + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])
            # Now add in the emission scores, and assign vvars to the set
            # of viterbi variables we just computed
            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = vvars + self.transitions[self.tag2idx[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we dont want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag2idx[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path 
Example #9
Source File: lstm_crf.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def argmax(vec):
    # return the argmax as a python int
    idx = nd.argmax(vec, axis=1)
    return to_scalar(idx) 
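The to_scalar helper used here is defined elsewhere in the same lstm_crf.py; a hedged sketch of what such a helper typically looks like, converting the one-element float NDArray returned by nd.argmax into a plain Python int:

# Sketch of a to_scalar helper as used by argmax() above; the actual
# definition lives elsewhere in lstm_crf.py.
def to_scalar(x):
    return int(x.asscalar())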
Example #10
Source File: lstm_crf.py    From SNIPER-mxnet with Apache License 2.0
def argmax(vec):
    # return the argmax as a python int
    idx = nd.argmax(vec, axis=1)
    return to_scalar(idx) 
Example #11
Source File: model.py    From NER_BiLSTM_CRF_Chinese with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        vvars = nd.full((1, self.tagset_size), -10000., ctx=self.ctx)
        vvars[0, self.tag2idx[self.START_TAG]] = 0

        for feat in feats:
            bptrs_t = []
            viterbivars_t = []

            for next_tag in range(self.tagset_size):

                next_tag_var = vvars + self.transitions[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])

            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        terminal_var = vvars + self.transitions[self.tag2idx[self.STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        start = best_path.pop()
        assert start == self.tag2idx[self.START_TAG]
        best_path.reverse()
        return path_score, best_path 
Example #12
Source File: model.py    From NER_BiLSTM_CRF_Chinese with Apache License 2.0
def argmax(vec):
    idx = nd.argmax(vec, axis=1)
    return to_scalar(idx) 
Example #13
Source File: voc_segmentation.py    From cascade_rcnn_gluon with Apache License 2.0
def batch_pix_accuracy(output, target):
    """PixAcc"""
    # inputs are NDarray, output 4D, target 3D
    predict = F.argmax(output, 1)
    predict = predict.asnumpy() + 1
    target = target.asnumpy().astype(predict.dtype) + 1
    pixel_labeled = np.sum(target > 0)
    pixel_correct = np.sum((predict == target)*(target > 0))
    assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled 
Example #14
Source File: bnn_repurposer.py    From xfer with Apache License 2.0
def _evaluate_accuracy(self, data_iterator, net, layer_params):
        numerator = 0.
        denominator = 0.
        for i, (data, label) in enumerate(data_iterator):
            data = data.as_in_context(self._context_bnn).reshape((-1, data.shape[1]))
            label = label.as_in_context(self._context_bnn)
            replace_params_net(layer_params, net, self._context_bnn)
            output = net(data)
            predictions = nd.argmax(output, axis=1)
            numerator += nd.sum(predictions == label)
            denominator += data.shape[0]
        return (numerator / denominator).asscalar() 
Example #15
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        vvars = nd.full((1, self.tagset_size), -10000.)
        vvars[0, self.tag2idx[START_TAG]] = 0

        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = vvars + self.transitions.data()[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])
            # Now add in the emission scores, and assign vvars to the set
            # of viterbi variables we just computed
            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = vvars + self.transitions.data()[self.tag2idx[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we dont want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag2idx[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path 
Example #16
Source File: lstm_crf.py    From training_results_v0.6 with Apache License 2.0
def argmax(vec):
    # return the argmax as a python int
    idx = nd.argmax(vec, axis=1)
    return to_scalar(idx) 
Example #17
Source File: lstm_crf.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def _viterbi_decode(self, feats):
        backpointers = []

        # Initialize the viterbi variables in log space
        vvars = nd.full((1, self.tagset_size), -10000.)
        vvars[0, self.tag2idx[START_TAG]] = 0

        for feat in feats:
            bptrs_t = []  # holds the backpointers for this step
            viterbivars_t = []  # holds the viterbi variables for this step

            for next_tag in range(self.tagset_size):
                # next_tag_var[i] holds the viterbi variable for tag i at the
                # previous step, plus the score of transitioning
                # from tag i to next_tag.
                # We don't include the emission scores here because the max
                # does not depend on them (we add them in below)
                next_tag_var = vvars + self.transitions.data()[next_tag]
                best_tag_id = argmax(next_tag_var)
                bptrs_t.append(best_tag_id)
                viterbivars_t.append(next_tag_var[0, best_tag_id])
            # Now add in the emission scores, and assign vvars to the set
            # of viterbi variables we just computed
            vvars = (nd.concat(*viterbivars_t, dim=0) + feat).reshape((1, -1))
            backpointers.append(bptrs_t)

        # Transition to STOP_TAG
        terminal_var = vvars + self.transitions.data()[self.tag2idx[STOP_TAG]]
        best_tag_id = argmax(terminal_var)
        path_score = terminal_var[0, best_tag_id]

        # Follow the back pointers to decode the best path.
        best_path = [best_tag_id]
        for bptrs_t in reversed(backpointers):
            best_tag_id = bptrs_t[best_tag_id]
            best_path.append(best_tag_id)
        # Pop off the start tag (we dont want to return that to the caller)
        start = best_path.pop()
        assert start == self.tag2idx[START_TAG]  # Sanity check
        best_path.reverse()
        return path_score, best_path