Python scipy.special.softmax() Examples
The following are 17 code examples of scipy.special.softmax(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module scipy.special, or try the search function.
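
Before diving into the project examples, here is a minimal sketch of what scipy.special.softmax() does (toy inputs invented for illustration): it exponentiates its input and normalizes along the chosen axis, so every slice along that axis sums to 1.

import numpy as np
from scipy.special import softmax

logits = np.array([[1.0, 2.0, 3.0],
                   [1.0, 1.0, 1.0]])
probs = softmax(logits, axis=1)  # normalize each row independently
print(probs.sum(axis=1))         # [1. 1.]
print(softmax(logits))           # axis=None (the default) normalizes the flattened array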
Example #1
Source File: array_utils.py From baal with Apache License 2.0 | 6 votes |
def to_prob(probabilities: np.ndarray):
    """If the probabilities array is not a distribution, apply softmax to it.

    Args:
        probabilities (array): [batch_size, num_classes, ...]

    Returns:
        Same shape as probabilities.
    """
    not_bounded = np.min(probabilities) < 0 or np.max(probabilities) > 1.0
    multiclass = probabilities.shape[1] > 1
    sum_to_one = np.allclose(probabilities.sum(1), 1)
    if not_bounded or (multiclass and not sum_to_one):
        if multiclass:
            probabilities = softmax(probabilities, 1)
        else:
            probabilities = expit(probabilities)
    return probabilities
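
A quick illustration of the two branches above, with toy arrays that are not part of the baal code: multi-class logits go through softmax, while a single-column score array goes through the sigmoid (expit).

import numpy as np
from scipy.special import softmax, expit

multiclass_logits = np.array([[2.0, -1.0, 0.5]])
print(softmax(multiclass_logits, 1).sum(1))  # rows sum to 1 after softmax
binary_scores = np.array([[0.3], [-1.2]])
print(expit(binary_scores))                  # each score squashed into (0, 1)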
Example #2
Source File: context_search.py From axcell with Apache License 2.0 | 6 votes |
def match(self, contexts):
    assert len(contexts) == len(self.context_noise)
    n = len(self._taxonomy)
    context_logprobs = np.zeros(n)
    axes_context_logprobs = _to_typed_list([
        np.zeros(len(self._taxonomy_tasks)),
        np.zeros(len(self._taxonomy_datasets)),
        np.zeros(len(self._taxonomy_metrics)),
    ])
    for context, noise, ms_noise, ts_noise in zip(contexts, self.context_noise,
                                                  self.metrics_noise, self.task_noise):
        self.compute_context_logprobs(context, noise, ms_noise, ts_noise,
                                      context_logprobs, axes_context_logprobs)
    keys = self.taxonomy.taxonomy
    logprobs = context_logprobs
    # keys, logprobs = zip(*context_logprobs.items())
    probs = softmax(np.array(logprobs))
    axes_probs = [softmax(np.array(a)) for a in axes_context_logprobs]
    return (
        zip(keys, probs),
        zip(self._taxonomy_tasks, axes_probs[0]),
        zip(self._taxonomy_datasets, axes_probs[1]),
        zip(self._taxonomy_metrics, axes_probs[2])
    )
Example #3
Source File: run_inference.py From nlp_projects with MIT License | 5 votes |
def inference(args, model, tokenizer, prefix=""):
    inf_task = args.task_name
    inf_dataset = load_example(args, inf_task, tokenizer)
    inf_sampler = SequentialSampler(inf_dataset)
    inf_dataloader = DataLoader(inf_dataset, sampler=inf_sampler, batch_size=1)

    # Inference!
    logger.info("***** Running inference {} *****".format(prefix))
    preds = None
    out_label_ids = None
    for batch in tqdm(inf_dataloader, desc="Inferencing"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
                      'labels': batch[3]}
            outputs = model(**inputs)
            inf_loss, logits = outputs[:2]
            pred_arr = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
            logger.info("pred_arr: %s", pred_arr)
            pred_prob = np.squeeze(softmax(pred_arr, axis=1))
            logger.info("[0]: %s, [1]: %s", pred_prob[0], pred_prob[1])

        if args.output_mode == "classification":
            pred = np.argmax(pred_arr, axis=1)
        elif args.output_mode == "regression":
            pred = np.squeeze(pred_arr)

        if pred == 0:
            logger.info("Text is negative with confidence: %d ", pred_prob[0] * 100)
        else:
            logger.info("Text is positive with confidence: %d ", pred_prob[1] * 100)
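
The squeeze-after-softmax pattern above reduces the (1, num_classes) logits of a single-sentence batch to a flat probability vector. A standalone sketch with an invented logit row:

import numpy as np
from scipy.special import softmax

logits = np.array([[1.2, -0.3]])                 # one sentence, two classes
pred_prob = np.squeeze(softmax(logits, axis=1))  # shape (1, 2) -> (2,)
print(pred_prob)                                 # approx [0.818 0.182]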
Example #4
Source File: test_listnet.py From allRank with Apache License 2.0 | 5 votes |
def test_listnet_stable_for_very_small_prediction():
    y_pred = [0.5, -1e30]
    y_true = [1.0, 0.0]
    result = listNet_wrap(y_pred, y_true)
    expected = -np.sum(softmax(y_true) * np.log(softmax(y_pred) + DEFAULT_EPS))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
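
The expected value in this test is a ListNet-style cross entropy between softmaxed labels and softmaxed predictions. A standalone sketch of the same computation (the eps value here is a hypothetical stand-in for allRank's DEFAULT_EPS) shows why the epsilon matters when one prediction is extremely small:

import numpy as np
from scipy.special import softmax

y_pred = np.array([0.5, -1e30])  # second score underflows to probability 0
y_true = np.array([1.0, 0.0])
eps = 1e-10                      # hypothetical stand-in for DEFAULT_EPS
loss = -np.sum(softmax(y_true) * np.log(softmax(y_pred) + eps))
print(np.isfinite(loss))         # True: eps keeps log() away from -inf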
Example #5
Source File: test_listnet.py From allRank with Apache License 2.0 | 5 votes |
def test_listnet_simple():
    y_pred = [0.5, 0.2]
    y_true = [1.0, 0.0]
    result = listNet_wrap(y_pred, y_true, eps=0.0)
    expected = -np.sum(softmax(y_true) * np.log(softmax(y_pred)))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example #6
Source File: test_binary_listnet.py From allRank with Apache License 2.0 | 5 votes |
def test_binary_listnet_ignores_padded_value():
    y_pred = [0.5, 0.2, 0.5]
    y_true = [1.0, 0.0, PADDED_Y_VALUE]
    result = binary_listNet_wrap(y_pred, y_true)
    expected = -np.sum(y_true[:2] * np.log(softmax(y_pred[:2]) + DEFAULT_EPS))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example #7
Source File: test_binary_listnet.py From allRank with Apache License 2.0 | 5 votes |
def test_binary_listnet_stable_for_very_small_prediction():
    y_pred = [0.5, -1e30]
    y_true = [1.0, 0.0]
    result = binary_listNet_wrap(y_pred, y_true)
    expected = -np.sum(y_true * np.log(softmax(y_pred) + DEFAULT_EPS))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected, abs=1e-9)
Example #8
Source File: test_binary_listnet.py From allRank with Apache License 2.0 | 5 votes |
def test_binary_listnet_simple():
    y_pred = [0.5, 0.2]
    y_true = [1.0, 0.0]
    result = binary_listNet_wrap(y_pred, y_true, eps=0.0)
    expected = -np.sum(y_true * np.log(softmax(y_pred)))

    assert not math.isnan(result) and not math.isinf(result)
    assert result == approx(expected)
Example #9
Source File: metrics.py From mt-dnn with MIT License | 5 votes |
def metric_multi_accuracy(logits, labels, options_num):
    logits = np.reshape(softmax(logits, -1)[:, 1], (len(logits) // options_num, options_num))
    labels = np.argmax(np.reshape(labels, (len(labels) // options_num, options_num)), -1)
    return metric_accuracy(logits, labels)
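
A self-contained toy version of the reshape trick above (all numbers invented): each question contributes options_num consecutive rows of binary logits, the softmaxed "positive" probability scores each option, and an argmax within each question picks the predicted answer.

import numpy as np
from scipy.special import softmax

options_num = 3  # hypothetical: 2 questions with 3 answer options each
logits = np.array([[0.1, 2.0], [0.3, 0.1], [0.2, 0.4],
                   [1.0, 0.2], [0.1, 3.0], [0.5, 0.5]])
labels = np.array([1, 0, 0, 0, 1, 0])

scores = np.reshape(softmax(logits, -1)[:, 1], (len(logits) // options_num, options_num))
preds = np.argmax(scores, -1)
truth = np.argmax(np.reshape(labels, (-1, options_num)), -1)
print((preds == truth).mean())  # 1.0: both toy questions answered correctly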
Example #10
Source File: data_generator.py From leaf with BSD 2-Clause "Simplified" License | 5 votes |
def _generate_y(self, x, cluster_mean):
    model_info = np.random.normal(loc=cluster_mean, scale=0.1, size=cluster_mean.shape)
    w = np.matmul(self.Q, model_info)

    num_samples = x.shape[0]
    prob = softmax(np.matmul(x, w) + np.random.normal(loc=0., scale=0.1,
                                                      size=(num_samples, self.num_classes)), axis=1)

    y = np.argmax(prob, axis=1)
    return y, w, model_info
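
Stripped of the class machinery, the generator draws labels from a noisy linear model pushed through softmax. A minimal standalone sketch (shapes and seed chosen arbitrarily):

import numpy as np
from scipy.special import softmax

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))            # 4 samples, 3 features
w = rng.normal(size=(3, 5))            # weights for 5 classes
noise = rng.normal(scale=0.1, size=(4, 5))
prob = softmax(x @ w + noise, axis=1)  # per-sample class distribution
y = np.argmax(prob, axis=1)            # hard labels
print(y.shape, prob.sum(axis=1))       # (4,) and a row of ones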
Example #11
Source File: helpers.py From AIF360 with Apache License 2.0 | 5 votes |
def get_xhat_y_hat(prototypes, w, x):
    M = softmax(-cdist(x, prototypes), axis=1)
    x_hat = np.matmul(M, prototypes)
    y_hat = np.clip(
        np.matmul(M, w.reshape((-1, 1))),
        np.finfo(float).eps,
        1.0 - np.finfo(float).eps
    )
    return M, x_hat, y_hat
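
The softmax(-cdist(...)) pattern turns distances into soft membership weights: the closer a prototype, the larger its share of the row. A tiny illustration with made-up points:

import numpy as np
from scipy.spatial.distance import cdist
from scipy.special import softmax

x = np.array([[0.0, 0.0], [1.0, 1.0]])
prototypes = np.array([[0.0, 0.0], [2.0, 2.0]])
M = softmax(-cdist(x, prototypes), axis=1)
print(M)  # rows sum to 1; the first point leans heavily toward its own prototype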
Example #12
Source File: metrics.py From kglib with Apache License 2.0 | 5 votes |
def existence_accuracy(target, output, use_nodes=True, use_edges=True):
    if not use_nodes and not use_edges:
        raise ValueError("Nodes or edges (or both) must be used")

    tdds = utils_np.graphs_tuple_to_data_dicts(target)
    odds = utils_np.graphs_tuple_to_data_dicts(output)

    cs = []
    ss = []
    for td, od in zip(tdds, odds):
        nodes_to_predict = td["nodes"][:, 0] == 0
        xn = np.argmax(td["nodes"][:, 1:], axis=-1)
        xn = xn[nodes_to_predict]
        yn = np.argmax(softmax(od["nodes"][:, 1:], axis=1), axis=-1)
        yn = yn[nodes_to_predict]

        edges_to_predict = td["edges"][:, 0] == 0
        xe = np.argmax(td["edges"][:, 1:], axis=-1)
        xe = xe[edges_to_predict]
        ye = np.argmax(softmax(od["edges"][:, 1:], axis=1), axis=-1)
        ye = ye[edges_to_predict]

        c = []
        if use_nodes:
            c.append(xn == yn)
        if use_edges:
            c.append(xe == ye)

        c = np.concatenate(c, axis=0)
        s = np.all(c)
        cs.append(c)
        ss.append(s)

    correct = np.mean(np.concatenate(cs, axis=0))
    solved = np.mean(np.stack(ss))
    return correct, solved
Example #13
Source File: test_array_utils.py From baal with Apache License 2.0 | 5 votes |
def test_to_prob(an_array, a_binary_array):
    out = to_prob(an_array)
    assert not np.allclose(out, an_array)

    out = to_prob(a_binary_array)
    assert not np.allclose(out, a_binary_array)

    a_array_scaled = softmax(an_array, 1)
    a_binary_array_scaled = expit(a_binary_array)

    out = to_prob(a_array_scaled)
    assert np.allclose(out, a_array_scaled)

    out = to_prob(a_binary_array_scaled)
    assert np.allclose(out, a_binary_array_scaled)
Example #14
Source File: extremeClassifier.py From driverlessai-recipes with Apache License 2.0 | 5 votes |
def predict(self, probas):
    resulting_preds = np.zeros((np.max([len(x) for x in probas]), self.n_class))
    for i, c in enumerate(self.mapped_classes):
        for indx, r in enumerate(c):
            resulting_preds[:, i] += probas[indx][:, r]
    resulting_preds = resulting_preds / self.R
    # return resulting_preds / resulting_preds.sum(axis=1).reshape(-1, 1)
    return softmax(resulting_preds, axis=1)
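
Note the design choice in the final line: the commented-out alternative renormalizes by the row sum, which preserves the ratios of the averaged probabilities, whereas softmax exponentiates first and therefore reshapes them. A quick comparison on a made-up row:

import numpy as np
from scipy.special import softmax

p = np.array([[0.2, 0.3, 0.5]])
print(p / p.sum(axis=1, keepdims=True))  # [[0.2 0.3 0.5]]: ratios preserved
print(softmax(p, axis=1))                # approx [[0.289 0.320 0.391]]: flattened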
Example #15
Source File: yolo_postprocess_np.py From keras-YOLOv3-model-set with MIT License | 4 votes |
def yolo_head(prediction, anchors, num_classes, input_dims, use_softmax=False):
    '''Convert final layer features to bounding box parameters.'''
    batch_size = np.shape(prediction)[0]
    num_anchors = len(anchors)

    grid_size = np.shape(prediction)[1:3]
    # check that strides on height & width are the same
    assert input_dims[0] // grid_size[0] == input_dims[1] // grid_size[1], 'model stride mismatch.'
    stride = input_dims[0] // grid_size[0]

    prediction = np.reshape(prediction,
                            (batch_size, grid_size[0] * grid_size[1] * num_anchors, num_classes + 5))

    ################################
    # generate x_y_offset grid map
    grid_y = np.arange(grid_size[0])
    grid_x = np.arange(grid_size[1])
    x_offset, y_offset = np.meshgrid(grid_x, grid_y)

    x_offset = np.reshape(x_offset, (-1, 1))
    y_offset = np.reshape(y_offset, (-1, 1))

    x_y_offset = np.concatenate((x_offset, y_offset), axis=1)
    x_y_offset = np.tile(x_y_offset, (1, num_anchors))
    x_y_offset = np.reshape(x_y_offset, (-1, 2))
    x_y_offset = np.expand_dims(x_y_offset, 0)

    ################################
    # Log space transform of the height and width
    anchors = np.tile(anchors, (grid_size[0] * grid_size[1], 1))
    anchors = np.expand_dims(anchors, 0)

    box_xy = (expit(prediction[..., :2]) + x_y_offset) / np.array(grid_size)[::-1]
    box_wh = (np.exp(prediction[..., 2:4]) * anchors) / np.array(input_dims)[::-1]

    # Sigmoid objectness scores
    objectness = expit(prediction[..., 4])  # p_o (objectness score)
    objectness = np.expand_dims(objectness, -1)  # To make the same number of values for axis 0 and 1

    if use_softmax:
        # Softmax class scores
        class_scores = softmax(prediction[..., 5:], axis=-1)
    else:
        # Sigmoid class scores
        class_scores = expit(prediction[..., 5:])

    return np.concatenate([box_xy, box_wh, objectness, class_scores], axis=2)
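
The use_softmax flag picks between two common conventions, shown here on an invented logit row: softmax makes the class scores one mutually exclusive distribution, while per-class sigmoids score each class independently (useful for multi-label datasets).

import numpy as np
from scipy.special import softmax, expit

class_logits = np.array([[1.0, 2.0, 0.5]])
print(softmax(class_logits, axis=-1))  # sums to 1: classes compete
print(expit(class_logits))             # each in (0, 1): classes are scored independently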
Example #16
Source File: simulate.py From respy with MIT License | 4 votes |
def _sample_characteristic(states_df, options, level_dict, use_keys):
    """Sample characteristic of individuals.

    The function is used to sample the values of one state space characteristic, say
    experience. The keys of ``level_dict`` are the possible starting values of
    experience. The values of the dictionary are :class:`pandas.Series` whose index
    are covariate names and the values are the parameter values.

    ``states_df`` is used to generate all possible covariates with the existing
    information.

    For each level, the dot product of parameters and covariates determines the value
    ``z``. The softmax function converts the level-specific ``z``-values to
    probabilities. The probabilities are used to sample the characteristic.

    Parameters
    ----------
    states_df : pandas.DataFrame
        Contains the state of each individual.
    options : dict
        Options of the model.
    level_dict : dict
        A dictionary where the keys are the values distributed according to the
        probability mass function. The values are a :class:`pandas.Series` with
        covariate names as the index and parameter values.
    use_keys : bool
        Identifier for whether the keys of the level dict should be used as variable
        values or whether numeric codes should be used instead. For example, assign
        numbers to choices.

    Returns
    -------
    characteristic : numpy.ndarray
        Array with shape (n_individuals,) containing sampled values.

    """
    # Generate covariates.
    all_data = compute_covariates(
        states_df, options["covariates_all"], check_nans=True, raise_errors=False
    )

    # Calculate dot product of covariates and parameters.
    z = ()
    for level in level_dict:
        labels = level_dict[level].index
        x_beta = np.dot(
            all_data[labels].to_numpy(dtype=COVARIATES_DOT_PRODUCT_DTYPE),
            level_dict[level],
        )
        z += (x_beta,)

    # Calculate probabilities with the softmax function.
    probabilities = softmax(np.column_stack(z), axis=1)

    np.random.seed(next(options["simulation_seed_iteration"]))

    choices = level_dict if use_keys else len(level_dict)
    characteristic = _random_choice(choices, probabilities)

    return characteristic
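
Detached from respy's data structures, the core of the function is "softmax the stacked dot products, then sample one level per row". A toy sketch of that step (all names and shapes invented):

import numpy as np
from scipy.special import softmax

rng = np.random.default_rng(0)
z = np.column_stack([rng.normal(size=5), rng.normal(size=5)])  # two levels, 5 individuals
probabilities = softmax(z, axis=1)  # one probability row per individual
characteristic = np.array([rng.choice(2, p=row) for row in probabilities])
print(characteristic)  # sampled level index for each individual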
Example #17
Source File: utils_inference.py From nlp_projects with MIT License | 4 votes |
def do_inference(args, model, tokenizer):
    inf_task = args.task_name
    inf_dataset = load_example(args, inf_task, tokenizer)
    inf_sampler = SequentialSampler(inf_dataset)
    inf_dataloader = DataLoader(inf_dataset, sampler=inf_sampler, batch_size=1)

    # Inference!
    logger.info("***** Running inference *****")
    preds = None
    out_label_ids = None
    for batch in inf_dataloader:
        model.eval()
        batch = tuple(t for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM doesn't use segment_ids
                      'labels': batch[3]}
            outputs = model(**inputs)
            inf_loss, logits = outputs[:2]
            pred_arr = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
            pred_prob = np.squeeze(softmax(pred_arr, axis=1))
            logger.info("[0]: %s, [1]: %s", pred_prob[0], pred_prob[1])

        if args.output_mode == "classification":
            pred = np.argmax(pred_arr, axis=1)
        elif args.output_mode == "regression":
            pred = np.squeeze(pred_arr)

        confidence = 0
        if pred == 0:
            confidence = pred_prob[0] * 100
            logger.info("Text is negative with confidence: %d ", confidence)
        else:
            confidence = pred_prob[1] * 100
            logger.info("Text is positive with confidence: %d ", confidence)

    return (pred, confidence)