Python scipy.special.logit() Examples

The following are 30 code examples of scipy.special.logit(), drawn from open-source projects. The source file and project license are noted above each snippet. You may also want to check out the other available functions and classes of the scipy.special module.
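A minimal usage sketch (assuming only NumPy and SciPy are installed): logit is the inverse of the logistic sigmoid expit, mapping probabilities in (0, 1) to the real line.

import numpy as np
from scipy.special import expit, logit

p = np.array([0.1, 0.5, 0.9])
z = logit(p)                      # log(p / (1 - p))
print(z)                          # roughly [-2.197, 0., 2.197]
assert np.allclose(expit(z), p)   # expit undoes logit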
Example #1
Source File: space.py    From bayesmark with Apache License 2.0
def __init__(self, warp="linear", values=None, range_=None):
        """Build Integer space class.

        Parameters
        ----------
        warp : {'linear', 'log', 'bilog'}
            Which warping type to apply to the space. The warping is applied in the original space. That is, in a space
            with ``warp='log'`` and ``range_=(2, 10)``, the value 2 warps to ``log(2)``, not ``-inf`` as in some other
            frameworks. There are no settings with integers that are compatible with the logit warp.
        values : None or list(int)
            Possible values for space to take. Values must be of `int` type.
        range_ : None or :class:`numpy:numpy.ndarray` of shape (2,)
            Array with (lower, upper) pair with limits of space. Note that one must specify `values` or `range_`, but
            not both. `range_` must be composed of `int`.
        """
        assert warp is not None, "warp/space not specified for int"
        Space.__init__(self, np.int_, np.round, warp, values, range_) 
Example #2
Source File: att.py    From causal-text-embeddings with MIT License
def _perturbed_model(q_t0, q_t1, g, t, q, eps):
    # helper function for psi_tmle

    h1 = t / q - ((1 - t) * g) / (q * (1 - g))
    full_q = (1.0 - t) * q_t0 + t * q_t1
    perturbed_q = full_q - eps * h1

    def q1(t_cf, epsilon):
        h_cf = t_cf * (1.0 / g) - (1.0 - t_cf) / (1.0 - g)
        full_q = (1.0 - t_cf) * q_t0 + t_cf * q_t1  # predictions from unperturbed model
        return full_q - epsilon * h_cf

    psi_init = np.mean(t * (q1(np.ones_like(t), eps) - q1(np.zeros_like(t), eps))) / q
    h2 = (q_t1 - q_t0 - psi_init) / q
    perturbed_g = expit(logit(g) - eps * h2)

    return perturbed_q, perturbed_g 
Example #3
Source File: test_cond_indep.py    From causal-text-embeddings with MIT License
def fit_treatment_model(df, term_counts):
	indices = df.post_index.values
	tc = term_counts[indices,:]
	tc = tc.toarray()
	f_z = logit(df.treatment_probability.values)
	print(f_z.shape, tc.shape)
	features = np.column_stack((f_z, tc))
	labels = df.treatment.values

	true_model = LogisticRegression(solver='liblinear')
	true_model.fit(features, labels)
	coeffs = np.array(true_model.coef_).flatten()[1:]
	print(coeffs.mean(), coeffs.std())

	np.random.shuffle(tc)
	features = np.column_stack((f_z, tc))
	permuted = LogisticRegression(solver='liblinear')
	permuted.fit(features, labels)
	permuted_coeffs = np.array(permuted.coef_).flatten()[1:]
	print(permuted_coeffs.mean(), permuted_coeffs.std())


# $E_{Z|W=1}[\log P(T=1 | W=1, Z) / P(T=1 | Z)]$
Example #4
Source File: shared_cnn.py    From eval-nas with MIT License
def update_dag_logits(self, gradient_dicts, weight_decay, max_grad=0.1):
        """
        Updates the probabilities of each path being selected using the given gradients.
        """
        dag_probs = tuple(expit(logit) for logit in self.dags_logits)
        current_average_dag_probs = tuple(np.mean(prob) for prob in dag_probs)

        for i, key in enumerate(self.all_connections):
            for grad_dict, current_average_dag_prob, dag_logits in zip(gradient_dicts, current_average_dag_probs,
                                                                       self.dags_logits):
                if key in grad_dict:
                    grad = grad_dict[key] - weight_decay * (
                            current_average_dag_prob - self.target_ave_prob)  # *expit(dag_logits[i])
                    deriv = sigmoid_derivitive(dag_logits[i])
                    logit_grad = grad * deriv
                    dag_logits[i] += np.clip(logit_grad, -max_grad, max_grad) 
Example #5
Source File: space.py    From bayesmark with Apache License 2.0
def __init__(self, warp="linear", values=None, range_=None):
        """Build Real space class.

        Parameters
        ----------
        warp : {'linear', 'log', 'logit', 'bilog'}
            Which warping type to apply to the space. The warping is applied in the original space. That is, in a space
            with ``warp='log'`` and ``range_=(2.0, 10.0)``, the value 2.0 warps to ``log(2)``, not ``-inf`` as in some
            other frameworks.
        values : None or list(float)
            Possible values for space to take. Values must be of `float` type.
        range_ : None or :class:`numpy:numpy.ndarray` of shape (2,)
            Array with (lower, upper) pair with limits of space. Note that one must specify `values` or `range_`, but
            not both. `range_` must be composed of `float`.
        """
        assert warp is not None, "warp/space not specified for real"
        Space.__init__(self, np.float_, identity, warp, values, range_) 
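Not bayesmark code, but a minimal sketch of what a 'logit' warp means in practice, assuming the search range lies strictly inside (0, 1): the optimizer works in the unbounded warped space, and points are mapped back to the original space with expit.

import numpy as np
from scipy.special import expit, logit

range_ = (0.05, 0.95)
warped_bounds = logit(np.array(range_))   # unbounded representation of the range
x_warped = warped_bounds.mean()           # e.g. a point proposed by an optimizer
x_original = expit(x_warped)              # back to the original (0, 1) space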
Example #6
Source File: train.py    From ffn with Apache License 2.0
def fixed_offsets(model, seed, fov_shifts=None):
  """Generates offsets based on a fixed list."""
  for off in itertools.chain([(0, 0, 0)], fov_shifts):
    if model.dim == 3:
      is_valid_move = seed[:,
                           seed.shape[1] // 2 + off[2],
                           seed.shape[2] // 2 + off[1],
                           seed.shape[3] // 2 + off[0],
                           0] >= logit(FLAGS.threshold)
    else:
      is_valid_move = seed[:,
                           seed.shape[1] // 2 + off[1],
                           seed.shape[2] // 2 + off[0],
                           0] >= logit(FLAGS.threshold)

    if not is_valid_move:
      continue

    yield off 
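A small illustration of why the comparison above uses logit(FLAGS.threshold) (plain NumPy/SciPy, not ffn code): if the seed holds raw logits, comparing them against logit(threshold) is equivalent to thresholding the corresponding probabilities, because expit is monotonically increasing.

import numpy as np
from scipy.special import expit, logit

threshold = 0.9
raw = np.array([1.0, 2.5, 3.0])   # hypothetical values stored on the logit scale
assert np.array_equal(raw >= logit(threshold), expit(raw) >= threshold)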
Example #7
Source File: movement.py    From ffn with Apache License 2.0
def get_policy_fn(request, ffn_model):
  """Returns a policy class based on the InferenceRequest proto."""

  if request.movement_policy_name:
    movement_policy_class = globals().get(request.movement_policy_name, None)
    if movement_policy_class is None:
      movement_policy_class = import_symbol(request.movement_policy_name)
  else:  # Default / fallback.
    movement_policy_class = FaceMaxMovementPolicy

  if request.movement_policy_args:
    kwargs = json.loads(request.movement_policy_args)
  else:
    kwargs = {}
  if 'deltas' not in kwargs:
    kwargs['deltas'] = ffn_model.deltas[::-1]
  if 'score_threshold' not in kwargs:
    kwargs['score_threshold'] = logit(request.inference_options.move_threshold)

  return lambda canvas: movement_policy_class(canvas, **kwargs) 
Example #8
Source File: mh.py    From metropolis-hastings-gans with Apache License 2.0
def _mh_sample(d_score, init_picked=0, start=1, random=np.random):
    '''Same as `mh_sample` but more obviously correct.
    '''
    assert(np.ndim(d_score) == 1 and len(d_score) > 0)
    assert(0 <= np.min(d_score) and np.max(d_score) <= 1)
    assert(init_picked < start)

    d_last = np.float_(d_score[init_picked])
    picked_round = init_picked
    for ii, d_new in enumerate(d_score[start:], start):
        d_new = np.float_(d_new)

        # Note: we might want to move to log or logit scale for disc probs if
        # this starts to create numerics issues.
        alpha = accept_prob_MH_disc(d_last, d_new)
        assert(0 <= alpha and alpha <= 1)
        if random.rand() <= alpha:
            d_last = d_new
            picked_round = ii
    return picked_round 
Example #9
Source File: classification.py    From metropolis-hastings-gans with Apache License 2.0
def fit(self, y_pred, y_true):
        assert y_true is not None
        y_pred, y_true = Calibrator.validate(y_pred, y_true)
        y_pred = logit(np.clip(y_pred, self.epsilon, 1.0 - self.epsilon))
        self.clf.fit(y_pred[:, None], y_true) 
Example #10
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _isf(self, q):
        return -sc.logit(q) 
Example #11
Source File: _continuous_distns.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _ppf(self, q):
        return sc.logit(q) 
Example #12
Source File: ate.py    From causal-text-embeddings with MIT License
def _perturbed_model_bin_outcome(q_t0, q_t1, g, t, eps):
    """
    Helper for psi_tmle_bin_outcome

    Returns the value of the perturbed predictor q_eps(t, x), where q_t0, q_t1,
    and g are all evaluated at x.
    """
    h = t * (1./g) - (1.-t) / (1. - g)
    full_lq = (1.-t)*logit(q_t0) + t*logit(q_t1)  # logit predictions from unperturbed model
    logit_perturb = full_lq + eps * h
    return expit(logit_perturb) 
Example #13
Source File: classification.py    From metropolis-hastings-gans with Apache License 2.0
def predict(self, y_pred):
        y_pred, _ = Calibrator.validate(y_pred)
        y_pred = logit(np.clip(y_pred, self.epsilon, 1.0 - self.epsilon))
        y_calib = self.clf.predict_proba(y_pred[:, None])[:, 1]
        return y_calib 
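A minimal sketch of the fit/predict calibration pattern from Examples #9 and #13 (assumes scikit-learn; the names below are illustrative, not the repository's Calibrator class): scores are clipped so logit stays finite, then a logistic regression is fit on the logit-transformed scores.

import numpy as np
from scipy.special import logit
from sklearn.linear_model import LogisticRegression

eps = 1e-6
raw_scores = np.array([0.2, 0.8, 0.55, 0.95])   # hypothetical discriminator outputs
labels = np.array([0, 1, 1, 1])

features = logit(np.clip(raw_scores, eps, 1.0 - eps))[:, None]
clf = LogisticRegression(solver='liblinear').fit(features, labels)
calibrated = clf.predict_proba(features)[:, 1]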
Example #14
Source File: mh.py    From metropolis-hastings-gans with Apache License 2.0
def rejection_sample(d_score, epsilon=1e-6, shift_percent=95.0, score_max=None,
                     random=np.random):
    '''Rejection scheme from:
    https://arxiv.org/pdf/1810.06758.pdf
    '''
    assert(np.ndim(d_score) == 1 and len(d_score) > 0)
    assert(0 <= np.min(d_score) and np.max(d_score) <= 1)
    assert(np.ndim(score_max) == 0)

    # Chop off first since we assume that is real point and reject does not
    # start with real point.
    d_score = d_score[1:]

    # Make sure logit finite
    d_score = np.clip(d_score.astype(np.float), 1e-14, 1 - 1e-14)
    max_burnin_d_score = np.clip(score_max.astype(np.float),
                                 1e-14, 1 - 1e-14)

    log_M = logit(max_burnin_d_score)

    D_tilde = logit(d_score)
    # Bump up M if found something bigger
    D_tilde_M = np.maximum(log_M, np.maximum.accumulate(D_tilde))

    D_delta = D_tilde - D_tilde_M
    F = D_delta - np.log(1 - np.exp(D_delta - epsilon))

    if shift_percent is not None:
        gamma = np.percentile(F, shift_percent)
        F = F - gamma

    P = logistic(F)
    accept = random.rand(len(d_score)) <= P

    if np.any(accept):
        idx = np.argmax(accept)  # Stop at first true, default to 0
    else:
        idx = np.argmax(d_score)  # Revert to cherry if no accept

    # Now shift idx because we took away the real init point
    return idx + 1, P[idx] 
Example #15
Source File: distribution_util_test.py    From deep_image_model with Apache License 2.0
def testGetLogitsAndProbProbability(self):
    p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)

    with self.test_session():
      new_logits, new_p = distribution_util.get_logits_and_prob(
          p=p, validate_args=True)

      self.assertAllClose(special.logit(p), new_logits.eval())
      self.assertAllClose(p, new_p.eval()) 
Example #16
Source File: distribution_util_test.py    From deep_image_model with Apache License 2.0
def testGetLogitsAndProbLogits(self):
    p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
    logits = special.logit(p)

    with self.test_session():
      new_logits, new_p = distribution_util.get_logits_and_prob(
          logits=logits, validate_args=True)

      self.assertAllClose(p, new_p.eval())
      self.assertAllClose(logits, new_logits.eval()) 
Example #17
Source File: test_logit.py    From GraphicDesignPatternByPython with MIT License
def test_nan(self):
        expected = np.array([np.nan]*4)
        olderr = np.seterr(invalid='ignore')
        try:
            actual = logit(np.array([-3., -2., 2., 3.]))
        finally:
            np.seterr(**olderr)

        assert_equal(expected, actual) 
Example #18
Source File: tfdata.py    From bayesian-yolov3 with MIT License
def logit(x):
    """
    inverse of sigmoid function
    """
    return - tf.log((1. / x) - 1.) 
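A quick equivalence check in plain NumPy/SciPy (not TensorFlow): -log(1/x - 1) is algebraically log(x / (1 - x)), which is exactly what scipy.special.logit computes.

import numpy as np
from scipy.special import logit

x = np.array([0.1, 0.5, 0.9])
assert np.allclose(-np.log((1.0 / x) - 1.0), logit(x))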
Example #19
Source File: test_logit.py    From GraphicDesignPatternByPython with MIT License
def check_logit_out(self, dtype, expected):
        a = np.linspace(0,1,10)
        a = np.array(a, dtype=dtype)
        olderr = np.seterr(divide='ignore')
        try:
            actual = logit(a)
        finally:
            np.seterr(**olderr)

        assert_almost_equal(actual, expected)

        assert_equal(actual.dtype, np.dtype(dtype)) 
Example #20
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _isf(self, q):
        return -sc.logit(q) 
Example #21
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _ppf(self, q):
        return sc.logit(q) 
Example #22
Source File: plot_score_distn.py    From metropolis-hastings-gans with Apache License 2.0
def safe_logit(x):
    y = logit(np.clip(x, EPSILON, 1 - EPSILON))
    return y 
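Why the clipping in safe_logit (and in Examples #9, #13, and #14) matters: scipy.special.logit returns -inf at 0 and +inf at 1, so scores are first pushed into [EPSILON, 1 - EPSILON]. A minimal illustration, with EPSILON assumed to be a small constant such as 1e-6:

import numpy as np
from scipy.special import logit

EPSILON = 1e-6
scores = np.array([0.0, 0.5, 1.0])
with np.errstate(divide='ignore'):
    print(logit(scores))                                # [-inf   0.  inf]
print(logit(np.clip(scores, EPSILON, 1.0 - EPSILON)))   # finite values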
Example #23
Source File: test_cond_indep.py    From causal-text-embeddings with MIT License
def train_classifier(train_df, term_counts, word_index, treat_index=1):
	if treat_index is not None: 
		train_df = train_df[train_df.treatment==treat_index]

	indices = train_df.post_index.values
	term_counts = term_counts[:,word_index]
	labels = term_counts[indices,:]
	labels = labels.toarray().flatten()
	labels[labels>1] = 1
	features = logit(train_df.treatment_probability.values)
	features = features[:,np.newaxis]
	model = LogisticRegression(solver='liblinear')
	model.fit(features, labels)
	return model 
Example #24
Source File: train.py    From ffn with Apache License 2.0
def __init__(self, eval_shape):
    self.eval_labels = tf.placeholder(
        tf.float32, [1] + eval_shape + [1], name='eval_labels')
    self.eval_preds = tf.placeholder(
        tf.float32, [1] + eval_shape + [1], name='eval_preds')
    self.eval_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.eval_preds, labels=self.eval_labels))
    self.reset()
    self.eval_threshold = logit(0.9)
    self.sess = None
    self._eval_shape = eval_shape 
Example #25
Source File: train.py    From ffn with Apache License 2.0
def max_pred_offsets(model, seed):
  """Generates offsets with the policy used for inference."""
  # Always start at the center.
  queue = deque([(0, 0, 0)])
  done = set()

  train_image_radius = train_image_size(model) // 2
  input_image_radius = np.array(model.input_image_size) // 2

  while queue:
    offset = queue.popleft()

    # Drop any offsets that would take us beyond the image fragment we
    # loaded for training.
    if np.any(np.abs(np.array(offset)) + input_image_radius >
              train_image_radius):
      continue

    # Ignore locations that were visited previously.
    quantized_offset = (
        offset[0] // max(model.deltas[0], 1),
        offset[1] // max(model.deltas[1], 1),
        offset[2] // max(model.deltas[2], 1))

    if quantized_offset in done:
      continue

    done.add(quantized_offset)

    yield offset

    # Look for new offsets within the updated seed.
    curr_seed = mask.crop_and_pad(seed, offset, model.pred_mask_size[::-1])
    todos = sorted(
        movement.get_scored_move_offsets(
            model.deltas[::-1],
            curr_seed[0, ..., 0],
            threshold=logit(FLAGS.threshold)), reverse=True)
    queue.extend((x[2] + offset[0],
                  x[1] + offset[1],
                  x[0] + offset[2]) for _, x in todos) 
Example #26
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _ppf(self, q):
        return sc.logit(q) 
Example #27
Source File: _continuous_distns.py    From lambda-packs with MIT License
def _isf(self, q):
        return -sc.logit(q) 
Example #28
Source File: test_logit.py    From Computable with MIT License
def check_logit_out(self, dtype, expected):
        a = np.linspace(0,1,10)
        a = np.array(a, dtype=dtype)
        olderr = np.seterr(divide='ignore')
        try:
            actual = logit(a)
        finally:
            np.seterr(**olderr)

        if np.__version__ >= '1.6':
            assert_almost_equal(actual, expected)
        else:
            assert_almost_equal(actual[1:-1], expected[1:-1])

        assert_equal(actual.dtype, np.dtype(dtype)) 
Example #29
Source File: test_logit.py    From Computable with MIT License
def test_nan(self):
        expected = np.array([np.nan]*4)
        olderr = np.seterr(invalid='ignore')
        try:
            actual = logit(np.array([-3., -2., 2., 3.]))
        finally:
            np.seterr(**olderr)

        assert_equal(expected, actual) 
Example #30
Source File: plot_adjustment.py    From causal-text-embeddings with MIT License
def main():
	predict_df = get_prediction_file()
	term_counts = load_terms(dataset)
	print(predict_df.shape, term_counts.shape)
	if dataset == 'reddit':
		imbalanced_terms = filter_imbalanced_terms(predict_df, term_counts)
		term_counts = term_counts[:,imbalanced_terms]
		print(term_counts.shape)

	n_bootstraps = 10
	n_w = term_counts.shape[1]
	
	adjusted = np.zeros((n_bootstraps, n_w))
	permuted = np.zeros((n_bootstraps, n_w))
	unadjusted = np.zeros((n_bootstraps, n_w))

	for i in range(n_bootstraps):
		sample = assign_split(predict_df,num_splits=2)
		sample = sample[sample.split==0]
		indices = sample.post_index.values
		labels = sample.treatment.values
		words = term_counts[indices, :]
		propensity_score = logit(sample.treatment_probability.values)
		all_features = np.column_stack((propensity_score, words))
		unadjusted[i,:] = fit_treatment(words, labels, coeff_offset=0)
		adjusted[i,:] = fit_treatment(all_features, labels)
		np.random.shuffle(words)
		permuted_features = np.column_stack((propensity_score, words))
		permuted[i,:] = fit_treatment(permuted_features, labels)

	plot_density(unadjusted, adjusted, permuted)