Python __future__.division Examples
The following are 30 code examples of __future__.division. You can vote up the examples you find useful or vote down those you don't, and you can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the __future__ module, or try the search function.
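For context, here is a minimal sketch (not taken from any of the projects below) of what the division future import changes under Python 2; under Python 3 the import is a no-op because true division is already the default:

    # Minimal illustration: with this import, `/` between two ints is
    # true division even on Python 2, matching Python 3 behaviour.
    from __future__ import division

    print(3 / 2)   # 1.5  (plain Python 2 would print 1)
    print(3 // 2)  # 1    (floor division is unaffected)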
Example #1
Source File: base.py From verge3d-blender-addon with GNU General Public License v3.0 | 6 votes |
def strip_future_imports(self, code):
    """
    Strips any of these import lines:

        from __future__ import <anything>
        from future <anything>
        from future.<anything>
        from builtins <anything>

    or any line containing:

        install_hooks()

    or:

        install_aliases()

    Limitation: doesn't handle imports split across multiple lines like this:

        from __future__ import (absolute_import, division, print_function,
                                unicode_literals)
    """
    output = []
    # We need .splitlines(keepends=True), which doesn't exist on Py2,
    # so we use this instead:
    for line in code.split('\n'):
        if not (line.startswith('from __future__ import ')
                or line.startswith('from future ')
                or line.startswith('from builtins ')
                or 'install_hooks()' in line
                or 'install_aliases()' in line
                # but don't match "from future_builtins" :)
                or line.startswith('from future.')):
            output.append(line)
    return '\n'.join(output)
Example #2
Source File: base.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def strip_future_imports(self, code):
    """
    Strips any of these import lines:

        from __future__ import <anything>
        from future <anything>
        from future.<anything>
        from builtins <anything>

    or any line containing:

        install_hooks()

    or:

        install_aliases()

    Limitation: doesn't handle imports split across multiple lines like this:

        from __future__ import (absolute_import, division, print_function,
                                unicode_literals)
    """
    output = []
    # We need .splitlines(keepends=True), which doesn't exist on Py2,
    # so we use this instead:
    for line in code.split('\n'):
        if not (line.startswith('from __future__ import ')
                or line.startswith('from future ')
                or line.startswith('from builtins ')
                or 'install_hooks()' in line
                or 'install_aliases()' in line
                # but don't match "from future_builtins" :)
                or line.startswith('from future.')):
            output.append(line)
    return '\n'.join(output)
Example #3
Source File: fix_add__future__imports_except_unicode_literals.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def transform(self, node, results):
    # Reverse order:
    future_import(u"print_function", node)
    future_import(u"division", node)
    future_import(u"absolute_import", node)
Example #4
Source File: base.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def strip_future_imports(self, code):
    """
    Strips any of these import lines:

        from __future__ import <anything>
        from future <anything>
        from future.<anything>
        from builtins <anything>

    or any line containing:

        install_hooks()

    or:

        install_aliases()

    Limitation: doesn't handle imports split across multiple lines like this:

        from __future__ import (absolute_import, division, print_function,
                                unicode_literals)
    """
    output = []
    # We need .splitlines(keepends=True), which doesn't exist on Py2,
    # so we use this instead:
    for line in code.split('\n'):
        if not (line.startswith('from __future__ import ')
                or line.startswith('from future ')
                or line.startswith('from builtins ')
                or 'install_hooks()' in line
                or 'install_aliases()' in line
                # but don't match "from future_builtins" :)
                or line.startswith('from future.')):
            output.append(line)
    return '\n'.join(output)
Example #5
Source File: test_classes.py From recruit with Apache License 2.0 | 6 votes |
def test_truediv(Poly):
    # true division is valid only if the denominator is a Number and
    # not a python bool.
    p1 = Poly([1, 2, 3])
    p2 = p1 * 5

    for stype in np.ScalarType:
        if not issubclass(stype, Number) or issubclass(stype, bool):
            continue
        s = stype(5)
        assert_poly_almost_equal(op.truediv(p2, s), p1)
        assert_raises(TypeError, op.truediv, s, p2)
    for stype in (int, long, float):
        s = stype(5)
        assert_poly_almost_equal(op.truediv(p2, s), p1)
        assert_raises(TypeError, op.truediv, s, p2)
    for stype in [complex]:
        s = stype(5, 0)
        assert_poly_almost_equal(op.truediv(p2, s), p1)
        assert_raises(TypeError, op.truediv, s, p2)
    for s in [tuple(), list(), dict(), bool(), np.array([1])]:
        assert_raises(TypeError, op.truediv, p2, s)
        assert_raises(TypeError, op.truediv, s, p2)
    for ptype in classes:
        assert_raises(TypeError, op.truediv, p2, ptype(1))
Example #6
Source File: fix_add_all__future__imports.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def transform(self, node, results):
    future_import(u"unicode_literals", node)
    future_import(u"print_function", node)
    future_import(u"division", node)
    future_import(u"absolute_import", node)
Example #7
Source File: math_ops.py From CapsLayer with Apache License 2.0 | 6 votes |
def divide(x, y, safe_mode=True, epsilon=None, name=None):
    """ A wrapper of `tf.divide`, computes Python style division of x by y
    but extends safe divide support.
        If safe_mode is `True` or epsilon is given (a small float number), the
    absolute value of denominator in the division will be clipped to make sure
    it's bigger than epsilon (default is 1e-13).

    Args:
        safe_mode: Use safe divide mode.
        epsilon: Float number. Default is `1e-13`.
    """
    if not safe_mode and epsilon is None:
        return tf.divide(x, y, name=name)
    else:
        epsilon = 1e-20 if epsilon is None else epsilon
        name = "safe_divide" if name is None else name
        with tf.name_scope(name):
            y = tf.where(tf.greater(tf.abs(y), epsilon), y,
                         y + tf.sign(y) * epsilon)
            return tf.divide(x, y)
Example #8
Source File: stages_impl_test.py From model-optimization with Apache License 2.0 | 5 votes |
def test_all_zero_input_works(self):
    # Tests that encoding does not blow up with all-zero input. With
    # min_max=None, the derived min and max are identical, thus potential for
    # division by zero.
    stage = stages_impl.UniformQuantizationEncodingStage(bits=8, min_max=None)
    test_data = self.run_one_to_many_encode_decode(
        stage, lambda: tf.zeros([50]))
    self.assertAllEqual(np.zeros((50)).astype(np.float32),
                        test_data.decoded_x)
Example #9
Source File: ui.py From python-netsurv with MIT License | 5 votes |
def download_speed(self):
    # Avoid zero division errors...
    if self.avg == 0.0:
        return "..."
    return format_size(1 / self.avg) + "/s"
Example #10
Source File: chebyshev.py From recruit with Apache License 2.0 | 5 votes |
def _zseries_der(zs):
    """Differentiate a z-series.

    The derivative is with respect to x, not z. This is achieved using the
    chain rule and the value of dx/dz given in the module notes.

    Parameters
    ----------
    zs : z-series
        The z-series to differentiate.

    Returns
    -------
    derivative : z-series
        The derivative

    Notes
    -----
    The zseries for x (ns) has been multiplied by two in order to avoid
    using floats that are incompatible with Decimal and likely other
    specialized scalar types. This scaling has been compensated by
    multiplying the value of zs by two also so that the two cancels in the
    division.
    """
    n = len(zs)//2
    ns = np.array([-1, 0, 1], dtype=zs.dtype)
    zs *= np.arange(-n, n+1)*2
    d, r = _zseries_div(zs, ns)
    return d
Example #11
Source File: ui.py From python-netsurv with MIT License | 5 votes |
def download_speed(self):
    # Avoid zero division errors...
    if self.avg == 0.0:
        return "..."
    return format_size(1 / self.avg) + "/s"
Example #12
Source File: quantization_test.py From model-optimization with Apache License 2.0 | 5 votes |
def test_all_zero_input_works(self):
    # Tests that encoding does not blow up with all-zero input. With
    # min_max=None, the derived min and max are identical, thus potential for
    # division by zero.
    stage = quantization.PerChannelPRNGUniformQuantizationEncodingStage(bits=8)
    test_data = self.run_one_to_many_encode_decode(stage,
                                                   lambda: tf.zeros([50]))
    self.assertAllEqual(np.zeros((50)).astype(np.float32),
                        test_data.decoded_x)
Example #13
Source File: quantization_test.py From model-optimization with Apache License 2.0 | 5 votes |
def test_all_zero_input_works(self):
    # Tests that encoding does not blow up with all-zero input. With
    # min_max=None, the derived min and max are identical, thus potential for
    # division by zero.
    stage = quantization.PerChannelUniformQuantizationEncodingStage(bits=8)
    test_data = self.run_one_to_many_encode_decode(stage,
                                                   lambda: tf.zeros([50]))
    self.assertAllEqual(np.zeros((50)).astype(np.float32),
                        test_data.decoded_x)
Example #14
Source File: test_utils.py From zhusuan with MIT License | 5 votes |
def test_prerequisite(self):
    # Tensorflow has deprecated Python 2 division semantics;
    # regular division in Python 3 is true division.
    # if six.PY2:
    #     self.assertAlmostEqual(regular_div(3, 2), 1)
    #     self.assertAlmostEqual(regular_div(3.3, 1.6), 2.0625)
    # else:
    self.assertAlmostEqual(regular_div(3, 2), 1.5)
    self.assertAlmostEqual(regular_div(3.3, 1.6), 2.0625)
    self.assertAlmostEqual(true_div(3, 2), 1.5)
    self.assertAlmostEqual(true_div(3.3, 1.6), 2.0625)
    self.assertAlmostEqual(floor_div(3, 2), 1)
    self.assertAlmostEqual(floor_div(3.3, 1.6), 2.0)
Example #15
Source File: numbers.py From meddle with MIT License | 5 votes |
def __div__(self, other):
    """self / other without __future__ division

    May promote to float.
    """
    raise NotImplementedError
Example #16
Source File: numbers.py From meddle with MIT License | 5 votes |
def __rdiv__(self, other):
    """other / self without __future__ division"""
    raise NotImplementedError
Example #17
Source File: quantization_test.py From model-optimization with Apache License 2.0 | 5 votes |
def test_all_zero_input_works(self):
    # Tests that encoding does not blow up with all-zero input. With
    # min_max=None, the derived min and max are identical, thus potential for
    # division by zero.
    stage = quantization.PRNGUniformQuantizationEncodingStage(bits=8)
    test_data = self.run_one_to_many_encode_decode(
        stage, lambda: tf.zeros([50]))
    self.assertAllEqual(np.zeros((50)).astype(np.float32),
                        test_data.decoded_x)
Example #18
Source File: ui.py From jbox with MIT License | 5 votes |
def download_speed(self):
    # Avoid zero division errors...
    if self.avg == 0.0:
        return "..."
    return format_size(1 / self.avg) + "/s"
Example #19
Source File: encode_and_decode.py From R2CNN_Faster-RCNN_Tensorflow with MIT License | 5 votes |
def encode_boxes(unencode_boxes, reference_boxes, scale_factors=None):
    '''
    :param unencode_boxes: [-1, 4]
    :param reference_boxes: [-1, 4]
    :return: encode_boxes [-1, 4]
    '''
    xmin, ymin, xmax, ymax = unencode_boxes[:, 0], unencode_boxes[:, 1], \
                             unencode_boxes[:, 2], unencode_boxes[:, 3]

    reference_xmin, reference_ymin, reference_xmax, reference_ymax = \
        reference_boxes[:, 0], reference_boxes[:, 1], \
        reference_boxes[:, 2], reference_boxes[:, 3]

    x_center = (xmin + xmax) / 2.
    y_center = (ymin + ymax) / 2.
    w = xmax - xmin + 1e-8
    h = ymax - ymin + 1e-8

    reference_xcenter = (reference_xmin + reference_xmax) / 2.
    reference_ycenter = (reference_ymin + reference_ymax) / 2.
    reference_w = reference_xmax - reference_xmin + 1e-8
    reference_h = reference_ymax - reference_ymin + 1e-8

    # w + 1e-8 to avoid NaN in division and log below
    t_xcenter = (x_center - reference_xcenter) / reference_w
    t_ycenter = (y_center - reference_ycenter) / reference_h
    t_w = np.log(w / reference_w)
    t_h = np.log(h / reference_h)

    if scale_factors:
        t_xcenter *= scale_factors[0]
        t_ycenter *= scale_factors[1]
        t_w *= scale_factors[2]
        t_h *= scale_factors[3]

    return np.transpose(np.stack([t_xcenter, t_ycenter, t_w, t_h], axis=0))
Example #20
Source File: ops.py From recruit with Apache License 2.0 | 5 votes |
def _flex_method_PANEL(cls, op, special):
    str_rep = _get_opstr(op, cls)
    op_name = _get_op_name(op, special)
    eval_kwargs = _gen_eval_kwargs(op_name)
    fill_zeros = _gen_fill_zeros(op_name)

    def na_op(x, y):
        import pandas.core.computation.expressions as expressions

        try:
            result = expressions.evaluate(op, str_rep, x, y,
                                          errors='raise', **eval_kwargs)
        except TypeError:
            result = op(x, y)

        # handles discrepancy between numpy and numexpr on division/mod
        # by 0 though, given that these are generally (always?)
        # non-scalars, I'm not sure whether it's worth it at the moment
        result = missing.fill_zeros(result, x, y, op_name, fill_zeros)
        return result

    if op_name in _op_descriptions:
        doc = _make_flex_doc(op_name, 'panel')
    else:
        # doc strings substitors
        doc = _agg_doc_PANEL.format(op_name=op_name)

    @Appender(doc)
    def f(self, other, axis=0):
        return self._combine(other, na_op, axis=axis)

    f.__name__ = op_name
    return f


# -----------------------------------------------------------------------------
# Sparse
Example #21
Source File: term.py From tensorflow_constrained_optimization with Apache License 2.0 | 5 votes |
def __truediv__(self, scalar):
    """Returns the result of dividing the ratio weights by a scalar."""
    if not isinstance(scalar, numbers.Number):
        raise TypeError("_RatioWeights objects only support *scalar* division")
    if scalar == 0:
        raise ValueError("cannot divide by zero")
    return _RatioWeights({
        denominator_key: numerator / scalar
        for denominator_key, numerator in six.iteritems(self._ratios)
    })

# __rtruediv__ is not implemented since we only allow *scalar* division, i.e.
# (_RatioWeights / scalar) is allowed, but (scalar / _RatioWeights) is not.
Example #22
Source File: _polybase.py From recruit with Apache License 2.0 | 5 votes |
def __truediv__(self, other):
    # there is no true divide if the rhs is not a Number, although it
    # could return the first n elements of an infinite series.
    # It is hard to see where n would come from, though.
    if not isinstance(other, numbers.Number) or isinstance(other, bool):
        form = "unsupported types for true division: '%s', '%s'"
        raise TypeError(form % (type(self), type(other)))
    return self.__floordiv__(other)
Example #23
Source File: test_umath.py From recruit with Apache License 2.0 | 5 votes |
def test_floor_division_complex(self):
    # check that implementation is correct
    msg = "Complex floor division implementation check"
    x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j],
                 dtype=np.complex128)
    y = np.array([0., -1., 0., 0.], dtype=np.complex128)
    assert_equal(np.floor_divide(x**2, x), y, err_msg=msg)
    # check overflow, underflow
    msg = "Complex floor division overflow/underflow check"
    x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
    y = np.floor_divide(x**2, x)
    assert_equal(y, [1.e+110, 0], err_msg=msg)
Example #24
Source File: test_umath.py From recruit with Apache License 2.0 | 5 votes |
def test_division_complex(self):
    # check that implementation is correct
    msg = "Complex division implementation check"
    x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
    assert_almost_equal(x**2/x, x, err_msg=msg)
    # check overflow, underflow
    msg = "Complex division overflow/underflow check"
    x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
    y = x**2/x
    assert_almost_equal(y/x, [1, 1], err_msg=msg)
Example #25
Source File: test_umath.py From recruit with Apache License 2.0 | 5 votes |
def test_division_int(self):
    # int division should follow Python
    x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
    if 5 / 10 == 0.5:
        assert_equal(x / 100,
                     [0.05, 0.1, 0.9, 1, -0.05, -0.1, -0.9, -1, -1.2])
    else:
        assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
    assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
    assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
Example #26
Source File: basic_expression.py From tensorflow_constrained_optimization with Apache License 2.0 | 5 votes |
def __truediv__(self, scalar):
    """Returns the result of dividing by a scalar."""
    if not isinstance(scalar, numbers.Number):
        raise TypeError("BasicExpression objects only support *scalar* division")
    return BasicExpression([tt / scalar for tt in self._terms])

# __rtruediv__ is not implemented since we only allow *scalar* division, i.e.
# (BasicExpression / scalar) is allowed, but (scalar / BasicExpression) is
# not.
Example #27
Source File: basic_expression.py From tensorflow_constrained_optimization with Apache License 2.0 | 5 votes |
def __init__(self, terms):
    """Creates a new `BasicExpression`.

    The reason for taking a collection of `Term`s, instead of only a single
    `Term` representing the entire linear combination, is that, unlike
    `Tensor`s, two `Term`s can only be added or subtracted if they're
    "compatible" (which is a notion defined by the `Term` itself).

    Args:
      terms: collection of `Term`s to sum in the `BasicExpression`.
    """
    # This object contains the "_terms" member variable, representing a linear
    # combination of `Term` objects. Like `Tensor`s, `Term`s support negation,
    # scalar multiplication and division without restriction. Unlike `Tensor`s,
    # however, only "compatible" `Term`s may be added or subtracted. Two
    # `Term`s are compatible iff they have the same key (returned by their
    # "key" method). When we add or subtract two `BasicExpression`s, compatible
    # `Term`s are added or subtracted, and incompatible `Term`s are put in
    # their own dict entries.
    #
    # We use a list to store the Terms (instead of e.g. a dict mapping keys to
    # Terms), so that we can preserve the order of the Terms (of course, this
    # isn't the only way to handle it). This is needed to support distributed
    # optimization, since it results in the DeferredVariables upon which the
    # Terms depend having a consistent order from machine-to-machine.
    self._terms = []
    self._add_terms(terms)
Example #28
Source File: term.py From tensorflow_constrained_optimization with Apache License 2.0 | 5 votes |
def __truediv__(self, scalar):
    """Returns the result of dividing by a scalar."""
    if not isinstance(scalar, numbers.Number):
        raise TypeError("Term objects only support *scalar* division")
    return BinaryClassificationTerm(self._predictions,
                                    self._positive_ratio_weights / scalar,
                                    self._negative_ratio_weights / scalar,
                                    self._loss)

# __rtruediv__ is not implemented since we only allow *scalar* division, i.e.
# (Term / scalar) is allowed, but (scalar / Term) is not.
Example #29
Source File: term.py From tensorflow_constrained_optimization with Apache License 2.0 | 5 votes |
def __truediv__(self, scalar):
    """Returns the result of dividing this `Term` by a scalar."""

# __rtruediv__ is not implemented since we only allow *scalar* division, i.e.
# (Term / scalar) is allowed, but (scalar / Term) is not.
Example #30
Source File: nanfunctions.py From recruit with Apache License 2.0 | 5 votes |
def _divide_by_count(a, b, out=None):
    """
    Compute a/b ignoring invalid results. If `a` is an array the division
    is done in place. If `a` is a scalar, then its type is preserved in the
    output. If out is None, then then a is used instead so that the
    division is in place. Note that this is only called with `a` an inexact
    type.

    Parameters
    ----------
    a : {ndarray, numpy scalar}
        Numerator. Expected to be of inexact type but not checked.
    b : {ndarray, numpy scalar}
        Denominator.
    out : ndarray, optional
        Alternate output array in which to place the result.  The default
        is ``None``; if provided, it must have the same shape as the
        expected output, but the type will be cast if necessary.

    Returns
    -------
    ret : {ndarray, numpy scalar}
        The return value is a/b. If `a` was an ndarray the division is done
        in place. If `a` is a numpy scalar, the division preserves its type.
    """
    with np.errstate(invalid='ignore', divide='ignore'):
        if isinstance(a, np.ndarray):
            if out is None:
                return np.divide(a, b, out=a, casting='unsafe')
            else:
                return np.divide(a, b, out=out, casting='unsafe')
        else:
            if out is None:
                return a.dtype.type(a / b)
            else:
                # This is questionable, but currently a numpy scalar can
                # be output to a zero dimensional array.
                return np.divide(a, b, out=out, casting='unsafe')