Python numpy.subtract() Examples
The following are 30 code examples of numpy.subtract(), collected from open-source projects. You can go to the original project or source file by following the link above each example, or browse the other available functions and classes of the numpy module.
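Before the project examples, here is a minimal standalone sketch of what numpy.subtract() does: it is the ufunc form of the binary - operator, so it broadcasts its inputs and accepts ufunc keyword arguments such as out. The array values below are made up purely for illustration.

    import numpy as np

    a = np.array([[10, 20, 30],
                  [40, 50, 60]])
    b = np.array([1, 2, 3])

    # element-wise subtraction, equivalent to a - b (b is broadcast across rows)
    print(np.subtract(a, b))    # [[ 9 18 27] [39 48 57]]

    # broadcasting a scalar
    print(np.subtract(a, 5))    # [[ 5 15 25] [35 45 55]]

    # writing into a preallocated output array
    out = np.empty_like(a)
    np.subtract(a, b, out=out)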
Example #1
Source File: nlm.py From openISP with MIT License | 6 votes |
def calWeights(self, img, kernel, y, x):
    wmax = 0
    sweight = 0
    average = 0
    for j in range(2 * self.Ds + 1 - 2 * self.ds - 1):
        for i in range(2 * self.Ds + 1 - 2 * self.ds - 1):
            start_y = y - self.Ds + self.ds + j
            start_x = x - self.Ds + self.ds + i
            neighbour_w = img[start_y - self.ds:start_y + self.ds + 1, start_x - self.ds:start_x + self.ds + 1]
            center_w = img[y-self.ds:y+self.ds+1, x-self.ds:x+self.ds+1]
            if j != y or i != x:
                sub = np.subtract(neighbour_w, center_w)
                dist = np.sum(np.multiply(kernel, np.multiply(sub, sub)))
                w = np.exp(-dist/pow(self.h, 2))  # replaced by look up table
                if w > wmax:
                    wmax = w
                sweight = sweight + w
                average = average + w * img[start_y, start_x]
    return sweight, average, wmax
Example #2
Source File: entropy_akshat.py From Emotion-Recogniton-from-EEG-Signals with MIT License | 6 votes |
def app_entropy(x, order=2, metric='chebyshev'):
    """Approximate Entropy

    Parameters
    ----------
    x : list or np.array
        One-dimensional time series of shape (n_times)
    order : int (default: 2)
        Embedding dimension.
    metric : str (default: chebyshev)
        Name of the metric function used with
        :class:`~sklearn.neighbors.KDTree`. The list of available
        metric functions is given by: ``KDTree.valid_metrics``.

    Returns
    -------
    ae : float
        Approximate Entropy.
    """
    phi = _app_samp_entropy(x, order=order, metric=metric, approximate=True)
    return np.subtract(phi[0], phi[1])
Example #3
Source File: histogram.py From mars with Apache License 2.0 | 6 votes |
def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:  # pragma: no cover
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt)
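A quick hedged illustration of why the helper above exists (the values are arbitrary; the wrap-around shown is standard NumPy fixed-width integer behaviour):

    import numpy as np

    first_edge = np.int16(-32000)
    last_edge = np.int16(32000)

    # naive subtraction wraps around: the true difference (64000) does not fit in int16
    print(np.subtract(last_edge, first_edge))    # -1536 (overflowed)

    # casting to the matching unsigned type preserves the true range
    print(np.subtract(last_edge, first_edge, casting='unsafe', dtype=np.uint16))    # 64000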
Example #4
Source File: davis17_online_data.py From MaskTrack with MIT License | 6 votes |
def make_img_gt_pair(self, idx):
    """
    Make the image-ground-truth pair
    """
    img = cv2.imread(os.path.join(self.db_root_dir, self.img_list[idx]))
    if self.labels[idx] is not None:
        label = cv2.imread(os.path.join(self.db_root_dir, self.labels[idx]), 0)
    else:
        gt = np.zeros(img.shape[:-1], dtype=np.uint8)

    if self.inputRes is not None:
        img = imresize(img, self.inputRes)
        if self.labels[idx] is not None:
            label = imresize(label, self.inputRes, interp='nearest')

    img = np.array(img, dtype=np.float32)
    img = np.subtract(img, np.array(self.meanval, dtype=np.float32))

    if self.labels[idx] is not None:
        gt = np.array(label, dtype=np.float32)
        gt = gt/np.max([gt.max(), 1e-8])

    return img, gt
Example #5
Source File: utility_functions.py From MaskTrack with MIT License | 6 votes |
def apply_val_transform_image(image, inputRes=None):
    meanval = (104.00699, 116.66877, 122.67892)

    if inputRes is not None:
        image = sm.imresize(image, inputRes)

    image = np.array(image, dtype=np.float32)
    image = np.subtract(image, np.array(meanval, dtype=np.float32))

    if image.ndim == 2:
        image = image[:, :, np.newaxis]

    # swap color axis because
    # numpy image: H x W x C
    # torch image: C X H X W
    image = image.transpose((2, 0, 1))
    image = torch.from_numpy(image)

    return image
Example #6
Source File: eval_helpers.py From PoseWarper with Apache License 2.0 | 6 votes |
def VOCap(rec, prec):
    mpre = np.zeros([1, 2 + len(prec)])
    mpre[0, 1:len(prec) + 1] = prec
    mrec = np.zeros([1, 2 + len(rec)])
    mrec[0, 1:len(rec) + 1] = rec
    mrec[0, len(rec) + 1] = 1.0

    for i in range(mpre.size - 2, -1, -1):
        mpre[0, i] = max(mpre[0, i], mpre[0, i + 1])

    i = np.argwhere(~np.equal(mrec[0, 1:], mrec[0, :mrec.shape[1] - 1])) + 1
    i = i.flatten()

    # compute area under the curve
    ap = np.sum(np.multiply(np.subtract(mrec[0, i], mrec[0, i - 1]), mpre[0, i]))

    return ap
Example #7
Source File: facenet.py From TNT with GNU General Public License v3.0 | 6 votes |
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
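For readers without TensorFlow, the same formula — mean(max(||a - p||^2 - ||a - n||^2 + alpha, 0)) over the batch — can be sketched in plain NumPy; the embeddings below are placeholders, not values from the project:

    import numpy as np

    def triplet_loss_np(anchor, positive, negative, alpha):
        # squared Euclidean distance between each anchor and its positive/negative
        pos_dist = np.sum(np.square(np.subtract(anchor, positive)), axis=1)
        neg_dist = np.sum(np.square(np.subtract(anchor, negative)), axis=1)
        basic_loss = np.add(np.subtract(pos_dist, neg_dist), alpha)
        return np.mean(np.maximum(basic_loss, 0.0))

    # two illustrative triplets of 3-dimensional embeddings
    a = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
    p = np.array([[0.1, 0.9, 0.0], [0.9, 0.1, 0.0]])
    n = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    print(triplet_loss_np(a, p, n, alpha=0.2))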
Example #8
Source File: test_dataframe.py From plydata with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_mutate_all():
    df = pd.DataFrame({
        'alpha': list('aaabbb'),
        'beta': list('babruq'),
        'theta': list('cdecde'),
        'x': [1, 2, 3, 4, 5, 6],
        'y': [6, 5, 4, 3, 2, 1],
        'z': [7, 9, 11, 8, 10, 12]
    })

    result = (df
              >> group_by('alpha')
              >> select('x', 'y', 'z')
              >> mutate_all((np.add, np.subtract), 10)
              )

    assert 'alpha' in result
Example #9
Source File: facenet.py From TNT with GNU General Public License v3.0 | 6 votes |
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Example #10
Source File: facenet.py From TNT with GNU General Public License v3.0 | 6 votes |
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
Example #11
Source File: test_period.py From recruit with Apache License 2.0 | 6 votes |
def test_pi_ops_array_int(self):
    idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                      freq='M', name='idx')

    f = lambda x: x + np.array([1, 2, 3, 4])
    exp = PeriodIndex(['2011-02', '2011-04', 'NaT', '2011-08'],
                      freq='M', name='idx')
    self._check(idx, f, exp)

    f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
    exp = PeriodIndex(['2011-05', '2011-01', 'NaT', '2011-06'],
                      freq='M', name='idx')
    self._check(idx, f, exp)

    f = lambda x: x - np.array([1, 2, 3, 4])
    exp = PeriodIndex(['2010-12', '2010-12', 'NaT', '2010-12'],
                      freq='M', name='idx')
    self._check(idx, f, exp)

    f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
    exp = PeriodIndex(['2010-10', '2010-12', 'NaT', '2011-06'],
                      freq='M', name='idx')
    self._check(idx, f, exp)
Example #12
Source File: test_period.py From recruit with Apache License 2.0 | 6 votes |
def test_pi_ops_nat(self):
    idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                      freq='M', name='idx')
    expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'],
                           freq='M', name='idx')

    self._check(idx, lambda x: x + 2, expected)
    self._check(idx, lambda x: 2 + x, expected)
    self._check(idx, lambda x: np.add(x, 2), expected)

    self._check(idx + 2, lambda x: x - 2, idx)
    self._check(idx + 2, lambda x: np.subtract(x, 2), idx)

    # freq with mult
    idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'],
                      freq='2M', name='idx')
    expected = PeriodIndex(['2011-07', '2011-08', 'NaT', '2011-10'],
                           freq='2M', name='idx')

    self._check(idx, lambda x: x + 3, expected)
    self._check(idx, lambda x: 3 + x, expected)
    self._check(idx, lambda x: np.add(x, 3), expected)

    self._check(idx + 3, lambda x: x - 3, idx)
    self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
Example #13
Source File: call_engine_to_infer_all_print_predict_on_image.py From iAI with MIT License | 6 votes |
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,w,h

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32)  # avoid error: ndarray is not contiguous
Example #14
Source File: histograms.py From lambda-packs with MIT License | 6 votes |
def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt)
Example #15
Source File: call_engine_to_infer_one.py From iAI with MIT License | 6 votes |
def load_image(img_path, net_input_shape):
    img = cv2.resize(cv2.imread(img_path), net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # CHW

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return np.ascontiguousarray(imgS, dtype=np.float32)  # avoid error: ndarray is not contiguous
Example #16
Source File: call_engine_to_infer_all_analysis_error_6classes.py From iAI with MIT License | 6 votes |
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [128.0, 128.0, 128.0] # Caffe image mean
    # mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)/128.0
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,w,h

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32)  # avoid error: ndarray is not contiguous
Example #17
Source File: call_engine_to_infer_all.py From iAI with MIT License | 6 votes |
def load_image(img_path, net_input_shape):
    img = cv2.resize(cv2.imread(img_path), net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)
    imgS = np.transpose(imgSS, (2, 0, 1))  # CHW

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return np.ascontiguousarray(imgS, dtype=np.float32)  # avoid error: ndarray is not contiguous
Example #18
Source File: call_engine_to_infer_all_print_predict_on_image_6classes.py From iAI with MIT License | 6 votes |
def load_image(img_path, net_input_shape):
    imgBGR = cv2.imread(img_path)
    img = cv2.resize(imgBGR, net_input_shape)
    # BGR -> RGB
    #img = img[:,:, (2, 1, 0)]

    ## Method 1
    # imgT = np.transpose(img, (2, 0, 1))  # c,w,h
    # imgF = np.asarray(imgT, dtype=np.float32)
    # mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
    # imgS = np.subtract(imgF,mean)

    ## Method 2
    imgF = np.asarray(img, dtype=np.float32)
    mean = [128.0, 128.0, 128.0] # Caffe image mean
    # mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
    imgSS = np.subtract(imgF, mean)/128.0
    imgS = np.transpose(imgSS, (2, 0, 1))  # c,w,h

    # RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)

    return imgBGR, np.ascontiguousarray(imgS, dtype=np.float32)  # avoid error: ndarray is not contiguous
Example #19
Source File: test_old_ma.py From lambda-packs with MIT License | 5 votes |
def test_testArithmetic(self):
    # Test of basic arithmetic.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    a2d = array([[1, 2], [0, 4]])
    a2dm = masked_array(a2d, [[0, 0], [1, 0]])
    self.assertTrue(eq(a2d * a2d, a2d * a2dm))
    self.assertTrue(eq(a2d + a2d, a2d + a2dm))
    self.assertTrue(eq(a2d - a2d, a2d - a2dm))
    for s in [(12,), (4, 3), (2, 6)]:
        x = x.reshape(s)
        y = y.reshape(s)
        xm = xm.reshape(s)
        ym = ym.reshape(s)
        xf = xf.reshape(s)
        self.assertTrue(eq(-x, -xm))
        self.assertTrue(eq(x + y, xm + ym))
        self.assertTrue(eq(x - y, xm - ym))
        self.assertTrue(eq(x * y, xm * ym))
        with np.errstate(divide='ignore', invalid='ignore'):
            self.assertTrue(eq(x / y, xm / ym))
        self.assertTrue(eq(a10 + y, a10 + ym))
        self.assertTrue(eq(a10 - y, a10 - ym))
        self.assertTrue(eq(a10 * y, a10 * ym))
        with np.errstate(divide='ignore', invalid='ignore'):
            self.assertTrue(eq(a10 / y, a10 / ym))
        self.assertTrue(eq(x + a10, xm + a10))
        self.assertTrue(eq(x - a10, xm - a10))
        self.assertTrue(eq(x * a10, xm * a10))
        self.assertTrue(eq(x / a10, xm / a10))
        self.assertTrue(eq(x ** 2, xm ** 2))
        self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
        self.assertTrue(eq(x ** y, xm ** ym))
        self.assertTrue(eq(np.add(x, y), add(xm, ym)))
        self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
        self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
        with np.errstate(divide='ignore', invalid='ignore'):
            self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
Example #20
Source File: histograms.py From lambda-packs with MIT License | 5 votes |
def _hist_bin_fd(x):
    """
    The Freedman-Diaconis histogram bin estimator.

    The Freedman-Diaconis rule uses interquartile range (IQR) to
    estimate binwidth. It is considered a variation of the Scott rule
    with more robustness as the IQR is less affected by outliers than
    the standard deviation. However, the IQR depends on fewer points
    than the standard deviation, so it is less accurate, especially for
    long tailed distributions.

    If the IQR is 0, this function returns 1 for the number of bins.
    Binwidth is inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    iqr = np.subtract(*np.percentile(x, [75, 25]))
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
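The np.subtract(*np.percentile(x, [75, 25])) line is just argument unpacking: np.percentile returns the 75th and 25th percentiles as a two-element array, and subtracting one from the other gives the IQR. A small hedged check with random data (seed chosen arbitrarily):

    import numpy as np

    rng = np.random.default_rng(0)
    x = rng.normal(size=1000)

    q75, q25 = np.percentile(x, [75, 25])
    iqr = np.subtract(*np.percentile(x, [75, 25]))    # identical to q75 - q25

    fd_width = 2.0 * iqr * x.size ** (-1.0 / 3.0)
    print(iqr == q75 - q25, fd_width > 0)    # True True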
Example #21
Source File: test_umath.py From recruit with Apache License 2.0 | 5 votes |
def test_exceptions(self):
    a = np.ones(1, dtype=np.bool_)
    assert_raises(TypeError, np.negative, a)
    assert_raises(TypeError, np.positive, a)
    assert_raises(TypeError, np.subtract, a, a)
Example #22
Source File: test_subclassing.py From lambda-packs with MIT License | 5 votes |
def test_pure_subclass_info_preservation(self):
    # Test that ufuncs and methods conserve extra information consistently;
    # see gh-7122.
    arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
    arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
    diff1 = np.subtract(arr1, arr2)
    self.assertTrue('info' in diff1._optinfo)
    self.assertTrue(diff1._optinfo['info'] == 'test')
    diff2 = arr1 - arr2
    self.assertTrue('info' in diff2._optinfo)
    self.assertTrue(diff2._optinfo['info'] == 'test')


###############################################################################
Example #23
Source File: test_datetime64.py From recruit with Apache License 2.0 | 5 votes |
def test_dti_isub_tdi(self, tz_naive_fixture):
    # GH#17558
    tz = tz_naive_fixture
    dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
    tdi = pd.timedelta_range('0 days', periods=10)
    expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')

    # isub with TimedeltaIndex
    result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
    result -= tdi
    tm.assert_index_equal(result, expected)

    msg = 'cannot subtract .* from a TimedeltaArray'
    with pytest.raises(TypeError, match=msg):
        tdi -= dti

    # isub with timedelta64 array
    result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
    result -= tdi.values
    tm.assert_index_equal(result, expected)

    msg = '|'.join(['cannot perform __neg__ with this index type:',
                    'ufunc subtract cannot use operands with types',
                    'cannot subtract DatetimeArray from'])
    with pytest.raises(TypeError, match=msg):
        tdi.values -= dti

# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like

# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
Example #24
Source File: test_datetime64.py From recruit with Apache License 2.0 | 5 votes |
def test_ufunc_coercions(self):
    idx = date_range('2011-01-01', periods=3, freq='2D', name='x')

    delta = np.timedelta64(1, 'D')
    for result in [idx + delta, np.add(idx, delta)]:
        assert isinstance(result, DatetimeIndex)
        exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '2D'

    for result in [idx - delta, np.subtract(idx, delta)]:
        assert isinstance(result, DatetimeIndex)
        exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '2D'

    delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
                      np.timedelta64(3, 'D')])
    for result in [idx + delta, np.add(idx, delta)]:
        assert isinstance(result, DatetimeIndex)
        exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
                            freq='3D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == '3D'

    for result in [idx - delta, np.subtract(idx, delta)]:
        assert isinstance(result, DatetimeIndex)
        exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
                            freq='D', name='x')
        tm.assert_index_equal(result, exp)
        assert result.freq == 'D'
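These coercions build on the fact that np.subtract works directly on NumPy datetime64/timedelta64 values; a small hedged illustration outside pandas:

    import numpy as np

    dates = np.array(['2011-01-01', '2011-01-03', '2011-01-05'], dtype='datetime64[D]')
    delta = np.timedelta64(1, 'D')

    print(np.subtract(dates, delta))             # ['2010-12-31' '2011-01-02' '2011-01-04']
    print(np.subtract(dates[1:], dates[:-1]))    # [2 2] days between consecutive dates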
Example #25
Source File: test_period.py From recruit with Apache License 2.0 | 5 votes |
def test_parr_ops_errors(self, ng, box_with_array):
    idx = PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
                      freq='M', name='idx')
    obj = tm.box_expected(idx, box_with_array)

    msg = r"unsupported operand type\(s\)"
    with pytest.raises(TypeError, match=msg):
        obj + ng

    with pytest.raises(TypeError):
        # error message differs between PY2 and 3
        ng + obj

    with pytest.raises(TypeError, match=msg):
        obj - ng

    with pytest.raises(TypeError):
        np.add(obj, ng)

    with pytest.raises(TypeError):
        np.add(ng, obj)

    with pytest.raises(TypeError):
        np.subtract(obj, ng)

    with pytest.raises(TypeError):
        np.subtract(ng, obj)
Example #26
Source File: LSDMap_BasicManipulation.py From LSDMappingTools with MIT License | 5 votes |
def BasicMassBalance(path, file1, file2):
    """This function checks the difference in "volume" between two rasters.

    Args:
        path (str): The path to the files
        file1 (str): The name of the first raster.
        file2 (str): The name of the second raster

    Returns:
        float: The difference in the volume between the two rasters

    Author: SMM
    """
    # make sure names are in correct format
    NewPath = LSDOst.AppendSepToDirectoryPath(path)

    raster_file1 = NewPath+file1
    raster_file2 = NewPath+file2

    PixelArea = LSDMap_IO.GetPixelArea(raster_file1)
    print("PixelArea is: " + str(PixelArea))

    print("The formatted path is: " + NewPath)
    Raster1 = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)
    Raster2 = LSDMap_IO.ReadRasterArrayBlocks(raster_file2, raster_band=1)

    NewRaster = np.subtract(Raster2, Raster1)

    mass_balance = np.sum(NewRaster)*PixelArea

    print("linear dif " + str(np.sum(NewRaster)))

    return mass_balance
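A NumPy-only illustration of the same mass-balance idea, independent of the LSDMappingTools I/O helpers (the 2x2 rasters and pixel area are made up):

    import numpy as np

    raster1 = np.array([[10.0, 12.0], [11.0, 13.0]])    # e.g. elevation before
    raster2 = np.array([[ 9.5, 12.0], [10.0, 13.5]])    # e.g. elevation after
    pixel_area = 25.0                                    # area of one cell

    diff = np.subtract(raster2, raster1)
    mass_balance = np.sum(diff) * pixel_area
    print(mass_balance)    # -25.0, i.e. a net loss of material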
Example #27
Source File: cnn_main.py From Convolutional-Networks-for-Stock-Predicting with MIT License | 5 votes |
def r_squared(y_true, y_hat):
    ssr = 0
    sst = 0
    e = np.subtract(y_true, y_hat)
    y_mean = np.mean(y_true)
    for item in e:
        ssr += item**2
    for item in y_true:
        sst += (item - y_mean)**2
    r2 = 1 - ssr / sst
    return r2
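The loops above can be replaced by vectorized NumPy calls; a hedged equivalent sketch of the same R-squared formula, with arbitrary sample values:

    import numpy as np

    def r_squared_vec(y_true, y_hat):
        y_true = np.asarray(y_true, dtype=float)
        y_hat = np.asarray(y_hat, dtype=float)
        ssr = np.sum(np.square(np.subtract(y_true, y_hat)))    # residual sum of squares
        sst = np.sum(np.square(y_true - np.mean(y_true)))      # total sum of squares
        return 1.0 - ssr / sst

    print(r_squared_vec([1.0, 2.0, 3.0, 4.0], [1.1, 1.9, 3.2, 3.8]))    # 0.98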
Example #28
Source File: main.py From Convolutional-Networks-for-Stock-Predicting with MIT License | 5 votes |
def r_squared(y_true, y_hat):
    ssr = 0
    sst = 0
    e = np.subtract(y_true, y_hat)
    y_mean = np.mean(y_true)
    for item in e:
        ssr += item**2
    for item in y_true:
        sst += (item - y_mean)**2
    r2 = 1 - ssr / sst
    return r2
Example #29
Source File: general.py From cgpm with Apache License 2.0 | 5 votes |
def log_normalize(logp):
    """Normalizes an np array of log probabilities."""
    return np.subtract(logp, logsumexp(logp))
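A short sketch of what the normalization does; logsumexp is assumed to come from scipy.special here, and the probabilities are arbitrary:

    import numpy as np
    from scipy.special import logsumexp

    logp = np.log(np.array([0.2, 0.3, 0.1]))     # unnormalized log probabilities
    norm = np.subtract(logp, logsumexp(logp))    # subtract the log of the total mass
    print(np.exp(norm))    # [0.3333... 0.5 0.1666...], sums to 1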
Example #30
Source File: period.py From recruit with Apache License 2.0 | 5 votes |
def __array_wrap__(self, result, context=None):
    """
    Gets called after a ufunc. Needs additional handling as
    PeriodIndex stores internal data as int dtype

    Replace this to __numpy_ufunc__ in future version
    """
    if isinstance(context, tuple) and len(context) > 0:
        func = context[0]
        if func is np.add:
            pass
        elif func is np.subtract:
            name = self.name
            left = context[1][0]
            right = context[1][1]
            if (isinstance(left, PeriodIndex) and
                    isinstance(right, PeriodIndex)):
                name = left.name if left.name == right.name else None
                return Index(result, name=name)
            elif isinstance(left, Period) or isinstance(right, Period):
                return Index(result, name=name)
        elif isinstance(func, np.ufunc):
            if 'M->M' not in func.types:
                msg = "ufunc '{0}' not supported for the PeriodIndex"
                # This should be TypeError, but TypeError cannot be raised
                # from here because numpy catches.
                raise ValueError(msg.format(func.__name__))

    if is_bool_dtype(result):
        return result
    # the result is object dtype array of Period
    # cannot pass _simple_new as it is
    return type(self)(result, freq=self.freq, name=self.name)