Python numpy.zeros() Examples
The following are 28 code examples of numpy.zeros(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
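For reference, np.zeros(shape, dtype=float) allocates a new array of the given shape filled with zeros; most of the examples below use it to preallocate buffers that are filled in later. A minimal sketch of the common call patterns (not taken from any of the projects below):

import numpy as np

a = np.zeros(3)                       # 1-D float64 array: array([0., 0., 0.])
b = np.zeros((2, 3), dtype=np.int32)  # 2 x 3 integer matrix of zeros
c = np.zeros((3, 1))                  # column vector, as in the force-vector examples below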
Example #1
Source File: NLP.py From Financial-NLP with Apache License 2.0 | 7 votes |
def wordbag2mat(self, wordbag):  # testing
    if self.model is None:
        raise Exception("no model")
    matrix = np.empty((len(wordbag), self.len_vector))
    # Ideally a word missing from the dictionary would raise an exception,
    # but there is no way to supply a custom dictionary yet, so we are not
    # that strict: an out-of-vocabulary word gets a zero vector instead.
    for i in range(len(wordbag)):
        try:
            matrix[i, :] = self.model.wv[wordbag[i]]
        except KeyError:
            matrix[i, :] = np.zeros((1, self.len_vector))
    return matrix
################################ problem #####################################
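The fallback above relies on gensim's KeyedVectors raising KeyError for out-of-vocabulary words. A condensed sketch of the same pattern (the helper name and signature here are hypothetical, not part of Financial-NLP):

import numpy as np

def vector_or_zero(wv, word, dim):
    """Return the word's embedding, or a zero vector if it is out of vocabulary."""
    try:
        return wv[word]       # gensim KeyedVectors lookup
    except KeyError:
        return np.zeros(dim)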
Example #2
Source File: NLP.py From Financial-NLP with Apache License 2.0 | 7 votes |
def similarity_label(self, words, normalization=True):
    """
    You can calculate more than one word at the same time.
    """
    if self.model is None:
        raise Exception('no model.')
    if isinstance(words, string_types):
        words = [words]
    vectors = np.transpose(self.model.wv[words])
    if normalization:
        # Normalizing all columns at once is roughly twice as fast as the
        # original column-by-column loop:
        # unit_vector = np.zeros((len(vectors), len(words)))
        # for i in range(len(words)):
        #     unit_vector[:, i] = matutils.unitvec(vectors[:, i])
        unit_vector = unitvec(vectors, ax=0)
        dists = np.dot(self.Label_vec_u, unit_vector)
    else:
        dists = np.dot(self.Label_vec, vectors)
    return dists
Example #3
Source File: test.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License | 6 votes |
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob

    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float64, copy=False)

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1

        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int64)

    rois = im_rois * scales[levels]

    return rois, levels
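To make the level selection concrete, here is a small standalone walk-through with hypothetical values (not from the original test): each RoI is assigned the pyramid scale whose scaled area is closest to 224 x 224.

import numpy as np

scales = np.array([0.5, 1.0, 2.0])                         # hypothetical pyramid scales
im_rois = np.array([[10, 10, 120, 200]], dtype=np.float64)

widths = im_rois[:, 2] - im_rois[:, 0] + 1                 # 111
heights = im_rois[:, 3] - im_rois[:, 1] + 1                # 191
scaled_areas = (widths * heights)[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
levels = np.abs(scaled_areas - 224 * 224).argmin(axis=1)   # array([1]): scale 1.0 is closest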
Example #4
Source File: MultiLabelLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License | 6 votes |
def get_a_datum(self):
    if self._compressed:
        datum = extract_sample(
            self._data[self._cur], self._mean, self._resize)
    else:
        datum = self._data[self._cur]
    # start parsing labels
    label_elems = parse_label(self._label[self._cur])
    label = np.zeros(self._label_dim)
    if not self._multilabel:
        label[0] = label_elems[0]
    else:
        for i in label_elems:
            label[i] = 1
    self._cur = (self._cur + 1) % self._sample_count
    return datum, label
Example #5
Source File: create_dataset.py From cat-bbs with MIT License | 6 votes |
def load_keypoints(image_filepath, image_height, image_width):
    """Load facial keypoints of one image."""
    fp_keypoints = "%s.cat" % (image_filepath,)
    if not os.path.isfile(fp_keypoints):
        raise Exception("Could not find keypoint coordinates for image '%s'."
                        % (image_filepath,))
    else:
        coords_raw = open(fp_keypoints, "r").readlines()[0].strip().split(" ")
        coords_raw = [abs(int(coord)) for coord in coords_raw]
        keypoints = []
        #keypoints_arr = np.zeros((9*2,), dtype=np.int32)
        for i in range(1, len(coords_raw), 2):  # first element is the number of coords
            x = np.clip(coords_raw[i], 0, image_width-1)
            y = np.clip(coords_raw[i+1], 0, image_height-1)
            keypoints.append((x, y))
        return keypoints
Example #6
Source File: ctc_decoder.py From LipNet-PyTorch with BSD 3-Clause "New" or "Revised" License | 6 votes |
def wer(self, r, h):
    # initialisation
    d = np.zeros((len(r)+1)*(len(h)+1), dtype=np.uint8)
    d = d.reshape((len(r)+1, len(h)+1))
    for i in range(len(r)+1):
        for j in range(len(h)+1):
            if i == 0:
                d[0][j] = j
            elif j == 0:
                d[i][0] = i

    # computation
    for i in range(1, len(r)+1):
        for j in range(1, len(h)+1):
            if r[i-1] == h[j-1]:
                d[i][j] = d[i-1][j-1]
            else:
                substitution = d[i-1][j-1] + 1
                insertion = d[i][j-1] + 1
                deletion = d[i-1][j] + 1
                d[i][j] = min(substitution, insertion, deletion)

    return d[len(r)][len(h)]
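The table d implements the classic word-level Levenshtein dynamic program, so the return value is an edit count, not a rate. A usage sketch (the decoder instance here is hypothetical; divide by the reference length to get the actual word error rate):

ref = "the quick brown fox".split()
hyp = "the quick red fox jumps".split()
errors = decoder.wer(ref, hyp)  # 2: one substitution (brown -> red), one insertion (jumps)
rate = errors / len(ref)        # 0.5

Note that the uint8 table wraps around past 255 edits, which is fine for sentence-length inputs but not for long documents.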
Example #7
Source File: solver.py From fenics-topopt with MIT License | 6 votes |
def compliance_function_fdiff(self, x, dc):
    obj = self.compliance_function(x, dc)

    x0 = x.copy()
    dc0 = dc.copy()
    dcf = np.zeros(dc.shape)
    for i, v in enumerate(x):
        x = x0.copy()
        x[i] += 1e-6
        o1 = self.compliance_function(x, dc)
        x[i] = x0[i] - 1e-6
        o2 = self.compliance_function(x, dc)
        dcf[i] = (o1 - o2) / (2e-6)
    print("finite differences: {:g}".format(np.linalg.norm(dcf - dc0)))
    dc[:] = dc0
    return obj
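The same central-difference pattern generalizes to any scalar objective; a minimal self-contained sketch for checking an analytic gradient, assuming f maps an array to a float (none of these names come from fenics-topopt):

import numpy as np

def fd_gradient(f, x, eps=1e-6):
    """Approximate the gradient of f at x with central differences (O(eps**2) error)."""
    grad = np.zeros(x.shape)
    for i in range(x.size):
        xp, xm = x.copy(), x.copy()
        xp[i] += eps
        xm[i] -= eps
        grad[i] = (f(xp) - f(xm)) / (2 * eps)
    return grad

# fd_gradient(lambda v: (v ** 2).sum(), np.array([1.0, 2.0])) is approximately [2., 4.]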
Example #8
Source File: von_mises_stress.py From fenics-topopt with MIT License | 6 votes |
def calculate_fdiff_stress(self, x, u, nu, side=1, dx=1e-6):
    """
    Calculate the derivative of the Von Mises stress using finite
    differences given the densities x, displacements u, and young modulus
    nu. Optionally, provide the side length (default: 1) and delta x
    (default: 1e-6).
    """
    ds = self.calculate_diff_stress(x, u, nu, side)
    dsf = numpy.zeros(x.shape)
    x = numpy.expand_dims(x, -1)
    for i in range(x.shape[0]):
        delta = scipy.sparse.coo_matrix(([dx], [[i], [0]]), shape=x.shape)
        s1 = self.calculate_stress((x + delta.A).squeeze(), u, nu, side)
        s2 = self.calculate_stress((x - delta.A).squeeze(), u, nu, side)
        dsf[i] = ((s1 - s2) / (2. * dx))[i]
    print("finite differences: {:g}".format(numpy.linalg.norm(dsf - ds)))
    return dsf
Example #9
Source File: conftest.py From aospy with Apache License 2.0 | 6 votes |
def ds_time_encoded_cf():
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    bounds = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name='a').to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, bounds],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    cal_str = 'noleap'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = cal_str
    return ds
Example #10
Source File: test_data_loader.py From aospy with Apache License 2.0 | 6 votes |
def ds_with_time_bounds(alt_lat_str, var_name):
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    bounds = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3, 1, 1))
    lat = [0]
    lon = [0]
    ds = xr.DataArray(data,
                      coords=[time, lat, lon],
                      dims=[TIME_STR, alt_lat_str, LON_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, bounds],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_BOUNDS_STR].attrs['units'] = units_str
    return ds
Example #11
Source File: test_data_loader.py From aospy with Apache License 2.0 | 6 votes |
def test_sel_var():
    time = np.array([0, 31, 59]) + 15
    data = np.zeros((3))
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=convection_rain.name).to_dataset()
    condensation_rain_alt_name, = condensation_rain.alt_names
    ds[condensation_rain_alt_name] = xr.DataArray(data, coords=[ds.time])
    result = _sel_var(ds, convection_rain)
    assert result.name == convection_rain.name

    result = _sel_var(ds, condensation_rain)
    assert result.name == condensation_rain.name

    with pytest.raises(LookupError):
        _sel_var(ds, precip)
Example #12
Source File: nn.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def build(self):  #{{{
    import numpy as np
    self.W = shared((self.input_dim, 4 * self.output_dim),
                    name='{}_W'.format(self.name))
    self.U = shared((self.output_dim, 4 * self.output_dim),
                    name='{}_U'.format(self.name))
    self.b = K.variable(np.hstack((np.zeros(self.output_dim),
                                   K.get_value(self.forget_bias_init(
                                       (self.output_dim,))),
                                   np.zeros(self.output_dim),
                                   np.zeros(self.output_dim))),
                        name='{}_b'.format(self.name))
    #self.c_0 = shared((self.output_dim,), name='{}_c_0'.format(self.name))
    #self.h_0 = shared((self.output_dim,), name='{}_h_0'.format(self.name))
    self.c_0 = np.zeros(self.output_dim).astype(theano.config.floatX)
    self.h_0 = np.zeros(self.output_dim).astype(theano.config.floatX)
    self.params = [self.W, self.U,
                   self.b,
                   # self.c_0, self.h_0
                   ]
#}}}
Example #13
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Example #14
Source File: theano_backend.py From Att-ChemdNER with Apache License 2.0 | 6 votes |
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]],
        outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
Example #15
Source File: test_utils_times.py From aospy with Apache License 2.0 | 6 votes |
def test_add_uniform_time_weights():
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name='a').to_dataset()
    units_str = 'days since 2000-01-01 00:00:00'
    cal_str = 'noleap'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = cal_str

    with pytest.raises(KeyError):
        ds[TIME_WEIGHTS_STR]

    ds = add_uniform_time_weights(ds)
    time_weights_expected = xr.DataArray(
        [1, 1, 1], coords=ds[TIME_STR].coords, name=TIME_WEIGHTS_STR)
    time_weights_expected.attrs['units'] = 'days'
    assert ds[TIME_WEIGHTS_STR].identical(time_weights_expected)
Example #16
Source File: nn.py From Att-ChemdNER with Apache License 2.0 | 5 votes |
def step(self, word, index, energy_tm1, h_tm1, c_tm1, x):  #{{{
    # attention
    H = x
    if self.attendedMode == "concat":
        M_X = T.dot(x, self.W_A_X)          # + self.b_A_X
        M_state = T.dot(self.W_A_h, c_tm1)  # + self.b_A_h
        M = T.tanh(M_X + M_state)
        _energy = T.dot(M, self.W_A.T)      # + self.b_A
    elif self.attendedMode == "dot":
        energy = None
        assert 0, "not implemented"
    elif self.attendedMode == "general":
        M_X = T.dot(x, self.W_A_X)          # + self.b_A_X
        M_state = T.dot(self.W_A_h, c_tm1)  # + self.b_A_h
        M = T.tanh(M_X * M_state)
        _energy = T.dot(M, self.W_A.T)      # + self.b_A
    # mask
    mask = T.zeros((1, x.shape[0]), dtype=theano.config.floatX)
    energy = T.nnet.softmax(_energy[:index + 1])
    masked_energy = T.set_subtensor(mask[0, :index + 1], energy.flatten())
    glimpsed = (masked_energy.T * H).sum(axis=0)
    # combine glimpsed with word
    if self.wordInput_dim == 0:
        combined = glimpsed
    else:
        combined = K.concatenate([glimpsed, word])
    # original LSTM step
    h_t, c_t = super(AttentionLSTM3, self).step(combined, h_tm1, c_tm1)
    return masked_energy.flatten(), h_t, c_t
#}}}
Example #17
Source File: L_bracket.py From fenics-topopt with MIT License | 5 votes |
def get_forces(self):
    """Return the force vector for the problem."""
    ndof = 2 * (self.nelx + 1) * (self.nely + 1)
    f = np.zeros((ndof, 1))
    fx = self.nelx
    # fy = (self.nely - self.passive_max_y) // 2 + self.passive_max_y
    for i in range(1, 2):
        fy = self.passive_max_y - 1 + 2 * i
        id = xy_to_id(fx, fy, self.nelx, self.nely)
        f[2 * id + 1, 0] = -1
    return f
Example #18
Source File: bridge_distributed.py From fenics-topopt with MIT License | 5 votes |
def get_forces(self):
    # Return the force vector for the problem
    topx_to_id = np.vectorize(
        lambda x: xy_to_id(x, 0, self.nelx, self.nely))
    topx = 2 * topx_to_id(np.arange((self.nelx + 1) // 2)) + 1
    f = np.zeros((2 * (self.nelx + 1) * (self.nely + 1), 1))
    f[topx, 0] = -100
    return f
Example #19
Source File: gui.py From fenics-topopt with MIT License | 5 votes |
def __init__(self, nelx, nely, title=""):
    """Initialize plot and plot the initial design"""
    plt.ion()  # Ensure that redrawing is possible
    self.fig, self.ax = plt.subplots()
    self.im = self.ax.imshow(-np.zeros((nelx, nely)).T, cmap='gray',
                             interpolation='none',
                             norm=colors.Normalize(vmin=-1, vmax=0))
    plt.xlabel(title)
    # self.fig.tight_layout()
    self.fig.show()
    self.nelx, self.nely = nelx, nely
Example #20
Source File: triangulate.py From fenics-topopt with MIT License | 5 votes |
def mesh_from_img(img):
    nv = (img.shape[0] + 1) * (img.shape[1] + 1)
    nf = img.size * 2

    v_count = 0
    f_count = 0
    V_dict = {}
    V = np.zeros([nv, 2])
    F = np.zeros([nf, 3], dtype=int)

    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            val = img[i, j]
            if val == 255.0:
                continue

            v_idx = []
            for v_i in [(i, j), (i + 1, j), (i, j + 1), (i + 1, j + 1)]:
                if v_i in V_dict:
                    v_idx.append(V_dict[v_i])
                else:
                    V_dict[v_i] = v_count
                    V[v_count, :] = np.array((v_i[1], -v_i[0]))
                    v_count += 1
                    v_idx.append(v_count - 1)

            v1, v2, v3, v4 = v_idx
            F[f_count, :] = np.array([v1, v2, v4])
            F[f_count + 1, :] = np.array([v1, v4, v3])
            f_count += 2

    V = np.resize(V, [v_count, 2])
    F = np.resize(F, [f_count, 3])
    return V, F
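A tiny worked example of the function above (values chosen by hand, not from the repository): white pixels (255) are skipped, and every remaining pixel becomes a quad split into two triangles.

import numpy as np

img = np.array([[0., 255.],
                [0.,   0.]])
V, F = mesh_from_img(img)
# 3 pixels are kept, so F has 6 triangles; the quads share corners,
# giving 8 unique vertices in V.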
Example #21
Source File: test_utils_times.py From aospy with Apache License 2.0 | 5 votes |
def test_assert_has_data_for_time():
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    nv = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]

    start_date = np.datetime64('2000-01-01')
    end_date = np.datetime64('2000-03-31')
    _assert_has_data_for_time(da, start_date, end_date)

    start_date_bad = np.datetime64('1999-12-31')
    end_date_bad = np.datetime64('2000-04-01')

    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date)

    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date, end_date_bad)

    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date_bad)
Example #22
Source File: test_utils_times.py From aospy with Apache License 2.0 | 5 votes |
def test_assert_has_data_for_time_cftime_datetimes(calendar, date_type):
    time_bounds = np.array([[0, 2], [2, 4], [4, 6]])
    nv = np.array([0, 1])
    time = np.array([1, 3, 5])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 0002-01-02 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = calendar
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]

    start_date = date_type(2, 1, 2)
    end_date = date_type(2, 1, 8)
    _assert_has_data_for_time(da, start_date, end_date)

    start_date_bad = date_type(2, 1, 1)
    end_date_bad = date_type(2, 1, 9)

    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date)

    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date, end_date_bad)

    with pytest.raises(AssertionError):
        _assert_has_data_for_time(da, start_date_bad, end_date_bad)
Example #23
Source File: test_utils_times.py From aospy with Apache License 2.0 | 5 votes |
def test_sel_time():
    time_bounds = np.array([[0, 31], [31, 59], [59, 90]])
    nv = np.array([0, 1])
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    var_name = 'a'
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name=var_name).to_dataset()
    ds[TIME_BOUNDS_STR] = xr.DataArray(time_bounds,
                                       coords=[time, nv],
                                       dims=[TIME_STR, BOUNDS_STR],
                                       name=TIME_BOUNDS_STR)
    units_str = 'days since 2000-01-01 00:00:00'
    ds[TIME_STR].attrs['units'] = units_str
    ds = ensure_time_avg_has_cf_metadata(ds)
    ds = set_grid_attrs_as_coords(ds)
    ds = xr.decode_cf(ds)
    da = ds[var_name]

    start_date = np.datetime64('2000-02-01')
    end_date = np.datetime64('2000-03-31')
    result = sel_time(da, start_date, end_date)
    assert result[SUBSET_START_DATE_STR].values == start_date
    assert result[SUBSET_END_DATE_STR].values == end_date
Example #24
Source File: rba.py From libTLDA with MIT License | 5 votes |
def psi(self, X, theta, w, K=2):
    """
    Compute psi function.

    Parameters
    ----------
    X : array
        data set (N samples by D features)
    theta : array
        classifier parameters (D features by 1)
    w : array
        importance-weights (N samples by 1)
    K : int
        number of classes (def: 2)

    Returns
    -------
    psi : array
        array with psi function values (N samples by K classes)

    """
    # Number of samples
    N = X.shape[0]

    # Preallocate psi array
    psi = np.zeros((N, K))

    # Loop over classes
    for k in range(K):
        # Compute feature statistics
        Xk = self.feature_stats(X, k*np.ones((N, 1)))

        # Compute psi function
        psi[:, k] = (w*np.dot(Xk, theta))[:, 0]

    return psi
Example #25
Source File: conftest.py From aospy with Apache License 2.0 | 5 votes |
def ds_with_time_bounds(ds_time_encoded_cf, alt_lat_str, var_name):
    time = ds_time_encoded_cf[TIME_STR]
    data = np.zeros((3, 1, 1))
    lat = [0]
    lon = [0]

    ds = xr.DataArray(
        data,
        coords=[time, lat, lon],
        dims=[TIME_STR, alt_lat_str, LON_STR],
        name=var_name,
    ).to_dataset()
    ds[TIME_BOUNDS_STR] = ds_time_encoded_cf[TIME_BOUNDS_STR]
    return ds
Example #26
Source File: util.py From libTLDA with MIT License | 5 votes |
def one_hot(y, fill_k=False, one_not=False):
    """Map to one-hot encoding."""
    # Check labels
    labels = np.unique(y)

    # Number of classes
    K = len(labels)

    # Number of samples
    N = y.shape[0]

    # Preallocate array
    if one_not:
        Y = -np.ones((N, K))
    else:
        Y = np.zeros((N, K))

    # Set k-th column to 1 for n-th sample
    for n in range(N):
        # Map current class to index label
        y_n = (y[n] == labels)

        if fill_k:
            Y[n, y_n] = y_n
        else:
            Y[n, y_n] = 1

    return Y, labels
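A quick usage sketch of one_hot with its defaults; the expected output follows directly from the code above:

import numpy as np

y = np.array([0, 2, 1, 2])
Y, labels = one_hot(y)
# labels -> array([0, 1, 2])
# Y -> [[1., 0., 0.],
#       [0., 0., 1.],
#       [0., 1., 0.],
#       [0., 0., 1.]]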
Example #27
Source File: kde.py From svviz with MIT License | 5 votes |
def evaluate(self, points):
    points = atleast_2d(points)

    d, m = points.shape
    if d != self.d:
        if d == 1 and m == self.d:
            # points was passed in as a row vector
            points = reshape(points, (self.d, 1))
            m = 1
        else:
            msg = "points have dimension %s, dataset has dimension %s" % (d, self.d)
            raise ValueError(msg)

    result = zeros((m,), dtype=float)

    if m >= self.n:
        # there are more points than data, so loop over data
        for i in range(self.n):
            diff = self.dataset[:, i, newaxis] - points
            tdiff = dot(self.inv_cov, diff)
            energy = sum(diff * tdiff, axis=0) / 2.0
            result = result + exp(-energy)
    else:
        # loop over points
        for i in range(m):
            diff = self.dataset - points[:, i, newaxis]
            tdiff = dot(self.inv_cov, diff)
            energy = sum(diff * tdiff, axis=0) / 2.0
            result[i] = sum(exp(-energy), axis=0)

    result = result / self._norm_factor

    return result
Example #28
Source File: rba.py From libTLDA with MIT License | 5 votes |
def posterior(self, psi):
    """
    Class-posterior estimation.

    Parameters
    ----------
    psi : array
        weighted data-classifier output (N samples by K classes)

    Returns
    -------
    pyx : array
        class-posterior estimation (N samples by K classes)

    """
    # Data shape
    N, K = psi.shape

    # Preallocate array
    pyx = np.zeros((N, K))

    # Subtract maximum value for numerical stability
    psi = (psi.T - np.max(psi, axis=1).T).T

    # Loop over classes
    for k in range(K):
        # Estimate posterior p^(Y=y | x_i)
        pyx[:, k] = np.exp(psi[:, k]) / np.sum(np.exp(psi), axis=1)

    return pyx
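The max-subtraction above is the standard numerically stable softmax trick; a minimal standalone equivalent (the function name here is illustrative, not part of libTLDA):

import numpy as np

def stable_softmax(psi):
    """Row-wise softmax; shifting by the row max avoids overflow in exp."""
    shifted = psi - psi.max(axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

# stable_softmax(np.array([[1000.0, 1001.0]])) -> [[0.269, 0.731]] with no overflow warning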