Python numpy.trim_zeros() Examples
The following are 30 code examples of numpy.trim_zeros(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the numpy module, or try the search function.
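Before working through the examples, here is a minimal, self-contained sketch of what numpy.trim_zeros() does: it strips leading and/or trailing zeros from a one-dimensional array (or other sequence), controlled by the trim argument ('f' = front, 'b' = back, 'fb' = both, which is the default). Interior zeros are always kept.

import numpy as np

a = np.array([0, 0, 1, 2, 0, 3, 0, 0])

print(np.trim_zeros(a))            # [1 2 0 3]       default trim='fb' strips both ends
print(np.trim_zeros(a, trim='f'))  # [1 2 0 3 0 0]   only leading zeros removed
print(np.trim_zeros(a, trim='b'))  # [0 0 1 2 0 3]   only trailing zeros removed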
Example #1
Source File: utils.py From abagen with BSD 3-Clause "New" or "Revised" License | 6 votes |
def get_unique_labels(label_image):
    """
    Returns all possible ROI labels from ``label_image``

    Parameters
    ----------
    label_image : niimg-like object
        ROI image, where each ROI is identified with a unique integer ID

    Returns
    -------
    labels : np.ndarray
        Integer labels of all ROIs found within ``label_image``
    """
    label_image = check_img(label_image)
    return np.trim_zeros(np.unique(label_image.dataobj)).astype(int)
Example #2
Source File: gasrank.py From pyflux with BSD 3-Clause "New" or "Revised" License | 6 votes |
def predict_one_component(self, team_1, team_2, neutral=False):
    """ Returns team 1's probability of winning """
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        if type(team_1) == str:
            team_1_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[self.team_dict[team_1]], trim='b')[-1]
            team_2_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[self.team_dict[team_2]], trim='b')[-1]
        else:
            team_1_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[team_1], trim='b')[-1]
            team_2_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[team_2], trim='b')[-1]

        t_z = self.transform_z()

        if neutral is False:
            return self.link(t_z[0] + team_1_ability - team_2_ability)
        else:
            return self.link(team_1_ability - team_2_ability)
Example #3
Source File: gasrank.py From pyflux with BSD 3-Clause "New" or "Revised" License | 6 votes |
def predict_two_components(self, team_1, team_2, team_1b, team_2b, neutral=False):
    """ Returns team 1's probability of winning """
    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        if type(team_1) == str:
            team_1_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[0].T[self.team_dict[team_1]], trim='b')[-1]
            team_2_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[0].T[self.team_dict[team_2]], trim='b')[-1]
            team_1_b_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[1].T[self.team_dict[team_1]], trim='b')[-1]
            team_2_b_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[1].T[self.team_dict[team_2]], trim='b')[-1]
        else:
            team_1_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[0].T[team_1], trim='b')[-1]
            team_2_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[0].T[team_2], trim='b')[-1]
            # use the second-component arguments (team_1b / team_2b) from the signature here
            team_1_b_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[1].T[team_1b], trim='b')[-1]
            team_2_b_ability = np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[1].T[team_2b], trim='b')[-1]

        t_z = self.transform_z()

        if neutral is False:
            return self.link(t_z[0] + team_1_ability - team_2_ability + team_1_b_ability - team_2_b_ability)
        else:
            return self.link(team_1_ability - team_2_ability + team_1_b_ability - team_2_b_ability)
Example #4
Source File: gen_mini_batches.py From avod with MIT License | 6 votes |
def split_indices(dataset, num_children):
    """Splits indices between children

    Args:
        dataset: Dataset object
        num_children: Number of children to split samples between

    Returns:
        indices_split: A list of evenly split indices
    """
    all_indices = np.arange(dataset.num_samples)

    # Pad indices to divide evenly
    length_padding = (-len(all_indices)) % num_children
    padded_indices = np.concatenate((all_indices,
                                     np.zeros(length_padding, dtype=np.int32)))

    # Split and trim last set of indices to original length
    indices_split = np.split(padded_indices, num_children)
    indices_split[-1] = np.trim_zeros(indices_split[-1])

    return indices_split
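Example #4 above (and the identical Examples #5 and #6 that follow, from forks of the same project) relies on a pad-split-trim trick: pad the index array with zeros so it divides evenly, split it, then let np.trim_zeros() drop the padding from the last chunk. A standalone sketch of the pattern, with a hypothetical sample count standing in for dataset.num_samples:

import numpy as np

num_samples = 10   # hypothetical stand-in for dataset.num_samples
num_children = 4

all_indices = np.arange(num_samples)                    # [0 1 2 ... 9]
length_padding = (-len(all_indices)) % num_children     # 2 zeros needed to reach 12
padded_indices = np.concatenate((all_indices,
                                 np.zeros(length_padding, dtype=np.int32)))

indices_split = np.split(padded_indices, num_children)
indices_split[-1] = np.trim_zeros(indices_split[-1])    # drop the zero padding again

print([chunk.tolist() for chunk in indices_split])
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

This works because the real index 0 always lands in the first chunk; in the degenerate case num_children == 1 (where the first chunk is also the last) the leading 0 would be trimmed away as well.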
Example #5
Source File: gen_mini_batches.py From TLNet with Apache License 2.0 | 6 votes |
def split_indices(dataset, num_children):
    """Splits indices between children

    Args:
        dataset: Dataset object
        num_children: Number of children to split samples between

    Returns:
        indices_split: A list of evenly split indices
    """
    all_indices = np.arange(dataset.num_samples)

    # Pad indices to divide evenly
    length_padding = (-len(all_indices)) % num_children
    padded_indices = np.concatenate((all_indices,
                                     np.zeros(length_padding, dtype=np.int32)))

    # Split and trim last set of indices to original length
    indices_split = np.split(padded_indices, num_children)
    indices_split[-1] = np.trim_zeros(indices_split[-1])

    return indices_split
Example #6
Source File: gen_mini_batches.py From avod-ssd with MIT License | 6 votes |
def split_indices(dataset, num_children):
    """Splits indices between children

    Args:
        dataset: Dataset object
        num_children: Number of children to split samples between

    Returns:
        indices_split: A list of evenly split indices
    """
    all_indices = np.arange(dataset.num_samples)

    # Pad indices to divide evenly
    length_padding = (-len(all_indices)) % num_children
    padded_indices = np.concatenate((all_indices,
                                     np.zeros(length_padding, dtype=np.int32)))

    # Split and trim last set of indices to original length
    indices_split = np.split(padded_indices, num_children)
    indices_split[-1] = np.trim_zeros(indices_split[-1])

    return indices_split
Example #7
Source File: array.py From pycbc with GNU General Public License v3.0 | 5 votes |
def trim_zeros(self):
    """Remove the leading and trailing zeros.
    """
    tmp = self.numpy()
    f = len(self) - len(_numpy.trim_zeros(tmp, trim='f'))
    b = len(self) - len(_numpy.trim_zeros(tmp, trim='b'))
    return self[f:len(self)-b]
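The pycbc method above uses np.trim_zeros() only to count how many zeros sit at each end, then slices its own array type accordingly. A toy illustration of the counting idea on a plain ndarray (the variable names here are just for illustration):

import numpy as np

x = np.array([0.0, 0.0, 1.5, -2.0, 0.0, 3.0, 0.0])

leading = len(x) - len(np.trim_zeros(x, trim='f'))    # 2 zeros at the front
trailing = len(x) - len(np.trim_zeros(x, trim='b'))   # 1 zero at the back

print(leading, trailing, x[leading:len(x) - trailing])
# 2 1 [ 1.5 -2.   0.   3. ]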
Example #8
Source File: FeatureFunctionLib.py From FATS with MIT License | 5 votes |
def fit(self, data):
    magnitude = data[0]
    time = data[1]
    global m_21
    global m_31
    global m_32

    Nsf = 100
    Np = 100
    sf1 = np.zeros(Nsf)
    sf2 = np.zeros(Nsf)
    sf3 = np.zeros(Nsf)
    f = interp1d(time, magnitude)

    time_int = np.linspace(np.min(time), np.max(time), Np)
    mag_int = f(time_int)

    for tau in np.arange(1, Nsf):
        sf1[tau-1] = np.mean(np.power(np.abs(mag_int[0:Np-tau] - mag_int[tau:Np]), 1.0))
        sf2[tau-1] = np.mean(np.abs(np.power(np.abs(mag_int[0:Np-tau] - mag_int[tau:Np]), 2.0)))
        sf3[tau-1] = np.mean(np.abs(np.power(np.abs(mag_int[0:Np-tau] - mag_int[tau:Np]), 3.0)))
    sf1_log = np.log10(np.trim_zeros(sf1))
    sf2_log = np.log10(np.trim_zeros(sf2))
    sf3_log = np.log10(np.trim_zeros(sf3))

    m_21, b_21 = np.polyfit(sf1_log, sf2_log, 1)
    m_31, b_31 = np.polyfit(sf1_log, sf3_log, 1)
    m_32, b_32 = np.polyfit(sf2_log, sf3_log, 1)

    return m_21
Example #9
Source File: text.py From lambda-packs with MIT License | 5 votes |
def reverse(self, x):
    """Reverses output of transform back to text.

    Args:
        x: iterator or matrix of integers. Document representation in bytes.

    Yields:
        Iterators of utf-8 strings.
    """
    for data in x:
        document = np.trim_zeros(data.astype(np.int8), trim='b').tostring()
        try:
            yield document.decode('utf-8')
        except UnicodeDecodeError:
            yield ''
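Examples #9, #12, #16 and #21 in this listing are copies of the same TensorFlow text-processing helper: documents are stored as fixed-length rows of byte values padded with zeros, and trim_zeros(..., trim='b') removes the padding before decoding. A minimal sketch of the round trip (using .tobytes(), of which the .tostring() seen above is the older alias):

import numpy as np

row = np.zeros(16, dtype=np.int8)                 # fixed-width, zero-padded row
encoded = np.frombuffer(b'hello', dtype=np.int8)  # byte values of the document
row[:len(encoded)] = encoded

document = np.trim_zeros(row, trim='b').tobytes()
print(document.decode('utf-8'))                   # hello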
Example #10
Source File: data_utils.py From tensorflow_nlp with Apache License 2.0 | 5 votes |
def unpad_zeros(l):
    out = []
    for tags in l:
        out.append([np.trim_zeros(line) for line in tags])
    return out

# Pad sentences that don't reach the required length with zeros
Example #11
Source File: accountcurve.py From PyTrendFollow with MIT License | 5 votes |
def losses(self):
    return [z for z in np.trim_zeros(self.returns()).sum(axis=1) if z < 0]
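This and the other PyTrendFollow snippets below (Examples #13-#15 and #17-#20) pass pandas objects straight to np.trim_zeros() to cut off the flat warm-up period before computing statistics. Since trim_zeros only inspects and slices its argument, the call also accepts a pandas Series, as in this small sketch with made-up daily returns (exact behaviour and return type may vary across NumPy releases):

import numpy as np
import pandas as pd

returns = pd.Series([0.0, 0.0, 0.01, -0.02, 0.015, 0.0],
                    index=pd.date_range('2020-01-01', periods=6))

active = np.trim_zeros(returns)   # drops the leading warm-up zeros and the trailing zero
print(active)                     # the three non-zero daily returns remain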
Example #12
Source File: text.py From auto-alt-text-lambda-api with MIT License | 5 votes |
def reverse(self, x):
    """Reverses output of transform back to text.

    Args:
        x: iterator or matrix of integers. Document representation in bytes.

    Yields:
        Iterators of utf-8 strings.
    """
    for data in x:
        document = np.trim_zeros(data.astype(np.int8), trim='b').tostring()
        try:
            yield document.decode('utf-8')
        except UnicodeDecodeError:
            yield ''
Example #13
Source File: accountcurve.py From PyTrendFollow with MIT License | 5 votes |
def annual_vol(self):
    return "{0:,.4f}".format(np.trim_zeros(self.returns()).sum(axis=1).std() * np.sqrt(252) / self.capital)
Example #14
Source File: accountcurve.py From PyTrendFollow with MIT License | 5 votes |
def underwater(self):
    r = self.returns().sum(axis=1)
    u = (r.cumsum() - r.cumsum().cummax()) / self.capital
    return np.trim_zeros(u).plot()
Example #15
Source File: accountcurve.py From PyTrendFollow with MIT License | 5 votes |
def cumcapital(self):
    return np.trim_zeros((self.returns().sum(axis=1) / self.capital) + 1).cumprod()
Example #16
Source File: text.py From keras-lambda with MIT License | 5 votes |
def reverse(self, x):
    """Reverses output of transform back to text.

    Args:
        x: iterator or matrix of integers. Document representation in bytes.

    Yields:
        Iterators of utf-8 strings.
    """
    for data in x:
        document = np.trim_zeros(data.astype(np.int8), trim='b').tostring()
        try:
            yield document.decode('utf-8')
        except UnicodeDecodeError:
            yield ''
Example #17
Source File: utility.py From PyTrendFollow with MIT License | 5 votes |
def rolling_sharpe(p):
    """Mean Sharpe ratio of the returns in a rolling window of size 252"""
    p = np.trim_zeros(p)
    return p.rolling(252, min_periods=252).mean() / p.rolling(252, min_periods=252).std() * np.sqrt(252)
Example #18
Source File: accountcurve.py From PyTrendFollow with MIT License | 5 votes |
def sortino(self):
    return np.trim_zeros(self.returns().sum(axis=1)).mean() / np.std(self.losses()) * np.sqrt(252)
Example #19
Source File: utility.py From PyTrendFollow with MIT License | 5 votes |
def sortino(x):
    if type(x) == pd.Series:
        x = x.to_frame()
    return np.trim_zeros(x.sum(axis=1)).mean() / np.std(losses(x)) * np.sqrt(252)
Example #20
Source File: utility.py From PyTrendFollow with MIT License | 5 votes |
def losses(x):
    return [z for z in np.trim_zeros(x).sum(axis=1) if z < 0]
Example #21
Source File: text.py From deep_image_model with Apache License 2.0 | 5 votes |
def reverse(self, x):
    """Reverses output of transform back to text.

    Args:
        x: iterator or matrix of integers. Document representation in bytes.

    Yields:
        Iterators of utf-8 strings.
    """
    for data in x:
        document = np.trim_zeros(data.astype(np.int8), trim='b').tostring()
        try:
            yield document.decode('utf-8')
        except UnicodeDecodeError:
            yield ''
Example #22
Source File: gasrank.py From pyflux with BSD 3-Clause "New" or "Revised" License | 5 votes |
def plot_abilities_two_components(self, team_ids, component_id=0, **kwargs):
    import matplotlib.pyplot as plt
    import seaborn as sns

    figsize = kwargs.get('figsize', (15, 5))

    if component_id == 0:
        name_strings = self.team_strings
        name_dict = self.team_dict
    else:
        name_strings = self.team_strings_2
        name_dict = self.team_dict_2

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        plt.figure(figsize=figsize)
        if type(team_ids) == type([]):
            if type(team_ids[0]) == str:
                for team_id in team_ids:
                    plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[component_id].T[name_dict[team_id]], trim='b'), label=name_strings[name_dict[team_id]])
            else:
                for team_id in team_ids:
                    plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[component_id].T[team_id], trim='b'), label=name_strings[team_id])
        else:
            if type(team_ids) == str:
                plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[component_id].T[name_dict[team_ids]], trim='b'), label=name_strings[name_dict[team_ids]])
            else:
                plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values())[component_id].T[team_ids], trim='b'), label=name_strings[team_ids])

        plt.legend()
        plt.ylabel("Power")
        plt.xlabel("Games")
        plt.show()
Example #23
Source File: minreal_test.py From python-control with BSD 3-Clause "New" or "Revised" License | 5 votes |
def assert_numden_almost_equal(self, n1, n2, d1, d2):
    n1[np.abs(n1) < 1e-10] = 0.
    n1 = np.trim_zeros(n1)
    d1[np.abs(d1) < 1e-10] = 0.
    d1 = np.trim_zeros(d1)
    n2[np.abs(n2) < 1e-10] = 0.
    n2 = np.trim_zeros(n2)
    d2[np.abs(d2) < 1e-10] = 0.
    d2 = np.trim_zeros(d2)
    np.testing.assert_array_almost_equal(n1, n2)
    # The excerpt originally compared d2 with itself; comparing d1 with d2 is
    # what the helper is meant to check.
    np.testing.assert_array_almost_equal(d1, d2)
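The helper above zeroes out numerically negligible coefficients and then trims them, so that two coefficient arrays describing the same polynomial compare equal even when one carries a spurious near-zero leading term. A toy illustration:

import numpy as np

# Same polynomial s**2 + 2s + 1, one copy with a spurious ~0 leading coefficient
n1 = np.array([3e-12, 1.0, 2.0, 1.0])
n2 = np.array([1.0, 2.0, 1.0])

n1[np.abs(n1) < 1e-10] = 0.0
n1 = np.trim_zeros(n1)

np.testing.assert_array_almost_equal(n1, n2)   # passes: both are [1. 2. 1.]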
Example #24
Source File: gasrank.py From pyflux with BSD 3-Clause "New" or "Revised" License | 5 votes |
def plot_abilities_one_components(self, team_ids, **kwargs):
    import matplotlib.pyplot as plt
    import seaborn as sns

    figsize = kwargs.get('figsize', (15, 5))

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        plt.figure(figsize=figsize)
        if type(team_ids) == type([]):
            if type(team_ids[0]) == str:
                for team_id in team_ids:
                    plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[self.team_dict[team_id]], trim='b'), label=self.team_strings[self.team_dict[team_id]])
            else:
                for team_id in team_ids:
                    plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[team_id], trim='b'), label=self.team_strings[team_id])
        else:
            if type(team_ids) == str:
                plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[self.team_dict[team_ids]], trim='b'), label=self.team_strings[self.team_dict[team_ids]])
            else:
                plt.plot(np.trim_zeros(self._model_abilities(self.latent_variables.get_z_values()).T[team_ids], trim='b'), label=self.team_strings[team_ids])

        plt.legend()
        plt.ylabel("Power")
        plt.xlabel("Games")
        plt.show()
Example #25
Source File: utils.py From abagen with BSD 3-Clause "New" or "Revised" License | 5 votes |
def get_centroids(image, labels=None, image_space=False):
    """
    Finds centroids of ``labels`` in ``label_image``

    Parameters
    ----------
    label_image : niimg-like object
        3D image containing integer label at each point
    labels : array_like, optional
        List of values containing labels of which to find centroids.
        Default: all possible labels
    image_space : bool, optional
        Whether to return xyz (image space) coordinates for centroids based
        on transformation in ``label_image.affine``. Default: False

    Returns
    -------
    centroids : (N, 3) np.ndarray
        Coordinates of centroids for ROIs in input data
    """
    image = check_img(image)
    data = np.asarray(image.dataobj)

    # if no labels of interest provided, get all possible labels
    if labels is None:
        labels = np.trim_zeros(np.unique(data))

    # get centroids for all possible labels
    centroids = np.row_stack(center_of_mass(data, labels=data, index=labels))

    # return xyz if desired; otherwise, ijk
    if image_space:
        centroids = ijk_to_xyz(centroids, image.affine)

    return centroids
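Both abagen helpers in this listing (Examples #1 and #25) use the same idiom: np.unique() returns the labels sorted, so the background value 0, if present, is always at the front and a single trim_zeros() call removes it. A toy version on a small label array (labels are assumed to be non-negative integers, as in ROI images):

import numpy as np

label_image = np.array([[0, 0, 2],
                        [1, 0, 2],
                        [1, 3, 0]])

labels = np.trim_zeros(np.unique(label_image)).astype(int)
print(labels)   # [1 2 3]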
Example #26
Source File: __funcs__.py From porespy with MIT License | 5 votes |
def _create_alias_map(im, alias=None):
    r"""
    Creates an alias mapping between phases in the original image and
    identifiable names. This mapping is used during network extraction to
    label interconnections between, and properties of, each phase.

    Parameters
    ----------
    im : ND-array
        Image of porous material where each phase is represented by a unique
        integer. Phase integers should start from 1. A boolean image will
        extract only one network, labelled with True's only.

    alias : dict (Optional)
        A dictionary that assigns a unique image label to a specific phase.
        For example {1: 'Solid'} will show all structural properties
        associated with label 1 as Solid phase properties. If ``None`` then
        default labelling will be used i.e. {1: 'Phase1', ...}.

    Returns
    -------
    A dictionary with numerical phase labels as keys, and readable phase
    names as values. If no alias is provided then default labelling is used
    i.e. {1: 'Phase1', ...}.
    """
    # -------------------------------------------------------------------------
    # Get alias if provided by user
    phases_num = np.unique(im * 1)
    phases_num = np.trim_zeros(phases_num)
    al = {}
    for values in phases_num:
        al[values] = 'phase{}'.format(values)
    if alias is not None:
        alias_sort = dict(sorted(alias.items()))
        phase_labels = np.array([*alias_sort])
        al = alias
        if set(phase_labels) != set(phases_num):
            raise Exception('Alias labels do not match the image labels; '
                            'please provide correct image labels')
    return al
Example #27
Source File: batch.py From segmenter with Apache License 2.0 | 5 votes |
def predict_seq2seq(sess, model, decoding, data, decode_len, dr=None, argmax=True,
                    batch_size=100, ensemble=False, verbose=False):
    num_items = len(data)
    in_len = len(data[0][0])
    input_v = model[:num_items * in_len + decode_len]
    input_v.append(decoding)
    if dr is not None:
        input_v.append(dr)
    predictions = model[num_items * in_len + decode_len:]
    output = []
    samples = zip(*data)
    start_idx = 0
    n_samples = len(samples)
    while start_idx < n_samples:
        if verbose:
            print '%d' % (start_idx * 100 / n_samples) + '%'
        next_batch_input = samples[start_idx:start_idx + batch_size]
        batch_size = len(next_batch_input)
        holders = []
        next_batch_input = zip(*next_batch_input)
        for n_batch in next_batch_input:
            n_batch = np.asarray(n_batch).T
            for b in n_batch:
                holders.append(b)
        for i in range(decode_len):
            holders.append(np.zeros(batch_size, dtype='int32'))
        holders.append(True)
        if dr is not None:
            holders.append(0.0)
        if argmax:
            pre = sess.run(predictions, feed_dict={i: h for i, h in zip(input_v, holders)})
            pre = [np.argmax(pre_t, axis=1) for pre_t in pre]
            pre = np.asarray(pre).T.tolist()
            pre = [np.trim_zeros(pre_t) for pre_t in pre]
            output += pre
        else:
            pre = sess.run(predictions, feed_dict={i: h for i, h in zip(input_v, holders)})
            output += pre
        start_idx += batch_size
    return output
Example #28
Source File: toolbox.py From segmenter with Apache License 2.0 | 5 votes |
def decode_chars(idx, idx2chars):
    out = []
    for line in idx:
        line = np.trim_zeros(line)
        out.append([idx2chars[item] for item in line])
    return out
Example #29
Source File: toolbox.py From segmenter with Apache License 2.0 | 5 votes |
def unpad_zeros(l):
    out = []
    for tags in l:
        out.append([np.trim_zeros(line) for line in tags])
    return out
Example #30
Source File: transducer_model.py From segmenter with Apache License 2.0 | 5 votes |
def train(self, t_x, t_y, v_x, v_y, lrv, char2idx, sess, epochs, batch_size=10, reset=True):
    idx2char = {k: v for v, k in char2idx.items()}
    v_y_g = [np.trim_zeros(v_y_t) for v_y_t in v_y]
    gold_out = [toolbox.generate_trans_out(v_y_t, idx2char) for v_y_t in v_y_g]
    best_score = 0
    if reset or not os.path.isfile(self.trained + '_weights.index'):
        for epoch in range(epochs):
            Batch.train_seq2seq(sess, model=self.en_vec + self.trans_labels, decoding=self.feed_previous,
                                batch_size=batch_size, config=self.trans_train, lr=self.trans_l_rate,
                                lrv=lrv, data=[t_x] + [t_y])
            pred = Batch.predict_seq2seq(sess, model=self.en_vec + self.de_vec + self.trans_output,
                                         decoding=self.feed_previous, decode_len=self.decode_step,
                                         data=[v_x], argmax=True, batch_size=100)
            pred_out = [toolbox.generate_trans_out(pre_t, idx2char) for pre_t in pred]

            c_scores = evaluation.trans_evaluator(gold_out, pred_out)

            print 'epoch: %d' % (epoch + 1)
            print 'ACC: %f' % c_scores[0]
            print 'Token F score: %f' % c_scores[1]

            if c_scores[1] > best_score:
                best_score = c_scores[1]
                self.saver.save(sess, self.trained + '_weights', write_meta_graph=False)
        if best_score > 0 or not reset:
            self.saver.restore(sess, self.trained + '_weights')