Python numpy.random.permutation() Examples
The following are 29 code examples of numpy.random.permutation(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy.random, or try the search function.
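Before the project examples, here is a minimal sketch of the two calling conventions of numpy.random.permutation (the sample outputs in the comments are illustrative, since the results are random):

import numpy as np

# With an integer n, permutation(n) returns a shuffled copy of np.arange(n).
idx = np.random.permutation(5)          # e.g. array([3, 0, 4, 1, 2])

# With an array, it returns a shuffled copy and leaves the input unchanged
# (np.random.shuffle, by contrast, shuffles in place).
a = np.array([10, 20, 30])
b = np.random.permutation(a)            # a keeps its original order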
Example #1
Source File: loader.py From PANet with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
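The in-place shuffle of a reshaped view above is what keeps each minibatch of aspect-ratio-grouped images together: np.random.shuffle only permutes along the first axis, so shuffling the (num_batches, IMS_PER_BATCH) view reorders whole rows while the indices inside each row stay consecutive. A standalone sketch of the trick (the batch size 3 is arbitrary):

import numpy as np

indices = np.arange(12)
view = indices.reshape(-1, 3)   # 4 minibatches of 3 consecutive indices
np.random.shuffle(view)         # permutes rows of the view, mutating indices
print(indices)                  # e.g. [ 6  7  8  0  1  2  9 10 11  3  4  5]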
Example #2
Source File: test_token_swapper.py From qiskit-terra with Apache License 2.0 | 6 votes |
def test_large_partial_random(self) -> None:
    """Test a random (partial) mapping on a large randomly generated graph"""
    size = 100
    # Note that graph may have "gaps" in the node counts, i.e. the numbering is noncontiguous.
    graph = nx.dense_gnm_random_graph(size, size ** 2 // 10)
    graph.remove_edges_from((i, i) for i in graph.nodes)  # Remove self-loops.
    # Make sure the graph is connected by adding C_n
    nodes = list(graph.nodes)
    graph.add_edges_from((node, nodes[(i + 1) % len(nodes)]) for i, node in enumerate(nodes))
    swapper = ApproximateTokenSwapper(graph)  # type: ApproximateTokenSwapper[int]

    # Generate a randomized permutation.
    rand_perm = random.permutation(graph.nodes())
    permutation = dict(zip(graph.nodes(), rand_perm))
    mapping = dict(itertools.islice(permutation.items(), 0, size, 2))  # Drop every 2nd element.

    out = list(swapper.map(mapping, trials=40))
    util.swap_permutation([out], mapping, allow_missing_keys=True)
    self.assertEqual({i: i for i in mapping.values()}, mapping)
Example #3
Source File: loader.py From Context-aware-ZSR with MIT License | 6 votes |
def _reset_iter(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        self._ratio_index = self.ratio_index[indices]
        self._ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        self._ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        self._ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    self.iter_counter = 0
    self._ratio_index = self._ratio_index.tolist()
    self._ratio_list_minibatch = self._ratio_list_minibatch.tolist()
Example #4
Source File: loader.py From Detectron.pytorch with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #5
Source File: utils.py From neon with Apache License 2.0 | 6 votes |
def sparse_rand(shape, frac=0.05, round_up=False):
    # Generate an input with sparse activation in the input dimension for
    # LSTM testing. frac is the fraction of the matrix elements which will
    # be nonzero. Set round_up to True to get a binary matrix, i.e. elements
    # are either set to 0 or 1.
    num_el = np.prod(shape)
    inds = nprnd.permutation(num_el)[0:int(frac * num_el)]  # draw frac*num_el random indices
    vals = nprnd.random(inds.size)
    if round_up:
        vals = np.ceil(vals)
    out = np.zeros(shape)
    out.flat[inds] = vals
    return (out, inds)
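permutation(num_el)[:k] in sparse_rand is a common idiom for drawing k distinct indices, i.e. sampling without replacement. A small sketch of the pattern (the sizes here are arbitrary):

import numpy as np

num_el, k = 100, 5
inds = np.random.permutation(num_el)[:k]                 # k distinct indices
# Equivalent, and cheaper when k is much smaller than num_el:
inds2 = np.random.choice(num_el, size=k, replace=False)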
Example #6
Source File: train_level_classifier.py From sonic_contest with MIT License | 6 votes |
def read_and_normalize_and_shuffle_train_data(img_rows, img_cols, color_type,
                                              random_seed, subtract_mean=True):
    np.random.seed(random_seed)
    train_data, train_target, train_id = load_train(img_rows, img_cols, color_type)

    print('Convert to numpy...')
    train_data = np.array(train_data, dtype=np.uint8)
    train_target = np.array(train_target, dtype=np.uint8)
    train_target_vec = copy.deepcopy(train_target)
    # Transform categorical vector to one-hot-encoded vector
    train_target = np_utils.to_categorical(train_target, 27)
    train_id = np.array(train_id)

    print('Convert to float...')
    train_data = train_data.astype('float32')

    # Shuffle all aligned arrays with the same permutation
    perm = permutation(len(train_target))
    train_data = train_data[perm]
    train_target = train_target[perm]
    train_target_vec = train_target_vec[perm]
    train_id = train_id[perm]

    print('Train shape:', train_data.shape)
    print(train_data.shape[0], 'train samples')
    return train_data, train_target, train_target_vec, train_id
Example #7
Source File: loader.py From FPN-Pytorch with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #8
Source File: loader.py From PMFNet with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #9
Source File: loader.py From DIoU-pytorch-detectron with GNU General Public License v3.0 | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #10
Source File: loader.py From detectron-self-train with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #11
Source File: loader_rel.py From Large-Scale-VRD.pytorch with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #12
Source File: loader.py From Large-Scale-VRD.pytorch with MIT License | 6 votes |
def __iter__(self):
    if cfg.TRAIN.ASPECT_GROUPING:
        # indices for aspect-grouping-aware permutation
        n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
        round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
        indices = np.arange(round_num_data)
        npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
        if rem != 0:
            indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
        ratio_index = self.ratio_index[indices]
        ratio_list_minibatch = self.ratio_list_minibatch[indices]
    else:
        rand_perm = npr.permutation(self.num_data)
        ratio_list = self.ratio_list[rand_perm]
        ratio_index = self.ratio_index[rand_perm]
        # re-calculate minibatch ratio list
        ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #13
Source File: data.py From multipy with BSD 3-Clause "New" or "Revised" License | 5 votes |
def neuhaus(permute=False):
    """Function that returns the Neuhaus et al. data that was
    re-analyzed in the classic Benjamini & Hochberg (1995) FDR paper.

    Input arguments:
    permute - If true, the p-values are returned in random order.
              If false, the p-values are returned in ascending order.
    """
    pvals = np.array([0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298,
                      0.0344, 0.0459, 0.3240, 0.4262, 0.5719, 0.6528, 0.7590,
                      1.000], dtype='float')
    if permute:
        m = len(pvals)
        pvals = pvals[permutation(m)]
    return pvals
Example #14
Source File: resampling.py From particles with MIT License | 5 votes |
def enqueue(self):
    perm = random.permutation(self.M)
    self.A = multinomial(self.W, M=self.M)[perm]
Example #15
Source File: data.py From YOLO_Object_Detection with GNU General Public License v3.0 | 5 votes |
def shuffle(self):
    batch = self.FLAGS.batch
    data = self.parse()
    size = len(data)
    print('Dataset of {} instance(s)'.format(size))
    if batch > size:
        self.FLAGS.batch = batch = size
    batch_per_epoch = int(size / batch)

    for i in range(self.FLAGS.epoch):
        shuffle_idx = perm(np.arange(size))
        for b in range(batch_per_epoch):
            # yield these
            x_batch = list()
            feed_batch = dict()
            for j in range(b * batch, b * batch + batch):
                train_instance = data[shuffle_idx[j]]
                inp, new_feed = self._batch(train_instance)
                if inp is None:
                    continue
                x_batch += [np.expand_dims(inp, 0)]
                for key in new_feed:
                    new = new_feed[key]
                    old_feed = feed_batch.get(key, np.zeros((0,) + new.shape))
                    feed_batch[key] = np.concatenate([old_feed, [new]])
            x_batch = np.concatenate(x_batch, 0)
            yield x_batch, feed_batch

        print('Finish {} epoch(es)'.format(i + 1))
Example #16
Source File: id_train.py From mlens with MIT License | 5 votes |
def fit(self, X):
    """Sample a training set.

    Parameters
    ----------
    X : array-like
        training set to sample observations from.

    Returns
    -------
    self : obj
        fitted instance with stored sample.
    """
    self.train_shape = X.shape

    sample_idx = {}
    for i in range(2):
        dim_size = min(X.shape[i], self.size)
        sample_idx[i] = permutation(X.shape[i])[:dim_size]
    sample = X[ix_(sample_idx[0], sample_idx[1])]

    self.sample_idx_ = sample_idx
    self.sample_ = sample
    return self
Example #17
Source File: sgcrf.py From sgcrfpy with MIT License | 5 votes |
def lambda_newton_direction(self, active, fixed, vary, max_iter=1):
    # TODO we should be able to do a warm start...
    delta = np.zeros_like(vary.Sigma)
    U = np.zeros_like(vary.Sigma)

    for _ in range(max_iter):
        for i, j in rng.permutation(np.array(active).T):
            if i > j:
                # seems ok since we look for upper triangular indices in active set
                continue

            if i == j:
                a = vary.Sigma[i, i] ** 2 + 2 * vary.Sigma[i, i] * vary.Psi[i, i]
            else:
                a = (vary.Sigma[i, j] ** 2 + vary.Sigma[i, i] * vary.Sigma[j, j] +
                     vary.Sigma[i, i] * vary.Psi[j, j] +
                     2 * vary.Sigma[i, j] * vary.Psi[i, j] +
                     vary.Sigma[j, j] * vary.Psi[i, i])

            b = (fixed.Syy[i, j] - vary.Sigma[i, j] - vary.Psi[i, j] +
                 np.dot(vary.Sigma[i, :], U[:, j]) +
                 np.dot(vary.Psi[i, :], U[:, j]) +
                 np.dot(vary.Psi[j, :], U[:, i]))

            if i == j:
                u = -b / a
                delta[i, i] += u
                U[i, :] += u * vary.Sigma[i, :]
            else:
                c = self.Lam[i, j] + delta[i, j]
                u = soft_thresh(self.lamL / a, c - b / a) - c
                delta[j, i] += u
                delta[i, j] += u
                U[j, :] += u * vary.Sigma[i, :]
                U[i, :] += u * vary.Sigma[j, :]

    return delta
Example #18
Source File: loader.py From pcl.pytorch with MIT License | 5 votes |
def __iter__(self):
    rand_perm = npr.permutation(self.num_data)
    ratio_list = self.ratio_list[rand_perm]
    ratio_index = self.ratio_index[rand_perm]
    # re-calculate minibatch ratio list
    ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
    return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
Example #19
Source File: data.py From VideoRecognition-realtime-autotrainer-alerts with GNU General Public License v3.0 | 5 votes |
def shuffle(self):
    batch = self.FLAGS.batch
    data = self.parse()
    size = len(data)
    print('Dataset of {} instance(s)'.format(size))
    if batch > size:
        self.FLAGS.batch = batch = size
    batch_per_epoch = int(size / batch)

    for i in range(self.FLAGS.epoch):
        shuffle_idx = perm(np.arange(size))
        for b in range(batch_per_epoch):
            # yield these
            x_batch = list()
            feed_batch = dict()
            for j in range(b * batch, b * batch + batch):
                train_instance = data[shuffle_idx[j]]
                try:
                    inp, new_feed = self._batch(train_instance)
                except ZeroDivisionError:
                    print("This image's width or height are zeros: ", train_instance[0])
                    print('train_instance:', train_instance)
                    print('Please remove or fix it then try again.')
                    raise
                if inp is None:
                    continue
                x_batch += [np.expand_dims(inp, 0)]
                for key in new_feed:
                    new = new_feed[key]
                    old_feed = feed_batch.get(key, np.zeros((0,) + new.shape))
                    feed_batch[key] = np.concatenate([old_feed, [new]])
            x_batch = np.concatenate(x_batch, 0)
            yield x_batch, feed_batch

        print('Finish {} epoch(es)'.format(i + 1))
Example #20
Source File: bigsql.py From pymdptoolbox with BSD 3-Clause "New" or "Revised" License | 5 votes |
def setup(S, A):
    P_sparse = [None] * A
    R_sparse = 2 * random(S) - 1
    # NOTE: the database name appears truncated in the source listing; the
    # placeholders are reconstructed to match exampleRand in mdpsql.py below.
    DB_sql = "MDP-big-%sx%s.db" % (S, A)
    if os.path.exists(DB_sql):
        os.remove(DB_sql)
    with sqlite3.connect(DB_sql) as conn:
        c = conn.cursor()
        cmd = '''
            CREATE TABLE info (name TEXT, value INTEGER);
            INSERT INTO info VALUES('states', %s);
            INSERT INTO info VALUES('actions', %s);''' % (S, A)
        c.executescript(cmd)
        for a in range(1, A + 1):
            a_sparse = a - 1
            PP_sparse = dok_matrix((S, S))
            cmd = '''
                CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL);
                CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL);
                ''' % (a, a)
            c.executescript(cmd)
            cmd = "INSERT INTO reward%s(val) VALUES(?)" % a
            c.executemany(cmd, zip(R_sparse.tolist()))
            for s in xrange(1, S + 1):
                s_sparse = s - 1
                n = randint(1, 10)
                # keep col as an array so col - 1 works for the sparse assignment
                col = permutation(arange(1, S + 1))[0:n]
                val = random(n)
                val = (val / val.sum()).tolist()
                PP_sparse[s_sparse, col - 1] = val
                cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a
                c.executemany(cmd, zip([s] * n, col.tolist(), val))
            cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a)
            c.execute(cmd)
            P_sparse[a_sparse] = PP_sparse.tocsr()
    return P_sparse, R_sparse, DB_sql
Example #21
Source File: mdpsql.py From pymdptoolbox with BSD 3-Clause "New" or "Revised" License | 5 votes |
def exampleRand(S, A):
    """WARNING: This will delete a database with the same name as 'db'."""
    db = "MDP-%sx%s.db" % (S, A)
    if os.path.exists(db):
        os.remove(db)
    conn = sqlite3.connect(db)
    with conn:
        c = conn.cursor()
        cmd = '''
            CREATE TABLE info (name TEXT, value INTEGER);
            INSERT INTO info VALUES('states', %s);
            INSERT INTO info VALUES('actions', %s);''' % (S, A)
        c.executescript(cmd)
        for a in range(1, A + 1):
            cmd = '''
                CREATE TABLE transition%s (row INTEGER, col INTEGER, prob REAL);
                CREATE TABLE reward%s (state INTEGER PRIMARY KEY ASC, val REAL);
                ''' % (a, a)
            c.executescript(cmd)
            cmd = "INSERT INTO reward%s(val) VALUES(?)" % a
            c.executemany(cmd, zip(random(S).tolist()))
            for s in xrange(1, S + 1):
                # To be usefully represented as a sparse matrix, the number of
                # nonzero entries should be less than 1/3 of the dimension of
                # the matrix, so S/3.
                n = randint(1, S // 3)
                # timeit [90894] * 20330
                # ==> 10000 loops, best of 3: 141 us per loop
                # timeit (90894*np.ones(20330, dtype=int)).tolist()
                # ==> 1000 loops, best of 3: 548 us per loop
                col = (permutation(arange(1, S + 1))[0:n]).tolist()
                val = random(n)
                val = (val / val.sum()).tolist()
                cmd = "INSERT INTO transition%s VALUES(?, ?, ?)" % a
                c.executemany(cmd, zip([s] * n, col, val))
            cmd = "CREATE UNIQUE INDEX Pidx%s ON transition%s (row, col);" % (a, a)
            c.execute(cmd)
    # Return the name of the database.
    return db
Example #22
Source File: test_Indexing.py From Kayak with MIT License | 5 votes |
def test_indexing_values():
    npr.seed(1)
    for ii in xrange(NUM_TRIALS):
        np_X = npr.randn(6, 10)
        inds = npr.permutation(10)[:5]
        X = kayak.Parameter(np_X)
        Y = kayak.Take(X, inds, axis=1)
        assert np.array_equal(Y.value, np.take(np_X, inds, axis=1))
Example #23
Source File: test_Indexing.py From Kayak with MIT License | 5 votes |
def test_indexing_grad():
    npr.seed(2)
    for ii in xrange(NUM_TRIALS):
        np_X = npr.randn(6, 20)
        inds = npr.permutation(20)[:5]
        X = kayak.Parameter(np_X)
        Y = kayak.Take(X, inds, axis=1)
        Z = kayak.MatSum(Y)
        Z.value  # force a forward evaluation before checking gradients
        assert_less(kayak.util.checkgrad(X, Z), MAX_GRAD_DIFF)
Example #24
Source File: test_Indexing.py From Kayak with MIT License | 5 votes |
def test_indexing_grad_2():
    npr.seed(3)
    for ii in xrange(NUM_TRIALS):
        np_X = npr.randn(6, 2, 7, 3)
        inds = npr.permutation(7)[:5]
        X = kayak.Parameter(np_X)
        Y = kayak.Take(X, inds, axis=2)
        Z = kayak.MatSum(Y)
        Z.value  # force a forward evaluation before checking gradients
        assert_less(kayak.util.checkgrad(X, Z), MAX_GRAD_DIFF)
Example #25
Source File: crossval.py From Kayak with MIT License | 5 votes |
def __init__(self, num_folds, inputs, targets=None, permute=True):
    if permute:
        # Make a copy of the data, with a random permutation.
        self.ordering = npr.permutation(inputs.shape[0])
        self.inputs = inputs[self.ordering, ...].copy()
        if targets is not None:
            self.targets = targets[self.ordering, ...].copy()
        else:
            self.targets = None
    else:
        self.ordering = np.arange(inputs.shape[0], dtype=int)
        self.inputs = inputs
        self.targets = targets

    self.fold_idx = 0
    self.num_folds = num_folds
    self.edges = np.linspace(0, self.inputs.shape[0], self.num_folds + 1).astype(int)

    self.indices = []
    for ii in xrange(self.num_folds):
        self.indices.append(np.arange(self.edges[ii], self.edges[ii + 1], dtype=int))

    self.folds = []
    for ii in xrange(self.num_folds):
        self.folds.append(Fold(self,
                               np.array(list(itertools.chain.from_iterable(
                                   [self.indices[jj] for jj in
                                    range(0, ii) + range(ii + 1, self.num_folds)])),
                                   dtype=int),
                               np.array(self.indices[ii], dtype=int)))
Example #26
Source File: keras_trainer.py From lyapy with BSD 3-Clause "New" or "Revised" License | 5 votes |
def shuffle(self, data):
    """Shuffle data.

    Let N be the number of data points.

    Inputs:
    Data arrays, data: tuple of numpy arrays, each (N, ...)

    Outputs a tuple of numpy arrays, each (N, ...)
    """
    N = len(data[0])
    perm = permutation(N)
    return tuple(_data[perm] for _data in data)
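Indexing every array with one shared permutation, as in this shuffle helper, keeps paired arrays (e.g. features and labels) aligned after shuffling. A minimal sketch with made-up data:

import numpy as np

X = np.arange(10).reshape(5, 2)    # 5 samples, 2 features
y = np.arange(5)                   # 5 labels aligned with the rows of X
perm = np.random.permutation(len(y))
X_shuf, y_shuf = X[perm], y[perm]  # row i of X_shuf still matches y_shuf[i]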
Example #27
Source File: ndtest.py From ndtest with MIT License | 5 votes |
def estat(x, y, nboot=1000, replace=False, method='log', fitting=False):
    '''
    Energy distance statistics test.

    Reference
    ---------
    Aslan, B, Zech, G (2005) Statistical energy as a tool for binning-free
      multivariate goodness-of-fit tests, two-sample comparison and
      unfolding. Nuc Instr and Meth in Phys Res A 537: 626-636

    Szekely, G, Rizzo, M (2014) Energy statistics: A class of statistics
      based on distances. J Stat Planning & Infer 143: 1249-1272

    Brian Lau, multdist, https://github.com/brian-lau/multdist
    '''
    n, N = len(x), len(x) + len(y)
    stack = np.vstack([x, y])
    stack = (stack - stack.mean(0)) / stack.std(0)
    if replace:
        rand = lambda x: random.randint(x, size=x)
    else:
        rand = random.permutation

    en = energy(stack[:n], stack[n:], method)
    en_boot = np.zeros(nboot, 'f')
    for i in range(nboot):
        idx = rand(N)
        en_boot[i] = energy(stack[idx[:n]], stack[idx[n:]], method)

    if fitting:
        param = genextreme.fit(en_boot)
        p = genextreme.sf(en, *param)
        return p, en, param
    else:
        p = (en_boot >= en).sum() / nboot
        return p, en, en_boot
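The non-replacement branch above is a classic permutation test: each call to random.permutation(N) relabels the pooled sample, and recomputing the statistic over many relabelings builds the null distribution that the observed value is compared against. A stripped-down sketch of the same idea for a difference of means (this helper is illustrative and not part of ndtest):

import numpy as np

def perm_test(x, y, nboot=1000):
    """Permutation p-value for H0: x and y share a distribution."""
    n, N = len(x), len(x) + len(y)
    pooled = np.concatenate([x, y])
    stat = abs(x.mean() - y.mean())
    boot = np.empty(nboot)
    for i in range(nboot):
        idx = np.random.permutation(N)   # random relabeling of the pool
        boot[i] = abs(pooled[idx[:n]].mean() - pooled[idx[n:]].mean())
    return (boot >= stat).mean()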
Example #28
Source File: dataloader.py From ImageCaptioning.pytorch with MIT License | 5 votes |
def _reset_iter(self):
    if self.shuffle:
        rand_perm = npr.permutation(len(self.index_list))
        self._index_list = [self.index_list[_] for _ in rand_perm]
    else:
        self._index_list = self.index_list
    self.iter_counter = 0
Example #29
Source File: dataloader.py From self-critical.pytorch with MIT License | 5 votes |
def _reset_iter(self):
    if self.shuffle:
        rand_perm = npr.permutation(len(self.index_list))
        self._index_list = [self.index_list[_] for _ in rand_perm]
    else:
        self._index_list = self.index_list
    self.iter_counter = 0