Python numpy.concatenate() Examples
The following are 30 code examples of numpy.concatenate().
You can go to the original project or source file by following the links above each example.
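Before the examples, here is a minimal self-contained sketch of the function itself: numpy.concatenate() joins a sequence of arrays along an existing axis (the toy arrays below are illustrative only).

    import numpy as np

    a = np.array([[1, 2], [3, 4]])      # shape (2, 2)
    b = np.array([[5, 6]])              # shape (1, 2)

    np.concatenate((a, b), axis=0)      # stack rows    -> shape (3, 2)
    np.concatenate((a, b.T), axis=1)    # stack columns -> shape (2, 3)
    np.concatenate((a, b), axis=None)   # flatten first -> array([1, 2, 3, 4, 5, 6])

All inputs must match in shape on every axis except the one being joined.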
Example #1
Source File: utils.py From sklearn-audio-transfer-learning with ISC License | 6 votes |
def wavefile_to_waveform(wav_file, features_type):
    data, sr = sf.read(wav_file)
    if features_type == 'vggish':
        tmp_name = str(int(np.random.rand(1)*1000000)) + '.wav'
        sf.write(tmp_name, data, sr, subtype='PCM_16')
        sr, wav_data = wavfile.read(tmp_name)
        os.remove(tmp_name)
        # sr, wav_data = wavfile.read(wav_file) # as done in VGGish Audioset
        assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
        data = wav_data / 32768.0  # Convert to [-1.0, +1.0]

    # ensure at least one second of samples; if shorter, repeat-pad
    src_repeat = data
    while (src_repeat.shape[0] < sr):
        src_repeat = np.concatenate((src_repeat, data), axis=0)
    data = src_repeat[:sr]

    return data, sr
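The repeat-pad loop above appends one copy of the signal per pass until a full second is reached. For a mono (1-D) signal, a single np.tile call gives the same result; a minimal sketch under that assumption:

    reps = int(np.ceil(sr / data.shape[0]))  # copies needed to reach sr samples
    data = np.tile(data, reps)[:sr]          # tile, then truncate to exactly sr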
Example #2
Source File: gla_gpu.py From Deep_VoiceChanger with MIT License | 6 votes |
def auto_inverse(self, whole_spectrum):
    whole_spectrum = np.copy(whole_spectrum).astype(complex)
    whole_spectrum[whole_spectrum < 1] = 1
    overwrap = self.buffer_size * 2
    height = whole_spectrum.shape[0]
    parallel_dif = (height-overwrap) // self.parallel
    if height < self.parallel*overwrap:
        raise Exception('voice length is too small to use gpu, or parallel number is too big')

    spec = [self.inverse(whole_spectrum[range(i, i+parallel_dif*self.parallel, parallel_dif), :]) for i in tqdm.tqdm(range(parallel_dif+overwrap))]
    spec = spec[overwrap:]
    spec = np.concatenate(spec, axis=1)
    spec = spec.reshape(-1, self.wave_len)

    # The code below doesn't take wave_len and wave_dif into account; to be fixed.
    wave = np.fft.ifft(spec, axis=1).real
    pad = np.zeros((wave.shape[0], 2), dtype=float)
    wave = np.concatenate([wave, pad], axis=1)

    dst = np.zeros((wave.shape[0]+3)*self.wave_dif, dtype=float)
    for i in range(4):
        w = wave[range(i, wave.shape[0], 4), :]
        w = w.reshape(-1)
        dst[i*self.wave_dif:i*self.wave_dif+len(w)] += w

    return dst*0.5
Example #3
Source File: convert_story.py From gated-graph-transformer-network with MIT License | 6 votes |
def convert(story):
    # import pdb; pdb.set_trace()
    sentence_arr, graphs, query_arr, answer_arr = story
    node_id_w = graphs[2].shape[2]
    edge_type_w = graphs[3].shape[3]

    all_node_strengths = [np.zeros([1])]
    all_node_ids = [np.zeros([1, node_id_w])]
    for num_new_nodes, new_node_strengths, new_node_ids, _ in zip(*graphs):
        last_strengths = all_node_strengths[-1]
        last_ids = all_node_ids[-1]

        cur_strengths = np.concatenate([last_strengths, new_node_strengths], 0)
        cur_ids = np.concatenate([last_ids, new_node_ids], 0)

        all_node_strengths.append(cur_strengths)
        all_node_ids.append(cur_ids)

    all_edges = graphs[3]
    full_n_nodes = all_edges.shape[1]
    all_node_strengths = np.stack([np.pad(x, ((0, full_n_nodes-x.shape[0])), 'constant') for x in all_node_strengths[1:]])
    all_node_ids = np.stack([np.pad(x, ((0, full_n_nodes-x.shape[0]), (0, 0)), 'constant') for x in all_node_ids[1:]])
    all_node_states = np.zeros([len(all_node_strengths), full_n_nodes, 0])

    return tuple(x[np.newaxis, ...] for x in (all_node_strengths, all_node_ids, all_node_states, all_edges))
Example #4
Source File: model.py From models with MIT License | 6 votes |
def predict_on_batch(self, inputs):
    if inputs.shape == (2,):
        inputs = inputs[np.newaxis, :]
    # Encode
    max_len = len(max(inputs, key=len))
    one_hot_ref = self.encode(inputs[:, 0])
    one_hot_alt = self.encode(inputs[:, 1])
    # Construct dummy library indicator
    indicator = np.zeros((inputs.shape[0], 2))
    indicator[:, 1] = 1
    # Compute fold change for all three frames
    fc_changes = []
    for shift in range(3):
        if shift > 0:
            shifter = np.zeros((one_hot_ref.shape[0], 1, 4))
            one_hot_ref = np.concatenate([one_hot_ref, shifter], axis=1)
            one_hot_alt = np.concatenate([one_hot_alt, shifter], axis=1)
        pred_ref = self.model.predict_on_batch([one_hot_ref, indicator]).reshape(-1)
        pred_variant = self.model.predict_on_batch([one_hot_alt, indicator]).reshape(-1)
        fc_changes.append(np.log2(pred_variant / pred_ref))
    # Return
    return {"mrl_fold_change": fc_changes[0],
            "shift_1": fc_changes[1],
            "shift_2": fc_changes[2]}
Example #5
Source File: model_architecture.py From models with MIT License | 6 votes |
def forward(self, input):
    # array has shape (N, 4, 1, 1000)
    # return the sequence + its RC concatenated
    # create inverted indices
    invert_dims = [1, 3]
    input_bkup = input
    for idim in invert_dims:
        idxs = [i for i in range(input.size(idim)-1, -1, -1)]
        idxs_var = Variable(torch.LongTensor(idxs))
        if input.is_cuda:
            idxs_var = idxs_var.cuda()
        input = input.index_select(idim, idxs_var)
    #
    input = torch.cat([input_bkup, input], dim=0)
    #
    # Using numpy:
    #input = edit_tensor_in_numpy(input, lambda x: np.concatenate([x, x[:,::-1, : ,::-1]],axis=0))
    return input
Example #6
Source File: group_sampler.py From mmdetection with Apache License 2.0 | 6 votes |
def __iter__(self):
    indices = []
    for i, size in enumerate(self.group_sizes):
        if size == 0:
            continue
        indice = np.where(self.flag == i)[0]
        assert len(indice) == size
        np.random.shuffle(indice)
        num_extra = int(np.ceil(size / self.samples_per_gpu)
                        ) * self.samples_per_gpu - len(indice)
        indice = np.concatenate(
            [indice, np.random.choice(indice, num_extra)])
        indices.append(indice)
    indices = np.concatenate(indices)
    indices = [
        indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
        for i in np.random.permutation(
            range(len(indices) // self.samples_per_gpu))
    ]
    indices = np.concatenate(indices)
    indices = indices.astype(np.int64).tolist()
    assert len(indices) == self.num_samples
    return iter(indices)
Example #7
Source File: dataloader_m.py From models with MIT License | 6 votes |
def _prepro_cpg(self, states, dists):
    """Preprocess the state and distance of neighboring CpG sites."""
    prepro_states = []
    prepro_dists = []
    for state, dist in zip(states, dists):
        nan = state == dat.CPG_NAN
        if np.any(nan):
            state[nan] = np.random.binomial(1, state[~nan].mean(), nan.sum())
            dist[nan] = self.cpg_max_dist
        dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
        prepro_states.append(np.expand_dims(state, 1))
        prepro_dists.append(np.expand_dims(dist, 1))
    prepro_states = np.concatenate(prepro_states, axis=1)
    prepro_dists = np.concatenate(prepro_dists, axis=1)
    if self.cpg_wlen:
        center = prepro_states.shape[2] // 2
        delta = self.cpg_wlen // 2
        tmp = slice(center - delta, center + delta)
        prepro_states = prepro_states[:, :, tmp]
        prepro_dists = prepro_dists[:, :, tmp]
    return (prepro_states, prepro_dists)
Example #8
Source File: encoding_images.py From face-attendance-machine with Apache License 2.0 | 6 votes |
def load_encodings():
    """
    Load the saved face encodings and the corresponding name vectors, and return them.
    :return:
    """
    # Generate the encodings first if the saved arrays are missing, then load them.
    if not os.path.exists(KNOWN_FACE_NANE) or not os.path.exists(KNOWN_FACE_ENCODINGS):
        encoding_images(data_path)
    known_face_encodings = np.load(KNOWN_FACE_ENCODINGS)
    known_face_names = np.load(KNOWN_FACE_NANE)
    aa = [file for file in os.listdir(data_path)
          if os.path.isfile(os.path.join(data_path, file)) and file.endswith("npy")]
    # ("known_face_encodings_") or file.startswith("known_face_name_"))
    for data in aa:
        if data.startswith('known_face_encodings_'):
            tmp_face_encodings = np.load(os.path.join(data_path, data))
            known_face_encodings = np.concatenate((known_face_encodings, tmp_face_encodings), axis=0)
            print("load ", data)
        elif data.startswith('known_face_name_'):
            tmp_face_name = np.load(os.path.join(data_path, data))
            known_face_names = np.concatenate((known_face_names, tmp_face_name), axis=0)
            print("load ", data)
        else:
            print('skip to load original ', data)
    return known_face_encodings, known_face_names
Example #9
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def block_split(X, Y):
    """
    Split the data into 80% for training and 20% for testing
    in a block size of 100.
    :param X:
    :param Y:
    :return:
    """
    print("Isolated split 80%, 20% for training and testing")
    num_samples = X.shape[0]
    partition = int(num_samples / 3)
    X_adv, Y_adv = X[:partition], Y[:partition]
    X_norm, Y_norm = X[partition:2*partition], Y[partition:2*partition]
    X_noisy, Y_noisy = X[2*partition:], Y[2*partition:]
    num_train = int(partition * 0.008) * 100

    X_train = np.concatenate((X_adv[:num_train], X_norm[:num_train], X_noisy[:num_train]))
    Y_train = np.concatenate((Y_adv[:num_train], Y_norm[:num_train], Y_noisy[:num_train]))

    X_test = np.concatenate((X_adv[num_train:], X_norm[num_train:], X_noisy[num_train:]))
    Y_test = np.concatenate((Y_adv[num_train:], Y_norm[num_train:], Y_noisy[num_train:]))

    return X_train, Y_train, X_test, Y_test
Example #10
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score
Example #11
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr
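As an aside, the reshape-and-transpose pattern above turns two 1-D arrays into an (n, 2) feature matrix; np.column_stack((densities_neg, uncerts_neg)) would produce the same result in a single call.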
Example #12
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, transform=None, target_transform=None,
             filename="adv_set_e_2.p", transp=False):
    """
    :param transform:
    :param target_transform:
    :param filename:
    :param transp: Set shuff = False for PGD-based attacks
    :return:
    """
    self.transform = transform
    self.target_transform = target_transform
    self.adv_dict = {}
    self.adv_dict["adv_input"] = None
    self.adv_dict["adv_labels"] = None

    for i in range(16):
        if "Test" in filename:
            print('OK')
            new_adv_dict = pickle.load(open(filename.split(".")[0] + str(i) + "." + filename.split(".")[1], "rb"))
        else:
            new_adv_dict = pickle.load(open(filename.split(".")[0] + "_" + str(i) + "." + filename.split(".")[1], "rb"))

        if self.adv_dict["adv_input"] is None:
            self.adv_dict["adv_input"] = new_adv_dict["adv_input"]
            self.adv_dict["adv_labels"] = new_adv_dict["adv_labels"]
        else:
            self.adv_dict["adv_input"] = np.concatenate((new_adv_dict["adv_input"], self.adv_dict["adv_input"]))
            self.adv_dict["adv_labels"] = np.concatenate((new_adv_dict["adv_labels"], self.adv_dict["adv_labels"]))

    self.adv_flat = self.adv_dict["adv_input"]
    self.num_adv = np.shape(self.adv_flat)[0]
    self.shuff = transp
    self.sample_num = 0
Example #13
Source File: NLP.py From Financial-NLP with Apache License 2.0 | 5 votes |
def safe_nlp_vector(self, words):
    """
    Parameters
    ----------
    words : list of str/str
        wordbag

    Returns
    ----------
    ndarray(float)
        the corresponding vectors of words in the wordbag.
        a vector contains the similarities calculated by word2vec and wordnet.
    """
    if isinstance(words, string_types):
        synonym = self.synonym_label(words)
        similarity = self.similarity_label(words)
    else:
        synonym = np.empty((len(self.Label_index), len(words)))
        similarity = np.empty((len(self.Label_index), len(words)))
        for i in range(len(words)):
            try:
                synonym[:, i] = self.synonym_label(words[i])
            except:
                synonym[:, i] = np.zeros((len(self.Label_index), 1))[:, 0]
            try:
                similarity[:, i] = self.similarity_label(words[i])[:, 0]
            except:
                similarity[:, i] = np.zeros((len(self.Label_index), 1))[:, 0]
    vector = np.concatenate((similarity, synonym))
    return vector
Example #14
Source File: metric.py From subword-qac with MIT License | 5 votes |
def mrl_summary(recover_lengths, seens, n_candidates):
    recover_lengths = np.array(recover_lengths)
    seens = np.array(seens)
    mrl = np.concatenate((recover_lengths[seens == 1].mean(0).reshape((1, -1)),
                          recover_lengths[seens == 0].mean(0).reshape((1, -1)),
                          recover_lengths.mean(0).reshape((1, -1))), 0)
    logs = []
    for i in range(1, n_candidates + 1):
        i_str = ' '.join(f"{mrl[s, i]:.4f} ({seen_str})"
                         for s, seen_str in enumerate(['seen', 'unseen', 'all']))
        logs.append(f"mrl @{i:-2d}: {i_str}")
    logs.append(" ")
    return logs
Example #15
Source File: test_masks.py From mmdetection with Apache License 2.0 | 5 votes |
def dummy_bboxes(num, max_height, max_width):
    x1y1 = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
    wh = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
    x2y2 = x1y1 + wh
    return np.concatenate([x1y1, x2y2], axis=1).squeeze().astype(np.float32)
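Note that the trailing .squeeze() drops the leading axis when num == 1, so a single box comes back with shape (4,) rather than (1, 4); callers that always expect a 2-D array would need np.atleast_2d on the result.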
Example #16
Source File: iou_balanced_neg_sampler.py From mmdetection with Apache License 2.0 | 5 votes |
def sample_via_interval(self, max_overlaps, full_set, num_expected):
    """Sample according to the iou interval.

    Args:
        max_overlaps (torch.Tensor): IoU between bounding boxes and ground
            truth boxes.
        full_set (set(int)): A full set of indices of boxes.
        num_expected (int): Number of expected samples.

    Returns:
        np.ndarray: Indices of samples
    """
    max_iou = max_overlaps.max()
    iou_interval = (max_iou - self.floor_thr) / self.num_bins
    per_num_expected = int(num_expected / self.num_bins)

    sampled_inds = []
    for i in range(self.num_bins):
        start_iou = self.floor_thr + i * iou_interval
        end_iou = self.floor_thr + (i + 1) * iou_interval
        tmp_set = set(
            np.where(
                np.logical_and(max_overlaps >= start_iou,
                               max_overlaps < end_iou))[0])
        tmp_inds = list(tmp_set & full_set)
        if len(tmp_inds) > per_num_expected:
            tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected)
        else:
            tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
        sampled_inds.append(tmp_sampled_set)

    sampled_inds = np.concatenate(sampled_inds)
    if len(sampled_inds) < num_expected:
        num_extra = num_expected - len(sampled_inds)
        extra_inds = np.array(list(full_set - set(sampled_inds)))
        if len(extra_inds) > num_extra:
            extra_inds = self.random_choice(extra_inds, num_extra)
        sampled_inds = np.concatenate([sampled_inds, extra_inds])

    return sampled_inds
Example #17
Source File: dataset_wrappers.py From mmdetection with Apache License 2.0 | 5 votes |
def __init__(self, datasets):
    super(ConcatDataset, self).__init__(datasets)
    self.CLASSES = datasets[0].CLASSES
    if hasattr(datasets[0], 'flag'):
        flags = []
        for i in range(0, len(datasets)):
            flags.append(datasets[i].flag)
        self.flag = np.concatenate(flags)
Example #18
Source File: test.py From DDPAE-video-prediction with MIT License | 5 votes |
def save_images(prediction, gt, latent, save_dir, step):
    pose, components = latent['pose'].data.cpu(), latent['components'].data.cpu()
    batch_size, n_frames_total = prediction.shape[:2]
    n_components = components.shape[2]
    for i in range(batch_size):
        filename = '{:05d}.png'.format(step)
        y = gt[i, ...]
        rows = [y]
        if n_components > 1:
            for j in range(n_components):
                p = pose[i, :, j, :]
                comp = components[i, :, j, ...]
                if pose.size(-1) == 3:
                    comp = utils.draw_components(comp, p)
                rows.append(utils.to_numpy(comp))
        x = prediction[i, ...]
        rows.append(x)
        # Make a grid of 4 x n_frames_total images
        image = np.concatenate(rows, axis=2).squeeze(1)
        image = np.concatenate([image[i] for i in range(n_frames_total)], axis=1)
        image = (image * 255).astype(np.uint8)
        # Save image
        Image.fromarray(image).save(os.path.join(save_dir, filename))
        step += 1
    return step
Example #19
Source File: metrics.py From DDPAE-video-prediction with MIT License | 5 votes |
def get_scores(self):
    # Save positions
    if self.save_path != '':
        positions = np.array([np.concatenate(self.pred_positions, axis=0),
                              np.concatenate(self.gt_positions, axis=0)])
        np.save(os.path.join(self.save_path), positions)

    masks = np.concatenate(self.masks, axis=0)
    cosine = np.concatenate(self.cosine_similarities, axis=0)
    rel_error = np.concatenate(self.relative_errors, axis=0)
    numel = np.sum(masks == 1, axis=(0, 2))
    rel_error = np.sum(rel_error * masks, axis=(0, 2)) / numel
    cosine = np.sum(cosine * masks, axis=(0, 2)) / numel
    return {'relative_errors': rel_error, 'cosine_similarities': cosine}
Example #20
Source File: audio_transfer_learning.py From sklearn-audio-transfer-learning with ISC License | 5 votes |
def extract_vggish_features(paths, path2gt, model):
    """Extracts VGGish features and their corresponding ground_truth and identifiers (the path).

       VGGish features are extracted from non-overlapping audio patches of 0.96 seconds,
       where each audio patch covers 64 mel bands and 96 frames of 10 ms each.

       We repeat ground_truth and identifiers to fit the number of extracted VGGish features.
    """
    # 1) Extract log-mel spectrograms
    first_audio = True
    for p in paths:
        if first_audio:
            input_data = vggish_input.wavfile_to_examples(config['audio_folder'] + p)
            ground_truth = np.repeat(path2gt[p], input_data.shape[0], axis=0)
            identifiers = np.repeat(p, input_data.shape[0], axis=0)
            first_audio = False
        else:
            tmp_in = vggish_input.wavfile_to_examples(config['audio_folder'] + p)
            input_data = np.concatenate((input_data, tmp_in), axis=0)
            tmp_gt = np.repeat(path2gt[p], tmp_in.shape[0], axis=0)
            ground_truth = np.concatenate((ground_truth, tmp_gt), axis=0)
            tmp_id = np.repeat(p, tmp_in.shape[0], axis=0)
            identifiers = np.concatenate((identifiers, tmp_id), axis=0)

    # 2) Load Tensorflow model to extract VGGish features
    with tf.Graph().as_default(), tf.Session() as sess:
        vggish_slim.define_vggish_slim(training=False)
        vggish_slim.load_vggish_slim_checkpoint(sess, 'vggish_model.ckpt')
        features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
        embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
        extracted_feat = sess.run([embedding_tensor], feed_dict={features_tensor: input_data})
        feature = np.squeeze(np.asarray(extracted_feat))

    return [feature, ground_truth, identifiers]
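A general performance note on this pattern: growing arrays with np.concatenate inside a loop (step 1 above, and several other examples on this page) copies all accumulated data on every iteration, which is quadratic in the number of chunks. The usual idiom is to gather the chunks in a Python list and concatenate once at the end; a sketch of the same log-mel step under that idiom, reusing the names from the example:

    chunks, gts, ids = [], [], []
    for p in paths:
        tmp_in = vggish_input.wavfile_to_examples(config['audio_folder'] + p)
        chunks.append(tmp_in)
        gts.append(np.repeat(path2gt[p], tmp_in.shape[0], axis=0))
        ids.append(np.repeat(p, tmp_in.shape[0], axis=0))
    input_data = np.concatenate(chunks, axis=0)
    ground_truth = np.concatenate(gts, axis=0)
    identifiers = np.concatenate(ids, axis=0)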
Example #21
Source File: gen_noisy.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def evaluate_checkpoint(sess, model):
    dataset = 'cifar'
    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples = []
    adv_y_samples = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        x_batch_adv = attack.perturb(x_batch, y_batch, sess)
        if ibatch == 0:
            adv_x_samples = x_batch_adv
            adv_y_samples = y_batch
        else:
            adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis=0)
            adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis=0)

    if args.attack == 'xent':
        atck = 'pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "wb")
    elif args.attack == 'cw_pgd':
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "wb")
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "wb")
    # pickle needs a binary-mode handle ("wb", not "w") under Python 3.
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()
Example #22
Source File: gen_whitebox_adv.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def evaluate_checkpoint(sess, model):
    dataset = 'cifar'
    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples = []
    adv_y_samples = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        x_batch_adv = attack.perturb(x_batch, y_batch, sess)
        if ibatch == 0:
            adv_x_samples = x_batch_adv
            adv_y_samples = y_batch
        else:
            adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis=0)
            adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis=0)

    if args.attack == 'xent':
        atck = 'pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "wb")
    elif args.attack == 'cw_pgd':
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "wb")
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "wb")
    # pickle needs a binary-mode handle ("wb", not "w") under Python 3.
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()
Example #23
Source File: gen_whitebox_adv.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def evaluate_checkpoint(sess, model):
    dataset = 'mnist'
    #with tf.Session() as sess:
    # Iterate over the samples batch-by-batch
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))
    adv_x_samples = []
    adv_y_samples = []
    for ibatch in range(num_batches):
        bstart = ibatch * eval_batch_size
        bend = min(bstart + eval_batch_size, num_eval_examples)

        x_batch = mnist.test.images[bstart:bend, :]
        y_batch = mnist.test.labels[bstart:bend]

        dict_nat = {model.x_input: x_batch,
                    model.y_input: y_batch}

        x_batch_adv = attack.perturb(x_batch, y_batch, sess)
        if ibatch == 0:
            adv_x_samples = x_batch_adv
            adv_y_samples = y_batch
        else:
            adv_x_samples = np.concatenate((adv_x_samples, x_batch_adv), axis=0)
            adv_y_samples = np.concatenate((adv_y_samples, y_batch), axis=0)

    if args.attack == 'xent':
        atck = 'pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "wb")
    elif args.attack == 'cw_pgd':
        atck = 'cw_pgd'
        f = open(os.path.join(args.log_dir, 'Adv_%s_%s.p' % (dataset, atck)), "wb")
    else:
        f = open(os.path.join(args.log_dir, "custom.p"), "wb")
    # pickle needs a binary-mode handle ("wb", not "w") under Python 3.
    pickle.dump({"adv_input": adv_x_samples, "adv_labels": adv_y_samples}, f)
    f.close()
Example #24
Source File: util.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def kmean_pca_batch(data, batch, k=10):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)
    a = np.zeros(batch.shape[0])
    for i in np.arange(batch.shape[0]):
        tmp = np.concatenate((data, [batch[i]]))
        tmp_pca = PCA(n_components=2).fit_transform(tmp)
        a[i] = mle_single(tmp_pca[:-1], tmp_pca[-1], k=k)
    return a
Example #25
Source File: utils_cifar.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def read_CIFAR100(data_folder):
    """ Reads and parses examples from CIFAR100 python data files """

    train_img = []
    train_label = []
    test_img = []
    test_label = []

    train_file_list = ["cifar-100-python/train"]
    test_file_list = ["cifar-100-python/test"]

    tmp_dict = unpickle(os.path.join(data_folder, train_file_list[0]))
    train_img.append(tmp_dict["data"])
    train_label.append(tmp_dict["fine_labels"])

    tmp_dict = unpickle(os.path.join(data_folder, test_file_list[0]))
    test_img.append(tmp_dict["data"])
    test_label.append(tmp_dict["fine_labels"])

    train_img = np.concatenate(train_img)
    train_label = np.concatenate(train_label)
    test_img = np.concatenate(test_img)
    test_label = np.concatenate(test_label)

    train_img = np.reshape(
        train_img, [NUM_TRAIN_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
    test_img = np.reshape(
        test_img, [NUM_TEST_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])

    # change format from [B, C, H, W] to [B, H, W, C] for feeding to Tensorflow
    train_img = np.transpose(train_img, [0, 2, 3, 1])
    test_img = np.transpose(test_img, [0, 2, 3, 1])

    mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)

    CIFAR100_data = {}
    CIFAR100_data["train_img"] = train_img - mean_img
    CIFAR100_data["test_img"] = test_img - mean_img
    CIFAR100_data["train_label"] = train_label
    CIFAR100_data["test_label"] = test_label

    return CIFAR100_data
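Here each np.concatenate call receives a single-element list (CIFAR-100 ships its training and test sets in one pickle file each), which simply copies that array; the list-based structure is what lets the same code handle CIFAR-10's multiple data_batch_* shards unchanged.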
Example #26
Source File: DeepFM.py From tensorflow-DeepFM with MIT License | 5 votes |
def predict(self, Xi, Xv):
    """
    :param Xi: list of list of feature indices of each sample in the dataset
    :param Xv: list of list of feature values of each sample in the dataset
    :return: predicted probability of each sample
    """
    # dummy y
    dummy_y = [1] * len(Xi)
    batch_index = 0
    Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)
    y_pred = None
    while len(Xi_batch) > 0:
        num_batch = len(y_batch)
        feed_dict = {self.feat_index: Xi_batch,
                     self.feat_value: Xv_batch,
                     self.label: y_batch,
                     self.dropout_keep_fm: [1.0] * len(self.dropout_fm),
                     self.dropout_keep_deep: [1.0] * len(self.dropout_deep),
                     self.train_phase: False}
        batch_out = self.sess.run(self.out, feed_dict=feed_dict)

        if batch_index == 0:
            y_pred = np.reshape(batch_out, (num_batch,))
        else:
            y_pred = np.concatenate((y_pred, np.reshape(batch_out, (num_batch,))))

        batch_index += 1
        Xi_batch, Xv_batch, y_batch = self.get_batch(Xi, Xv, dummy_y, self.batch_size, batch_index)

    return y_pred
Example #27
Source File: dataloader_read_fasta.py From models with MIT License | 5 votes |
def read_fasta(fasta_file, split_char=' ', id_field=0):
    '''
    Reads in a fasta file containing multiple sequences.
    Returns a dictionary holding multiple sequences or only a single sequence,
    depending on the input file.
    In order to retrieve the protein identifier, the header is split after
    split_char and the field at position id_field is chosen as the identifier.
    '''
    sequences = dict()
    with open(fasta_file, 'r') as fasta_f:
        for line in fasta_f:
            # get uniprot ID from header and create new entry
            if line.startswith('>'):
                uniprot_id = line.replace('>', '').strip().split(split_char)[id_field]
                sequences[uniprot_id] = ''
            else:
                # replace all whitespace chars and join sequences spanning multiple lines
                sequences[uniprot_id] += ''.join(line.split()).upper()

    sequences = sorted(sequences.items(), key=lambda kv: len(sequences[kv[0]]))
    identifier, seqs = zip(*sequences)
    seqs = [np.asarray([seq]) for seq in seqs]
    #seqs = np.concatenate( seqs )
    print(seqs)
    return {
        "inputs": seqs,
        "metadata": {
            "id": identifier
        }
    }
Example #28
Source File: trainer.py From Deep_VoiceChanger with MIT License | 5 votes |
def preview_convert(iterator_a, iterator_b, g_a, g_b, device, gla, dst):
    @chainer.training.make_extension()
    def make_preview(trainer):
        with chainer.using_config('train', False):
            with chainer.no_backprop_mode():
                x_a = iterator_a.next()
                x_a = convert.concat_examples(x_a, device)
                x_a = chainer.Variable(x_a)

                x_b = iterator_b.next()
                x_b = convert.concat_examples(x_b, device)
                x_b = chainer.Variable(x_b)

                x_ab = g_a(x_a)
                x_ba = g_b(x_b)

                x_bab = g_a(x_ba)
                x_aba = g_b(x_ab)

                preview_dir = '{}/preview'.format(dst)
                if not os.path.exists(preview_dir):
                    os.makedirs(preview_dir)
                image_dir = '{}/image'.format(dst)
                if not os.path.exists(image_dir):
                    os.makedirs(image_dir)

                names = ['a', 'ab', 'aba', 'b', 'ba', 'bab']
                images = [x_a, x_ab, x_aba, x_b, x_ba, x_bab]
                for n, i in zip(names, images):
                    i = cp.asnumpy(i.data)[:, :, padding:-padding, :].reshape(1, -1, 128)
                    image.save(image_dir + '/{}{}.jpg'.format(trainer.updater.epoch, n), i)
                    w = np.concatenate([gla.inverse(_i) for _i in dataset.reverse(i)])
                    dataset.save(preview_dir + '/{}{}.wav'.format(trainer.updater.epoch, n), 16000, w)

    return make_preview
Example #29
Source File: dataset.py From Deep_VoiceChanger with MIT License | 5 votes |
def reverse(output_image):
    src = output_image[0, padding:-padding, :]
    src[src > 1] = 1
    src *= scale
    src -= bias
    np.abs(src, out=src)
    np.exp(src, out=src)
    src[src < 1000] = 1
    mil = np.array(src[:, 1:127][:, ::-1])
    src = np.concatenate([src, mil], 1)
    return src.astype(complex)
Example #30
Source File: dataset.py From Deep_VoiceChanger with MIT License | 5 votes |
def pre_encode():
    import tqdm

    path = input('enter wave path...')
    ds = WaveDataset(path, -1, True)
    num = ds.max // dif

    imgs = [ds.get_example(i) for i in tqdm.tqdm(range(num))]

    dst = np.concatenate(imgs, axis=1)
    print(dst.shape)

    np.save(path[:-3] + 'npy', dst)
    print('encoded file saved at', path[:-3] + 'npy')