Python torch.utils.data.dataset.TensorDataset() Examples
The following are 11 code examples of torch.utils.data.dataset.TensorDataset().
You can go to the original project or source file by following the link above each example.
You may also want to check out all available functions and classes of the torch.utils.data.dataset module.
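Before the project examples, here is a minimal sketch of the basic usage pattern they all share (the tensor shapes and variable names below are illustrative, not taken from any of the projects): TensorDataset wraps one or more tensors with a matching first dimension, indexing it yields a tuple with one entry per wrapped tensor, and the result can be fed directly to a DataLoader.

import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import TensorDataset

# Illustrative data: 100 samples with 8 features each, plus binary labels.
features = torch.randn(100, 8)
labels = torch.randint(0, 2, (100,))

# All wrapped tensors must agree in size along dimension 0.
ds = TensorDataset(features, labels)
x0, y0 = ds[0]  # indexing yields a tuple, one entry per wrapped tensor

loader = DataLoader(ds, batch_size=16, shuffle=True)
for batch_x, batch_y in loader:
    pass  # batch_x: (16, 8) and batch_y: (16,), except possibly the last batch

Note that a TensorDataset built from a single tensor still yields 1-tuples, which is why several of the examples below unpack each batch with batch[0].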
Example #1
Source File: sklearn_api.py (from pt-sdae, MIT License)
def score(self, X, y=None, sample_weight=None) -> float:
    loss_function = torch.nn.MSELoss()
    if self.autoencoder is None:
        raise NotFittedError
    if issparse(X):
        X = X.todense()
    self.autoencoder.eval()
    ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
    dataloader = DataLoader(ds, batch_size=self.batch_size, shuffle=False)
    loss = 0
    for index, batch in enumerate(dataloader):
        batch = batch[0]  # single-tensor TensorDataset yields 1-tuples
        if self.cuda:
            batch = batch.cuda(non_blocking=True)
        output = self.autoencoder(batch)
        loss += float(loss_function(output, batch).item())
    return loss
Example #2
Source File: sklearn_api.py (from pt-sdae, MIT License)
def transform(self, X):
    if self.autoencoder is None:
        raise NotFittedError
    if issparse(X):
        X = X.todense()
    self.autoencoder.eval()
    ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
    dataloader = DataLoader(ds, batch_size=self.batch_size, shuffle=False)
    features_encoder = [[] for _ in self.autoencoder.encoder]
    features_decoder = [[] for _ in self.autoencoder.decoder]
    for index, batch in enumerate(dataloader):
        batch = batch[0]
        if self.cuda:
            batch = batch.cuda(non_blocking=True)
        # Thread the batch through each unit in turn, collecting per-layer activations.
        for index, unit in enumerate(self.autoencoder.encoder):
            batch = unit(batch)
            features_encoder[index].append(batch.detach().cpu())
        for index, unit in enumerate(self.autoencoder.decoder):
            batch = unit(batch)
            features_decoder[index].append(batch.detach().cpu())
    return np.concatenate(
        [torch.cat(x).numpy() for x in features_encoder + features_decoder[:-1]],
        axis=1,
    )
Example #3
Source File: data.py (from learnedcardinalities, MIT License)
def get_train_datasets(num_queries, num_materialized_samples):
    dicts, column_min_max_vals, min_val, max_val, labels_train, labels_test, max_num_joins, max_num_predicates, train_data, test_data = load_and_encode_train_data(
        num_queries, num_materialized_samples)

    train_dataset = make_dataset(*train_data, labels=labels_train, max_num_joins=max_num_joins,
                                 max_num_predicates=max_num_predicates)
    print("Created TensorDataset for training data")

    test_dataset = make_dataset(*test_data, labels=labels_test, max_num_joins=max_num_joins,
                                max_num_predicates=max_num_predicates)
    print("Created TensorDataset for validation data")

    return dicts, column_min_max_vals, min_val, max_val, labels_train, labels_test, max_num_joins, max_num_predicates, train_dataset, test_dataset
Example #4
Source File: transformer_utils.py (from mead-baseline, Apache License 2.0)
def load(self, filename, vocabs):
    features = self.load_features(filename, vocabs)
    x_tensor = torch.tensor(features['x'], dtype=torch.long)
    num_sequences_word = (x_tensor.size(0) // self.nctx) * self.nctx
    x_tensor = x_tensor.narrow(0, 0, num_sequences_word).view(-1, self.nctx)
    return TensorDataset(x_tensor, x_tensor)
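The narrow/view idiom in this example turns one long stream of token ids into fixed-length rows by dropping the remainder; a standalone sketch of just that step (nctx and the stream contents are illustrative):

import torch

nctx = 4                   # context length, illustrative
stream = torch.arange(10)  # a flat stream of 10 token ids

# Keep the largest multiple of nctx, then fold into rows of length nctx.
usable = (stream.size(0) // nctx) * nctx
chunks = stream.narrow(0, 0, usable).view(-1, nctx)
print(chunks)  # tensor([[0, 1, 2, 3], [4, 5, 6, 7]]); the trailing 8 and 9 are dropped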
Example #5
Source File: transformer_utils.py (from mead-baseline, Apache License 2.0)
def load(self, filename, vocabs):
    features = self.load_features(filename, vocabs)
    y_tensor = torch.tensor(features['y'], dtype=torch.long)
    num_sequences_word = (y_tensor.size(0) // self.nctx) * self.nctx
    y_tensor = y_tensor.narrow(0, 0, num_sequences_word).view(-1, self.nctx)
    x_tensor = torch.tensor(features['x'], dtype=torch.long)
    x_tensor = x_tensor.narrow(0, 0, num_sequences_word)
    x_tensor = x_tensor.view(-1, self.nctx, self.chars_per_word)
    return TensorDataset(x_tensor, y_tensor)
Example #6
Source File: sklearn_api.py (from pt-sdae, MIT License)
def fit(self, X, y=None):
    if issparse(X):
        X = X.todense()
    ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
    self.autoencoder = StackedDenoisingAutoEncoder(
        self.dimensions, final_activation=self.final_activation
    )
    if self.cuda:
        self.autoencoder.cuda()
    ae.pretrain(
        ds,
        self.autoencoder,
        cuda=self.cuda,
        epochs=self.pretrain_epochs,
        batch_size=self.batch_size,
        optimizer=self.optimiser_pretrain,
        scheduler=self.scheduler,
        corruption=0.2,
        silent=True,
    )
    ae_optimizer = self.optimiser_train(self.autoencoder)
    ae.train(
        ds,
        self.autoencoder,
        cuda=self.cuda,
        epochs=self.finetune_epochs,
        batch_size=self.batch_size,
        optimizer=ae_optimizer,
        scheduler=self.scheduler(ae_optimizer),
        corruption=self.corruption,
        silent=True,
    )
    return self
Example #7
Source File: sklearn_api.py (from pt-sdae, MIT License)
def _transform(X, autoencoder, batch_size, cuda):
    ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
    dataloader = DataLoader(ds, batch_size=batch_size, shuffle=False)
    features = []
    for batch in dataloader:
        batch = batch[0]
        if cuda:
            batch = batch.cuda(non_blocking=True)
        features.append(autoencoder.encoder(batch).detach().cpu())
    return torch.cat(features).numpy()
Example #8
Source File: utils.py (from NRI, MIT License)
def load_kuramoto_data_old(batch_size=1, suffix=''):
    feat_train = np.load('data/old_kuramoto/feat_train' + suffix + '.npy')
    edges_train = np.load('data/old_kuramoto/edges_train' + suffix + '.npy')
    feat_valid = np.load('data/old_kuramoto/feat_valid' + suffix + '.npy')
    edges_valid = np.load('data/old_kuramoto/edges_valid' + suffix + '.npy')
    feat_test = np.load('data/old_kuramoto/feat_test' + suffix + '.npy')
    edges_test = np.load('data/old_kuramoto/edges_test' + suffix + '.npy')

    # Features: [num_sims, num_atoms, num_timesteps, num_dims]
    num_atoms = feat_train.shape[1]

    # Flatten edges to: [num_sims, num_atoms**2]
    edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])
    edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])
    edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])

    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(edges_train)
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(edges_valid)
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(edges_test)

    # Exclude self edges
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
        [num_atoms, num_atoms])
    edges_train = edges_train[:, off_diag_idx]
    edges_valid = edges_valid[:, off_diag_idx]
    edges_test = edges_test[:, off_diag_idx]

    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)

    train_data_loader = DataLoader(train_data, batch_size=batch_size)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)

    return train_data_loader, valid_data_loader, test_data_loader
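The off_diag_idx computation above is a compact way to select every non-diagonal entry of a flattened adjacency matrix, i.e. to drop self edges; a small sketch of what it produces (num_atoms = 3 is illustrative):

import numpy as np

num_atoms = 3  # illustrative
# Flat indices of the (row, col) pairs where row != col.
off_diag_idx = np.ravel_multi_index(
    np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
    [num_atoms, num_atoms])
print(off_diag_idx)  # [1 2 3 5 6 7]; positions 0, 4, 8 (the diagonal) are excluded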
Example #9
Source File: data.py (from learnedcardinalities, MIT License)
def make_dataset(samples, predicates, joins, labels, max_num_joins, max_num_predicates):
    """Add zero-padding and wrap as tensor dataset."""
    sample_masks = []
    sample_tensors = []
    for sample in samples:
        sample_tensor = np.vstack(sample)
        num_pad = max_num_joins + 1 - sample_tensor.shape[0]
        sample_mask = np.ones_like(sample_tensor).mean(1, keepdims=True)
        sample_tensor = np.pad(sample_tensor, ((0, num_pad), (0, 0)), 'constant')
        sample_mask = np.pad(sample_mask, ((0, num_pad), (0, 0)), 'constant')
        sample_tensors.append(np.expand_dims(sample_tensor, 0))
        sample_masks.append(np.expand_dims(sample_mask, 0))
    sample_tensors = np.vstack(sample_tensors)
    sample_tensors = torch.FloatTensor(sample_tensors)
    sample_masks = np.vstack(sample_masks)
    sample_masks = torch.FloatTensor(sample_masks)

    predicate_masks = []
    predicate_tensors = []
    for predicate in predicates:
        predicate_tensor = np.vstack(predicate)
        num_pad = max_num_predicates - predicate_tensor.shape[0]
        predicate_mask = np.ones_like(predicate_tensor).mean(1, keepdims=True)
        predicate_tensor = np.pad(predicate_tensor, ((0, num_pad), (0, 0)), 'constant')
        predicate_mask = np.pad(predicate_mask, ((0, num_pad), (0, 0)), 'constant')
        predicate_tensors.append(np.expand_dims(predicate_tensor, 0))
        predicate_masks.append(np.expand_dims(predicate_mask, 0))
    predicate_tensors = np.vstack(predicate_tensors)
    predicate_tensors = torch.FloatTensor(predicate_tensors)
    predicate_masks = np.vstack(predicate_masks)
    predicate_masks = torch.FloatTensor(predicate_masks)

    join_masks = []
    join_tensors = []
    for join in joins:
        join_tensor = np.vstack(join)
        num_pad = max_num_joins - join_tensor.shape[0]
        join_mask = np.ones_like(join_tensor).mean(1, keepdims=True)
        join_tensor = np.pad(join_tensor, ((0, num_pad), (0, 0)), 'constant')
        join_mask = np.pad(join_mask, ((0, num_pad), (0, 0)), 'constant')
        join_tensors.append(np.expand_dims(join_tensor, 0))
        join_masks.append(np.expand_dims(join_mask, 0))
    join_tensors = np.vstack(join_tensors)
    join_tensors = torch.FloatTensor(join_tensors)
    join_masks = np.vstack(join_masks)
    join_masks = torch.FloatTensor(join_masks)

    target_tensor = torch.FloatTensor(labels)

    return dataset.TensorDataset(sample_tensors, predicate_tensors, join_tensors, target_tensor,
                                 sample_masks, predicate_masks, join_masks)
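The three blocks above repeat one pad-and-mask pattern: stack a variable number of rows, compute a mask of ones for the real rows, then zero-pad both to a fixed height. A reduced sketch of just that pattern (the shapes and the max_rows value are illustrative):

import numpy as np
import torch

max_rows = 4                              # padding target, illustrative
item = np.ones((2, 3), dtype=np.float32)  # a variable-length item: 2 rows of 3 features

num_pad = max_rows - item.shape[0]
mask = np.ones_like(item).mean(1, keepdims=True)         # (2, 1) of ones: the real rows
item = np.pad(item, ((0, num_pad), (0, 0)), 'constant')  # zero-pad rows -> (4, 3)
mask = np.pad(mask, ((0, num_pad), (0, 0)), 'constant')  # -> (4, 1); zeros mark padding

print(torch.FloatTensor(mask).squeeze(1))  # tensor([1., 1., 0., 0.])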
Example #10
Source File: utils.py (from NRI, MIT License)
def load_kuramoto_data(batch_size=1, suffix=''):
    feat_train = np.load('data/feat_train' + suffix + '.npy')
    edges_train = np.load('data/edges_train' + suffix + '.npy')
    feat_valid = np.load('data/feat_valid' + suffix + '.npy')
    edges_valid = np.load('data/edges_valid' + suffix + '.npy')
    feat_test = np.load('data/feat_test' + suffix + '.npy')
    edges_test = np.load('data/edges_test' + suffix + '.npy')

    # Features: [num_sims, num_atoms, num_timesteps, num_dims]
    num_atoms = feat_train.shape[1]

    # Normalize each feature dim. individually
    feat_max = feat_train.max(0).max(0).max(0)
    feat_min = feat_train.min(0).min(0).min(0)
    feat_max = np.expand_dims(np.expand_dims(np.expand_dims(feat_max, 0), 0), 0)
    feat_min = np.expand_dims(np.expand_dims(np.expand_dims(feat_min, 0), 0), 0)

    # Normalize to [-1, 1]
    feat_train = (feat_train - feat_min) * 2 / (feat_max - feat_min) - 1
    feat_valid = (feat_valid - feat_min) * 2 / (feat_max - feat_min) - 1
    feat_test = (feat_test - feat_min) * 2 / (feat_max - feat_min) - 1

    # Flatten edges to: [num_sims, num_atoms**2]
    edges_train = np.reshape(edges_train, [-1, num_atoms ** 2])
    edges_valid = np.reshape(edges_valid, [-1, num_atoms ** 2])
    edges_test = np.reshape(edges_test, [-1, num_atoms ** 2])

    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(edges_train)
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(edges_valid)
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(edges_test)

    # Exclude self edges
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
        [num_atoms, num_atoms])
    edges_train = edges_train[:, off_diag_idx]
    edges_valid = edges_valid[:, off_diag_idx]
    edges_test = edges_test[:, off_diag_idx]

    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)

    train_data_loader = DataLoader(train_data, batch_size=batch_size)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)

    return train_data_loader, valid_data_loader, test_data_loader
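The normalization step above maps each feature dimension linearly onto [-1, 1] using the training-set extrema, i.e. x' = (x - min) * 2 / (max - min) - 1; a one-dimensional sketch with illustrative values:

import numpy as np

x = np.array([0.0, 5.0, 10.0])  # illustrative feature values
x_min, x_max = x.min(), x.max()
x_norm = (x - x_min) * 2 / (x_max - x_min) - 1
print(x_norm)  # [-1.  0.  1.]: min maps to -1, max maps to 1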
Example #11
Source File: utils.py (from NRI, MIT License)
def load_motion_data(batch_size=1, suffix=''):
    feat_train = np.load('data/motion_train' + suffix + '.npy')
    feat_valid = np.load('data/motion_valid' + suffix + '.npy')
    feat_test = np.load('data/motion_test' + suffix + '.npy')
    adj = np.load('data/motion_adj' + suffix + '.npy')

    # NOTE: Already normalized
    # Features: [num_samples, num_nodes, num_timesteps, num_dims]
    num_nodes = feat_train.shape[1]

    edges_train = np.repeat(np.expand_dims(adj.flatten(), 0), feat_train.shape[0], axis=0)
    edges_valid = np.repeat(np.expand_dims(adj.flatten(), 0), feat_valid.shape[0], axis=0)
    edges_test = np.repeat(np.expand_dims(adj.flatten(), 0), feat_test.shape[0], axis=0)

    feat_train = torch.FloatTensor(feat_train)
    edges_train = torch.LongTensor(np.array(edges_train, dtype=np.int64))
    feat_valid = torch.FloatTensor(feat_valid)
    edges_valid = torch.LongTensor(np.array(edges_valid, dtype=np.int64))
    feat_test = torch.FloatTensor(feat_test)
    edges_test = torch.LongTensor(np.array(edges_test, dtype=np.int64))

    # Exclude self edges
    off_diag_idx = np.ravel_multi_index(
        np.where(np.ones((num_nodes, num_nodes)) - np.eye(num_nodes)),
        [num_nodes, num_nodes])
    edges_train = edges_train[:, off_diag_idx]
    edges_valid = edges_valid[:, off_diag_idx]
    edges_test = edges_test[:, off_diag_idx]

    train_data = TensorDataset(feat_train, edges_train)
    valid_data = TensorDataset(feat_valid, edges_valid)
    test_data = TensorDataset(feat_test, edges_test)

    train_data_loader = DataLoader(train_data, batch_size=batch_size)
    valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
    test_data_loader = DataLoader(test_data, batch_size=batch_size)

    return train_data_loader, valid_data_loader, test_data_loader
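The np.repeat call in this example broadcasts one shared adjacency matrix out to a per-sample edge row, since every motion sample uses the same graph; a minimal sketch (the adjacency and sample count are illustrative):

import numpy as np

adj = np.array([[0, 1], [1, 0]])  # one shared adjacency, illustrative
num_samples = 3
edges = np.repeat(np.expand_dims(adj.flatten(), 0), num_samples, axis=0)
print(edges.shape)  # (3, 4): the same flattened adjacency repeated for every sample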