Python fuel.transformers.Mapping() Examples
The following are 6 code examples of fuel.transformers.Mapping(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module fuel.transformers, or try the search function.
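Mapping wraps a data stream and applies a function to every example (or batch) it produces, returning a tuple of the same arity. As a minimal sketch of the idea before the project examples below (the dataset and the 'numbers' source name are illustrative, not from any of the projects), this doubles every value, mirroring the test cases in Examples #5 and #6:

from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers import Mapping

# A toy dataset with a single 'numbers' source.
dataset = IterableDataset({'numbers': [1, 2, 3]})
stream = DataStream(dataset)

# Mapping calls the given function on each example tuple and
# expects a tuple of the same arity back.
doubled = Mapping(stream, lambda data: tuple(2 * x for x in data))

print(list(doubled.get_epoch_iterator()))  # [(2,), (4,), (6,)]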
Example #1
Source File: data.py From DeepMind-Teaching-Machines-to-Read-and-Comprehend with MIT License
def setup_datastream(path, vocab_file, config):
    ds = QADataset(path, vocab_file, config.n_entities,
                   need_sep_token=config.concat_ctx_and_question)
    it = QAIterator(path, shuffle=config.shuffle_questions)

    stream = DataStream(ds, iteration_scheme=it)

    if config.concat_ctx_and_question:
        stream = ConcatCtxAndQuestion(stream, config.concat_question_before,
                                      ds.reverse_vocab['<SEP>'])

    # Sort sets of multiple batches to make batches of similar sizes
    stream = Batch(stream, iteration_scheme=ConstantScheme(
        config.batch_size * config.sort_batch_count))
    comparison = _balanced_batch_helper(stream.sources.index(
        'question' if config.concat_ctx_and_question else 'context'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)

    stream = Batch(stream, iteration_scheme=ConstantScheme(config.batch_size))
    stream = Padding(stream, mask_sources=['context', 'question', 'candidates'],
                     mask_dtype='int32')

    return ds, stream
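The Batch → Mapping(SortMapping(...)) → Unpack → Batch chain here is a common Fuel idiom for bucketing variable-length sequences: a large "super batch" is sorted by length so the final batches pad together examples of similar size. _balanced_batch_helper is defined elsewhere in the project and is not shown on this page; a plausible sketch of such a comparison key (the body is assumed, only the name comes from the source) is:

def _balanced_batch_helper(key_index):
    # Hypothetical reconstruction: return a key function that sorts the
    # examples in the super batch by the length of the chosen source.
    def key(example):
        return len(example[key_index])
    return key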
Example #2
Source File: timit.py From CTC-LSTM with Apache License 2.0
def setup_datastream(path, batch_size, sort_batch_count, valid=False):
    A = numpy.load(os.path.join(path, ('valid_x_raw.npy' if valid else 'train_x_raw.npy')))
    B = numpy.load(os.path.join(path, ('valid_phn.npy' if valid else 'train_phn.npy')))
    C = numpy.load(os.path.join(path, ('valid_seq_to_phn.npy' if valid else 'train_seq_to_phn.npy')))

    # Slice the phoneme labels for each utterance out of the flat label array
    D = [B[x[0]:x[1], 2] for x in C]

    ds = IndexableDataset({'input': A, 'output': D})
    stream = DataStream(ds, iteration_scheme=ShuffledExampleScheme(len(A)))

    # Sort sets of multiple batches to make batches of similar sizes
    stream = Batch(stream, iteration_scheme=ConstantScheme(batch_size * sort_batch_count))
    comparison = _balanced_batch_helper(stream.sources.index('input'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)

    stream = Batch(stream, iteration_scheme=ConstantScheme(batch_size, num_examples=len(A)))
    stream = Padding(stream, mask_sources=['input', 'output'])

    return ds, stream
Example #3
Source File: utils.py From blocks-char-rnn with MIT License
def get_stream(hdf5_file, which_set, batch_size=None):
    dataset = H5PYDataset(
        hdf5_file, which_sets=(which_set,), load_in_memory=True)
    if batch_size is None:
        batch_size = dataset.num_examples
    stream = DataStream(dataset=dataset, iteration_scheme=ShuffledScheme(
        examples=dataset.num_examples, batch_size=batch_size))
    # Required because Recurrent bricks receive as input
    # [sequence, batch, features]
    return Mapping(stream, transpose_stream)
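transpose_stream is defined elsewhere in blocks-char-rnn and is not shown here; the comment indicates it swaps the batch and sequence axes of each source. A minimal sketch of what such a mapping might look like, assuming each source is a numpy array laid out as (batch, sequence, ...):

def transpose_stream(data):
    # Hypothetical sketch: swap the first two axes of every source so
    # batches become [sequence, batch, ...], as Blocks' recurrent
    # bricks expect.
    return tuple(array.swapaxes(0, 1) for array in data)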
Example #4
Source File: preprocessing.py From attention-lvcsr with MIT License
def wrap_stream(self, stream):
    return Mapping(stream, Invoke(self, 'apply'))
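Invoke comes from the same project and is not shown on this page. Judging from its use here, it is a small callable that defers to a named method on an object, which keeps the mapping picklable where a bound method or lambda would not be. A rough sketch under that assumption:

class Invoke(object):
    # Hypothetical reconstruction: calls getattr(object_, name)(*args)
    # when invoked, so the Mapping function stays picklable.
    def __init__(self, object_, name):
        self.object_ = object_
        self.name = name

    def __call__(self, *args, **kwargs):
        return getattr(self.object_, self.name)(*args, **kwargs)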
Example #5
Source File: test_datasets.py From attention-lvcsr with MIT License
def test_default_transformer(self):
    class DoublingDataset(IterableDataset):
        def apply_default_transformer(self, stream):
            return Mapping(
                stream, lambda sources: tuple(2 * s for s in sources))

    dataset = DoublingDataset(self.data)
    stream = dataset.apply_default_transformer(DataStream(dataset))
    assert_equal(list(stream.get_epoch_iterator()), [(2,), (4,), (6,)])
Example #6
Source File: test_datasets.py From fuel with MIT License
def test_default_transformer(self):
    class DoublingDataset(IterableDataset):
        def apply_default_transformer(self, stream):
            return Mapping(
                stream, lambda sources: tuple(2 * s for s in sources))

    dataset = DoublingDataset(self.data)
    stream = dataset.apply_default_transformer(DataStream(dataset))
    assert_equal(list(stream.get_epoch_iterator()), [(2,), (4,), (6,)])
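Beyond replacing sources, Mapping also accepts an add_sources argument: the mapped values are then appended to each example under new source names instead of overwriting the originals. A small sketch (the dataset and source names are illustrative):

from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers import Mapping

dataset = IterableDataset({'numbers': [1, 2, 3]})
stream = Mapping(DataStream(dataset),
                 lambda data: tuple(2 * x for x in data),
                 add_sources=('doubled',))

print(stream.sources)  # ('numbers', 'doubled')
print(list(stream.get_epoch_iterator()))  # [(1, 2), (2, 4), (3, 6)]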