Python torch.nn.modules.Dropout() Examples
The following are 7 code examples of torch.nn.modules.Dropout().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module torch.nn.modules.
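Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Dropout does: during training it zeroes each element with probability p and rescales the survivors by 1 / (1 - p); in eval mode it passes the input through unchanged.

import torch
from torch.nn import Dropout

dropout = Dropout(p=0.5)
x = torch.ones(2, 4)

dropout.train()       # training mode: dropout is active
print(dropout(x))     # roughly half the entries are 0.0, the rest are 2.0

dropout.eval()        # evaluation mode: dropout is a no-op
print(dropout(x))     # identical to x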
Example #1
Source File: lstm_swag.py From swagaf with MIT License
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             # binary_feature_dim: int,
             embedding_dropout: float = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(LstmSwag, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder

    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.encoder = encoder
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self.output_prediction = Linear(self.encoder.get_output_dim(), 1, bias=False)

    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           "text embedding dim", "eq encoder input dim")
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
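The forward pass of LstmSwag is not shown above. As a rough, self-contained sketch of the same pattern with stand-in modules and dimensions (an illustration, not the project's actual code), the embedding dropout is applied to the embedded tokens before they reach the encoder:

import torch
from torch.nn import Dropout, LSTM, Linear

# Stand-ins for the embedder output, encoder, and output projection above.
embedding_dropout = Dropout(p=0.1)
encoder = LSTM(input_size=16, hidden_size=32, batch_first=True)
output_prediction = Linear(32, 1, bias=False)

embedded_text = torch.randn(8, 20, 16)           # (batch, seq_len, embed_dim)
dropped = embedding_dropout(embedded_text)       # zeroes ~10% of embedding units (train mode)
encoded_text, _ = encoder(dropped)               # (batch, seq_len, hidden_dim)
logits = output_prediction(encoded_text[:, -1])  # (batch, 1) score per sequence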
Example #2
Source File: elmo.py From allennlp with Apache License 2.0
def __init__(
    self,
    options_file: str,
    weight_file: str,
    num_output_representations: int,
    requires_grad: bool = False,
    do_layer_norm: bool = False,
    dropout: float = 0.5,
    vocab_to_cache: List[str] = None,
    keep_sentence_boundaries: bool = False,
    scalar_mix_parameters: List[float] = None,
    module: torch.nn.Module = None,
) -> None:
    super().__init__()

    logger.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError("Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(
            options_file,
            weight_file,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
        )
    self._has_cached_vocab = vocab_to_cache is not None
    self._keep_sentence_boundaries = keep_sentence_boundaries
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes: Any = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(
            self._elmo_lstm.num_layers,
            do_layer_norm=do_layer_norm,
            initial_scalar_parameters=scalar_mix_parameters,
            trainable=scalar_mix_parameters is None,
        )
        self.add_module("scalar_mix_{}".format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
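A hedged usage sketch for this constructor, assuming allennlp's Elmo class and batch_to_ids helper and using placeholder file paths (not code from the project):

from allennlp.modules.elmo import Elmo, batch_to_ids

# Placeholder paths: substitute real pretrained ELMo options/weights files.
options_file = "/path/to/elmo_options.json"
weight_file = "/path/to/elmo_weights.hdf5"

# Two output representations, each passed through Dropout(p=0.5) as shown above.
elmo = Elmo(options_file, weight_file, num_output_representations=2, dropout=0.5)

character_ids = batch_to_ids([["First", "sentence", "."], ["Another", "one"]])
output = elmo(character_ids)
# output["elmo_representations"] is a list of two (batch, seq_len, dim) tensors;
# output["mask"] marks the non-padding positions.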
Example #3
Source File: elmo.py From magnitude with MIT License
def __init__(self,
             options_file,
             weight_file,
             num_output_representations,
             requires_grad=False,
             do_layer_norm=False,
             dropout=0.5,
             vocab_to_cache=None,
             module=None):
    super(Elmo, self).__init__()

    logging.info(u"Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError(
                u"Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(options_file,
                                    weight_file,
                                    requires_grad=requires_grad,
                                    vocab_to_cache=vocab_to_cache)
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
        self.add_module(u'scalar_mix_{}'.format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
Example #4
Source File: semantic_role_labeler.py From magnitude with MIT License
def __init__(self,
             vocab,
             text_field_embedder,
             encoder,
             binary_feature_dim,
             embedding_dropout=0.0,
             initializer=InitializerApplicator(),
             regularizer=None,
             label_smoothing=None):
    super(SemanticRoleLabeler, self).__init__(vocab, regularizer)

    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size(u"labels")

    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.span_metric = SpanBasedF1Measure(vocab, tag_namespace=u"labels", ignore_classes=[u"V"])

    self.encoder = encoder
    # There are exactly 2 binary features for the verb predicate embedding.
    self.binary_feature_embedding = Embedding(2, binary_feature_dim)
    self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                       self.num_classes))
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self._label_smoothing = label_smoothing

    check_dimensions_match(text_field_embedder.get_output_dim() + binary_feature_dim,
                           encoder.get_input_dim(),
                           u"text embedding dim + verb indicator embedding dim",
                           u"encoder input dim")
    initializer(self)
Example #5
Source File: elmo.py From claf with MIT License
def __init__(
    self,
    options_file: str,
    weight_file: str,
    num_output_representations: int,
    requires_grad: bool = False,
    do_layer_norm: bool = False,
    dropout: float = 0.5,
    vocab_to_cache: List[str] = None,
    module: torch.nn.Module = None,
) -> None:
    super(Elmo, self).__init__()

    logging.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ValueError("Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(
            options_file,
            weight_file,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
        )
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self._scalar_mixes: Any = []
    for k in range(num_output_representations):
        scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
        self.add_module("scalar_mix_{}".format(k), scalar_mix)
        self._scalar_mixes.append(scalar_mix)
Example #6
Source File: my_elmo.py From MultiTask-MRC with BSD 3-Clause "New" or "Revised" License
def __init__(self,
             options_file: str,
             weight_file: str,
             num_output_representations: int,
             requires_grad: bool = False,
             do_layer_norm: bool = False,
             dropout: float = 0.5,
             vocab_to_cache: List[str] = None,
             module: torch.nn.Module = None) -> None:
    super(Elmo, self).__init__()

    logging.info("Initializing ELMo")
    if module is not None:
        if options_file is not None or weight_file is not None:
            raise ConfigurationError(
                "Don't provide options_file or weight_file with module")
        self._elmo_lstm = module
    else:
        self._elmo_lstm = _ElmoBiLm(options_file,
                                    weight_file,
                                    requires_grad=requires_grad,
                                    vocab_to_cache=vocab_to_cache)
    self._has_cached_vocab = vocab_to_cache is not None
    self._dropout = Dropout(p=dropout)
    self.num_output_representations = num_output_representations
    if num_output_representations != -1:
        self._scalar_mixes: Any = []
        for k in range(num_output_representations):
            scalar_mix = ScalarMix(self._elmo_lstm.num_layers, do_layer_norm=do_layer_norm)
            self.add_module('scalar_mix_{}'.format(k), scalar_mix)
            self._scalar_mixes.append(scalar_mix)
Example #7
Source File: summarunner.py From summarus with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             source_embedder: TextFieldEmbedder,
             sentence_encoder: Seq2VecEncoder,
             sentence_accumulator: Seq2SeqEncoder,
             use_salience: bool,
             use_pos_embedding: bool,
             use_output_bias: bool,
             use_novelty: bool,
             dropout: float = 0.3,
             pos_embedding_num: int = 50,
             pos_embedding_size: int = 128) -> None:
    super(SummaRuNNer, self).__init__(vocab)

    self._source_embedder = source_embedder
    self._sentence_encoder = sentence_encoder
    self._se_output_dim = self._sentence_encoder.get_output_dim()
    self._sentence_accumulator = sentence_accumulator
    self._h_sentence_dim = self._sentence_accumulator.get_output_dim()

    self._dropout_layer = Dropout(dropout)
    self._content_projection_layer = Linear(self._h_sentence_dim, 1)

    self._use_salience = use_salience
    if use_salience:
        self._document_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=True)
        self._salience_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=False)

    self._use_pos_embedding = use_pos_embedding
    if use_pos_embedding:
        self._pos_embedding_num = pos_embedding_num
        self._pos_embedding_size = pos_embedding_size
        self._pos_embedding_layer = Embedding(pos_embedding_num, pos_embedding_size)
        self._pos_projection_layer = Linear(pos_embedding_size, 1)

    self._use_output_bias = use_output_bias
    if use_output_bias:
        self._output_bias = Parameter(torch.zeros(1).uniform_(-0.1, 0.1), requires_grad=True)

    self._use_novelty = use_novelty
    if use_novelty:
        self._novelty_linear_layer = Linear(self._h_sentence_dim, self._h_sentence_dim, bias=False)