Python allennlp.modules.FeedForward() Examples
The following are 16 code examples of allennlp.modules.FeedForward(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module allennlp.modules, or try the search function.
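Before diving into the examples, here is a minimal sketch of constructing and calling a FeedForward directly. The layer sizes, activations, and dropout below are illustrative assumptions, not values taken from any of the projects listed:

import torch
from allennlp.modules import FeedForward
from allennlp.nn import Activation

# Illustrative sizes: a 2-layer MLP mapping 16 -> 8 -> 8, with ReLU and dropout.
feedforward = FeedForward(
    input_dim=16,
    num_layers=2,
    hidden_dims=[8, 8],
    activations=[Activation.by_name("relu")(), Activation.by_name("relu")()],
    dropout=0.1,
)

# FeedForward operates on the last dimension, so leading batch (and sequence)
# dimensions pass through unchanged.
inputs = torch.randn(4, 16)
outputs = feedforward(inputs)
assert outputs.shape == (4, feedforward.get_output_dim())  # (4, 8)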
Example #1
Source File: feedforward_encoder_test.py From allennlp with Apache License 2.0
def test_feedforward_encoder_exactly_match_feedforward_each_item(self):
    feedforward = FeedForward(
        input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
    )
    encoder = FeedForwardEncoder(feedforward)
    tensor = torch.randn([2, 3, 10])
    output = encoder(tensor)
    target = feedforward(tensor)
    numpy.testing.assert_array_almost_equal(
        target.detach().cpu().numpy(), output.detach().cpu().numpy()
    )

    # mask should work
    mask = torch.tensor([[True, True, True], [True, False, False]])
    output = encoder(tensor, mask)
    target = feedforward(tensor) * mask.unsqueeze(dim=-1).float()
    numpy.testing.assert_array_almost_equal(
        target.detach().cpu().numpy(), output.detach().cpu().numpy()
    )
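Note the design this test pins down: FeedForwardEncoder simply lifts a FeedForward into the Seq2SeqEncoder API. Because a feedforward layer is applied position-wise, the encoder's output matches calling the FeedForward on every timestep, with masked positions zeroed out via the broadcast mask.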
Example #2
Source File: esim_comparator.py From multee with Apache License 2.0
def __init__(self,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             similarity_function: SimilarityFunction = None,
             dropout: float = 0.5) -> None:
    super().__init__()
    self._encoder = encoder
    self._matrix_attention = LegacyMatrixAttention(similarity_function)
    self._projection_feedforward = projection_feedforward
    self._inference_encoder = inference_encoder
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
        self.rnn_input_dropout = InputVariationalDropout(dropout)
    else:
        self.dropout = None
        self.rnn_input_dropout = None
    self._output_feedforward = output_feedforward
Example #3
Source File: esim_comparator.py From multee with Apache License 2.0
def __init__(self,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             dropout: float = 0.5) -> None:
    super().__init__()
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
        self.rnn_input_dropout = InputVariationalDropout(dropout)
    else:
        self.dropout = None
        self.rnn_input_dropout = None
    self._projection_feedforward = projection_feedforward
    self._inference_encoder = inference_encoder
    self._output_feedforward = output_feedforward
    # self._weight_premise_token = weight_premise_token
Example #4
Source File: logistic_normal.py From vampire with Apache License 2.0
def __init__(self,
             vocab,
             encoder: FeedForward,
             mean_projection: FeedForward,
             log_variance_projection: FeedForward,
             decoder: FeedForward,
             kld_clamp: Optional[float] = None,
             z_dropout: float = 0.2) -> None:
    super(LogisticNormal, self).__init__(vocab)
    self.encoder = encoder
    self.mean_projection = mean_projection
    self.log_variance_projection = log_variance_projection
    self._kld_clamp = kld_clamp
    self._decoder = torch.nn.Linear(decoder.get_input_dim(), decoder.get_output_dim(), bias=False)
    self._z_dropout = torch.nn.Dropout(z_dropout)
    self.latent_dim = mean_projection.get_output_dim()
Example #5
Source File: model.py From ConvLab with MIT License
def __init__(self,
             vocab: Vocabulary,
             input_dim: int,
             num_classes: int,
             label_namespace: str = "labels",
             feedforward: Optional[FeedForward] = None,
             dropout: Optional[float] = None,
             verbose_metrics: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self.label_namespace = label_namespace
    self.input_dim = input_dim
    self.num_classes = num_classes
    self._verbose_metrics = verbose_metrics
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
    else:
        self.dropout = None
    self._feedforward = feedforward
    if self._feedforward is not None:
        self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
    else:
        self.projection_layer = Linear(self.input_dim, self.num_classes)
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3),
        "accuracy5": CategoricalAccuracy(top_k=5)
    }
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #6
Source File: text_classifier.py From scibert with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             text_encoder: Seq2SeqEncoder,
             classifier_feedforward: FeedForward,
             verbose_metrics: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             ) -> None:
    super(TextClassifier, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size("labels")
    self.text_encoder = text_encoder
    self.classifier_feedforward = classifier_feedforward
    self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)
    self.label_accuracy = CategoricalAccuracy()
    self.label_f1_metrics = {}
    self.verbose_metrics = verbose_metrics
    # Track a per-label F1 measure for every class in the "labels" namespace.
    for i in range(self.num_classes):
        self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
    self.loss = torch.nn.CrossEntropyLoss()
    self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
    initializer(self)
Example #7
Source File: feedforward_encoder_test.py From allennlp with Apache License 2.0
def test_get_dimension_is_correct(self):
    feedforward = FeedForward(
        input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
    )
    encoder = FeedForwardEncoder(feedforward)
    assert encoder.get_input_dim() == feedforward.get_input_dim()
    assert encoder.get_output_dim() == feedforward.get_output_dim()
Example #8
Source File: linking_transition_function.py From allennlp-semparse with Apache License 2.0
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = Activation.by_name("relu")(),
    add_action_bias: bool = True,
    mixture_feedforward: FeedForward = None,
    dropout: float = 0.0,
    num_layers: int = 1,
) -> None:
    super().__init__(
        encoder_output_dim=encoder_output_dim,
        action_embedding_dim=action_embedding_dim,
        input_attention=input_attention,
        activation=activation,
        add_action_bias=add_action_bias,
        dropout=dropout,
        num_layers=num_layers,
    )
    self._mixture_feedforward = mixture_feedforward
    if mixture_feedforward is not None:
        check_dimensions_match(
            encoder_output_dim,
            mixture_feedforward.get_input_dim(),
            "hidden state embedding dim",
            "mixture feedforward input dim",
        )
        check_dimensions_match(
            mixture_feedforward.get_output_dim(),
            1,
            "mixture feedforward output dim",
            "dimension for scalar value",
        )
Example #9
Source File: linking_coverage_transition_function.py From allennlp-semparse with Apache License 2.0
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = Activation.by_name("relu")(),
    add_action_bias: bool = True,
    mixture_feedforward: FeedForward = None,
    dropout: float = 0.0,
) -> None:
    super().__init__(
        encoder_output_dim=encoder_output_dim,
        action_embedding_dim=action_embedding_dim,
        input_attention=input_attention,
        activation=activation,
        add_action_bias=add_action_bias,
        dropout=dropout,
    )
    self._linked_checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
    self._mixture_feedforward = mixture_feedforward
    if mixture_feedforward is not None:
        check_dimensions_match(
            encoder_output_dim,
            mixture_feedforward.get_input_dim(),
            "hidden state embedding dim",
            "mixture feedforward input dim",
        )
        check_dimensions_match(
            mixture_feedforward.get_output_dim(),
            1,
            "mixture feedforward output dim",
            "dimension for scalar value",
        )
Example #10
Source File: single_correct_mcq_multee_esim.py From multee with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss,
             similarity_function: SimilarityFunction = DotProductSimilarity(),
             dropout: float = 0.5,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    # Need to send it verbatim because otherwise FromParams doesn't work appropriately.
    super().__init__(vocab=vocab,
                     text_field_embedder=text_field_embedder,
                     encoder=encoder,
                     similarity_function=similarity_function,
                     projection_feedforward=projection_feedforward,
                     inference_encoder=inference_encoder,
                     output_feedforward=output_feedforward,
                     output_logit=output_logit,
                     final_feedforward=final_feedforward,
                     contextualize_pair_comparators=contextualize_pair_comparators,
                     coverage_loss=coverage_loss,
                     pair_context_encoder=pair_context_encoder,
                     pair_feedforward=pair_feedforward,
                     dropout=dropout,
                     initializer=initializer,
                     regularizer=regularizer)
    self._answer_loss = torch.nn.CrossEntropyLoss()
    self._accuracy = CategoricalAccuracy()
Example #11
Source File: multiple_correct_mcq_multee_esim.py From multee with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss,
             similarity_function: SimilarityFunction = DotProductSimilarity(),
             dropout: float = 0.5,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab=vocab,
                     text_field_embedder=text_field_embedder,
                     encoder=encoder,
                     similarity_function=similarity_function,
                     projection_feedforward=projection_feedforward,
                     inference_encoder=inference_encoder,
                     output_feedforward=output_feedforward,
                     output_logit=output_logit,
                     final_feedforward=final_feedforward,
                     coverage_loss=coverage_loss,
                     contextualize_pair_comparators=contextualize_pair_comparators,
                     pair_context_encoder=pair_context_encoder,
                     pair_feedforward=pair_feedforward,
                     dropout=dropout,
                     initializer=initializer,
                     regularizer=regularizer)
    self._ignore_index = -1
    self._answer_loss = torch.nn.CrossEntropyLoss(ignore_index=self._ignore_index)
    self._coverage_loss = coverage_loss
    self._accuracy = CategoricalAccuracy()
    self._entailment_f1 = F1Measure(self._label2idx["entailment"])
Example #12
Source File: encoder.py From vampire with Apache License 2.0
def __init__(self, architecture: FeedForward) -> None:
    super(MLP, self).__init__(architecture)
    self._architecture = architecture
Example #13
Source File: basic_classifier.py From allennlp with Apache License 2.0
def __init__(
    self,
    vocab: Vocabulary,
    text_field_embedder: TextFieldEmbedder,
    seq2vec_encoder: Seq2VecEncoder,
    seq2seq_encoder: Seq2SeqEncoder = None,
    feedforward: Optional[FeedForward] = None,
    dropout: float = None,
    num_labels: int = None,
    label_namespace: str = "labels",
    namespace: str = "tokens",
    initializer: InitializerApplicator = InitializerApplicator(),
    **kwargs,
) -> None:
    super().__init__(vocab, **kwargs)
    self._text_field_embedder = text_field_embedder

    if seq2seq_encoder:
        self._seq2seq_encoder = seq2seq_encoder
    else:
        self._seq2seq_encoder = None

    self._seq2vec_encoder = seq2vec_encoder
    self._feedforward = feedforward
    if feedforward is not None:
        self._classifier_input_dim = self._feedforward.get_output_dim()
    else:
        self._classifier_input_dim = self._seq2vec_encoder.get_output_dim()

    if dropout:
        self._dropout = torch.nn.Dropout(dropout)
    else:
        self._dropout = None
    self._label_namespace = label_namespace
    self._namespace = namespace

    if num_labels:
        self._num_labels = num_labels
    else:
        self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
    self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #14
Source File: esim_pair2vec.py From pair2vec with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             encoder_keys: List[str],
             mask_key: str,
             pair2vec_config_file: str,
             pair2vec_model_file: str,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             similarity_function: SimilarityFunction,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             initializer: InitializerApplicator = InitializerApplicator(),
             dropout: float = 0.5,
             pair2vec_dropout: float = 0.0,
             bidirectional_pair2vec: bool = True,
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self._vocab = vocab
    self.pair2vec = util.get_pair2vec(pair2vec_config_file, pair2vec_model_file)
    self._encoder_keys = encoder_keys
    self._mask_key = mask_key
    self._text_field_embedder = text_field_embedder
    self._projection_feedforward = projection_feedforward
    self._encoder = encoder
    from allennlp.modules.matrix_attention import DotProductMatrixAttention
    self._matrix_attention = DotProductMatrixAttention()
    self._inference_encoder = inference_encoder
    self._pair2vec_dropout = torch.nn.Dropout(pair2vec_dropout)
    self._bidirectional_pair2vec = bidirectional_pair2vec

    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
        self.rnn_input_dropout = VariationalDropout(dropout)
    else:
        self.dropout = None
        self.rnn_input_dropout = None

    self._output_feedforward = output_feedforward
    self._output_logit = output_logit
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #15
Source File: multee_esim.py From multee with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             similarity_function: SimilarityFunction,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss = None,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             optimize_coverage_for: List = ["entailment", "neutral"],
             dropout: float = 0.5,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self._label2idx = self.vocab.get_token_to_index_vocabulary('labels')
    self._text_field_embedder = text_field_embedder
    self._entailment_comparator_layer_1 = EsimComparatorLayer1(encoder, dropout)
    self._entailment_comparator_layer_2 = EsimComparatorLayer2(similarity_function)
    self._td_entailment_comparator_layer_1 = TimeDistributed(self._entailment_comparator_layer_1)
    self._td_entailment_comparator_layer_2 = TimeDistributed(self._entailment_comparator_layer_2)
    self._entailment_comparator_layer_3plus_local = EsimComparatorLayer3Plus(projection_feedforward,
                                                                             inference_encoder,
                                                                             output_feedforward,
                                                                             dropout)
    self._td_entailment_comparator_layer_3plus_local = EachOutputTimeDistributed(self._entailment_comparator_layer_3plus_local)
    self._entailment_comparator_layer_3plus_global = copy.deepcopy(self._entailment_comparator_layer_3plus_local)
    self._contextualize_pair_comparators = contextualize_pair_comparators
    if not self._contextualize_pair_comparators:
        self._output_logit = output_logit
        self._td_output_logit = TimeDistributed(self._output_logit)
    self._final_feedforward = final_feedforward
    self._td_final_feedforward = TimeDistributed(final_feedforward)
    linear = torch.nn.Linear(2 * self._entailment_comparator_layer_3plus_local.get_output_dim(),
                             self._final_feedforward.get_input_dim())
    self._local_global_projection = torch.nn.Sequential(linear, torch.nn.ReLU())
    if self._contextualize_pair_comparators:
        self._pair_context_encoder = pair_context_encoder
        self._td_pair_feedforward = TimeDistributed(pair_feedforward)
    self._coverage_loss = coverage_loss
    # Do not apply initializer. If you do, make sure it doesn't reinitialize transferred parameters.
Example #16
Source File: model.py From glyce with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             sentence_encoder: Seq2VecEncoder,
             classifier_feedforward: FeedForward,
             label_weight: Dict[str, float] = None,
             use_label_distribution: bool = False,
             image_classification_ratio: float = 0.0,
             decay_every_i_step=100000,
             decay_ratio=0.8,
             instance_count=100000,
             max_epoch=10,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None
             ) -> None:
    super(BasicClassifier, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size("labels")
    self.sentence_encoder = sentence_encoder
    self.classifier_feedforward = classifier_feedforward
    if text_field_embedder.get_output_dim() != sentence_encoder.get_input_dim():
        raise ConfigurationError("The output dimension of the text_field_embedder must match the "
                                 "input dimension of the title_encoder. Found {} and {}, "
                                 "respectively.".format(text_field_embedder.get_output_dim(),
                                                        sentence_encoder.get_input_dim()))
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "cnn_loss": Average()
    }
    # Note: both branches currently use the same unweighted loss.
    if not use_label_distribution:
        self.loss = torch.nn.CrossEntropyLoss()
    else:
        self.loss = torch.nn.CrossEntropyLoss()
    self.image_classification_ratio = image_classification_ratio
    self.decay_every_i_step = decay_every_i_step
    self.decay_ratio = decay_ratio
    self.training_step = 0
    self.current_ratio = image_classification_ratio
    self.total_steps = max_epoch * instance_count // 64
    self.step_every_epoch = instance_count // 64
    print("number of steps per epoch:", self.step_every_epoch)
    initializer(self)