Python allennlp.nn.RegularizerApplicator() Examples
The following are 18 code examples of allennlp.nn.RegularizerApplicator(), collected from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out the other available functions and classes of the allennlp.nn module.
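Before the examples, here is a minimal sketch of the class itself (not taken from any of the projects below, and assuming the classic AllenNLP 0.x-era API): a RegularizerApplicator is built from (parameter-name regex, Regularizer) pairs, and calling it on a module returns the summed penalty over all parameters whose names match one of the regexes.

import torch
from allennlp.nn import RegularizerApplicator
from allennlp.nn.regularizers import L1Regularizer, L2Regularizer

# L2 on parameters whose names match "weight", L1 on those matching "bias".
applicator = RegularizerApplicator([
    ("weight", L2Regularizer(alpha=1e-4)),
    ("bias", L1Regularizer(alpha=1e-6)),
])

module = torch.nn.Linear(10, 5)
penalty = applicator(module)  # scalar tensor: 1e-4 * sum(weight ** 2) + 1e-6 * sum(|bias|)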
Example #1
Source File: bert_text_classifier.py From scibert with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             verbose_metrics: bool = False,
             dropout: float = 0.2,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             ) -> None:
    super(TextClassifier, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.dropout = torch.nn.Dropout(dropout)
    self.num_classes = self.vocab.get_vocab_size("labels")
    self.classifier_feedforward = torch.nn.Linear(self.text_field_embedder.get_output_dim(), self.num_classes)
    self.label_accuracy = CategoricalAccuracy()
    self.label_f1_metrics = {}
    self.verbose_metrics = verbose_metrics
    for i in range(self.num_classes):
        self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
    self.loss = torch.nn.CrossEntropyLoss()
    initializer(self)
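Every constructor in these examples forwards regularizer to the base Model.__init__, which simply stores it; during training the penalty is then added to the loss. Roughly, as a simplified sketch of the base-class behavior rather than code from scibert:

# Inside allennlp.models.Model (simplified): the stored applicator is applied to the
# whole model, so the parameters of every submodule registered above are covered.
def get_regularization_penalty(self):
    if self._regularizer is None:
        return 0.0
    return self._regularizer(self)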
Example #2
Source File: multiple_correct_mcq_multee_esim.py From multee with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss,
             similarity_function: SimilarityFunction = DotProductSimilarity(),
             dropout: float = 0.5,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab=vocab,
                     text_field_embedder=text_field_embedder,
                     encoder=encoder,
                     similarity_function=similarity_function,
                     projection_feedforward=projection_feedforward,
                     inference_encoder=inference_encoder,
                     output_feedforward=output_feedforward,
                     output_logit=output_logit,
                     final_feedforward=final_feedforward,
                     coverage_loss=coverage_loss,
                     contextualize_pair_comparators=contextualize_pair_comparators,
                     pair_context_encoder=pair_context_encoder,
                     pair_feedforward=pair_feedforward,
                     dropout=dropout,
                     initializer=initializer,
                     regularizer=regularizer)
    self._ignore_index = -1
    self._answer_loss = torch.nn.CrossEntropyLoss(ignore_index=self._ignore_index)
    self._coverage_loss = coverage_loss
    self._accuracy = CategoricalAccuracy()
    self._entailment_f1 = F1Measure(self._label2idx["entailment"])
Example #3
Source File: simple_tagger.py From HIT-SCIR-CoNLL2019 with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             calculate_span_f1: bool = None,
             label_encoding: Optional[str] = None,
             label_namespace: str = "labels",
             verbose_metrics: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(SimpleTagger, self).__init__(vocab, regularizer)
    self.label_namespace = label_namespace
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size(label_namespace)
    self.encoder = encoder
    self._verbose_metrics = verbose_metrics
    self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                       self.num_classes))
    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           "text field embedding dim", "encoder input dim")
    # We keep calculate_span_f1 as a constructor argument for API consistency with
    # the CrfTagger, even it is redundant in this class
    # (label_encoding serves the same purpose).
    if calculate_span_f1 and not label_encoding:
        raise ConfigurationError("calculate_span_f1 is True, but "
                                 "no label_encoding was specified.")
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3)
    }
    if calculate_span_f1 or label_encoding:
        self._f1_metric = SpanBasedF1Measure(vocab,
                                             tag_namespace=label_namespace,
                                             label_encoding=label_encoding)
    else:
        self._f1_metric = None
    initializer(self)
Example #4
Source File: rnet.py From R-net with MIT License
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             question_encoder: Seq2SeqEncoder,
             passage_encoder: Seq2SeqEncoder,
             pair_encoder: AttentionEncoder,
             self_encoder: AttentionEncoder,
             output_layer: QAOutputLayer,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             share_encoder: bool = False):
    super().__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.question_encoder = question_encoder
    self.passage_encoder = passage_encoder
    self.pair_encoder = pair_encoder
    self.self_encoder = self_encoder
    self.output_layer = output_layer
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self.share_encoder = share_encoder
    self.loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #5
Source File: custom_composed_seq2seq.py From summarus with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             source_text_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             decoder: SeqDecoder,
             tied_source_embedder_key: Optional[str] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(CustomComposedSeq2Seq, self).__init__(vocab, regularizer)
    self._source_text_embedder = source_text_embedder
    self._encoder = encoder
    self._decoder = decoder
    if self._encoder.get_output_dim() != self._decoder.get_output_dim():
        raise ConfigurationError(f"Encoder output dimension {self._encoder.get_output_dim()} should be"
                                 f" equal to decoder dimension {self._decoder.get_output_dim()}.")
    if tied_source_embedder_key:
        if not isinstance(self._source_text_embedder, BasicTextFieldEmbedder):
            raise ConfigurationError("Unable to tie embeddings,"
                                     "Source text embedder is not an instance of `BasicTextFieldEmbedder`.")
        source_embedder = self._source_text_embedder._token_embedders[tied_source_embedder_key]
        if not isinstance(source_embedder, Embedding):
            raise ConfigurationError("Unable to tie embeddings,"
                                     "Selected source embedder is not an instance of `Embedding`.")
        if source_embedder.get_output_dim() != self._decoder.target_embedder.get_output_dim():
            raise ConfigurationError(f"Output Dimensions mismatch between"
                                     f"source embedder and target embedder.")
        self._source_text_embedder._token_embedders[tied_source_embedder_key] = self._decoder.target_embedder
    initializer(self)
Example #6
Source File: slqa_h.py From SLQA with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             flow_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             ):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.flow = flow_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = torch.nn.Linear(self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()
    self._max_span_length = 30
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #7
Source File: model.py From ConvLab with MIT License
def __init__(self,
             vocab: Vocabulary,
             input_dim: int,
             num_classes: int,
             label_namespace: str = "labels",
             feedforward: Optional[FeedForward] = None,
             dropout: Optional[float] = None,
             verbose_metrics: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self.label_namespace = label_namespace
    self.input_dim = input_dim
    self.num_classes = num_classes
    self._verbose_metrics = verbose_metrics
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
    else:
        self.dropout = None
    self._feedforward = feedforward
    if self._feedforward is not None:
        self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
    else:
        self.projection_layer = Linear(self.input_dim, self.num_classes)
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3),
        "accuracy5": CategoricalAccuracy(top_k=5)
    }
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #8
Source File: single_correct_mcq_multee_esim.py From multee with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss,
             similarity_function: SimilarityFunction = DotProductSimilarity(),
             dropout: float = 0.5,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    # Need to send it verbatim because otherwise FromParams doesn't work appropriately.
    super().__init__(vocab=vocab,
                     text_field_embedder=text_field_embedder,
                     encoder=encoder,
                     similarity_function=similarity_function,
                     projection_feedforward=projection_feedforward,
                     inference_encoder=inference_encoder,
                     output_feedforward=output_feedforward,
                     output_logit=output_logit,
                     final_feedforward=final_feedforward,
                     contextualize_pair_comparators=contextualize_pair_comparators,
                     coverage_loss=coverage_loss,
                     pair_context_encoder=pair_context_encoder,
                     pair_feedforward=pair_feedforward,
                     dropout=dropout,
                     initializer=initializer,
                     regularizer=regularizer)
    self._answer_loss = torch.nn.CrossEntropyLoss()
    self._accuracy = CategoricalAccuracy()
Example #9
Source File: seq2labels_model.py From NLP_Toolkit with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             predictor_dropout=0.0,
             labels_namespace: str = "labels",
             detect_namespace: str = "d_tags",
             verbose_metrics: bool = False,
             label_smoothing: float = 0.0,
             confidence: float = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(Seq2Labels, self).__init__(vocab, regularizer)
    self.label_namespaces = [labels_namespace, detect_namespace]
    self.text_field_embedder = text_field_embedder
    self.num_labels_classes = self.vocab.get_vocab_size(labels_namespace)
    self.num_detect_classes = self.vocab.get_vocab_size(detect_namespace)
    self.label_smoothing = label_smoothing
    self.confidence = confidence
    self.incorr_index = self.vocab.get_token_index("INCORRECT", namespace=detect_namespace)
    self._verbose_metrics = verbose_metrics
    self.predictor_dropout = TimeDistributed(torch.nn.Dropout(predictor_dropout))
    self.tag_labels_projection_layer = TimeDistributed(
        Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_labels_classes))
    self.tag_detect_projection_layer = TimeDistributed(
        Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_detect_classes))
    self.metrics = {"accuracy": CategoricalAccuracy()}
    initializer(self)
Example #10
Source File: pico_crf_tagger.py From scibert with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             include_start_end_transitions: bool = True,
             dropout: Optional[float] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self.label_namespace = 'labels'
    self.num_tags = self.vocab.get_vocab_size(self.label_namespace)
    # encode text
    self.text_field_embedder = text_field_embedder
    self.encoder = encoder
    self.dropout = torch.nn.Dropout(dropout) if dropout else None
    # crf
    output_dim = self.encoder.get_output_dim()
    self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
    self.crf = ConditionalRandomField(self.num_tags, constraints=None,
                                      include_start_end_transitions=include_start_end_transitions)
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3)
    }
    for index, label in self.vocab.get_index_to_token_vocabulary(self.label_namespace).items():
        self.metrics['F1_' + label] = F1Measure(positive_label=index)
    initializer(self)
Example #11
Source File: text_classifier.py From scibert with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             text_encoder: Seq2SeqEncoder,
             classifier_feedforward: FeedForward,
             verbose_metrics: False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             ) -> None:
    super(TextClassifier, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size("labels")
    self.text_encoder = text_encoder
    self.classifier_feedforward = classifier_feedforward
    self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim(), self.num_classes)
    self.label_accuracy = CategoricalAccuracy()
    self.label_f1_metrics = {}
    self.verbose_metrics = verbose_metrics
    for i in range(self.num_classes):
        self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
    self.loss = torch.nn.CrossEntropyLoss()
    self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)
    initializer(self)
Example #12
Source File: esim_pair2vec.py From pair2vec with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             encoder_keys: List[str],
             mask_key: str,
             pair2vec_config_file: str,
             pair2vec_model_file: str,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             similarity_function: SimilarityFunction,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             initializer: InitializerApplicator = InitializerApplicator(),
             dropout: float = 0.5,
             pair2vec_dropout: float = 0.0,
             bidirectional_pair2vec: bool = True,
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self._vocab = vocab
    self.pair2vec = util.get_pair2vec(pair2vec_config_file, pair2vec_model_file)
    self._encoder_keys = encoder_keys
    self._mask_key = mask_key
    self._text_field_embedder = text_field_embedder
    self._projection_feedforward = projection_feedforward
    self._encoder = encoder
    from allennlp.modules.matrix_attention import DotProductMatrixAttention
    self._matrix_attention = DotProductMatrixAttention()
    self._inference_encoder = inference_encoder
    self._pair2vec_dropout = torch.nn.Dropout(pair2vec_dropout)
    self._bidirectional_pair2vec = bidirectional_pair2vec
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
        self.rnn_input_dropout = VariationalDropout(dropout)
    else:
        self.dropout = None
        self.rnn_input_dropout = None
    self._output_feedforward = output_feedforward
    self._output_logit = output_logit
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #13
Source File: udify_model.py From udify with MIT License
def __init__(self,
             vocab: Vocabulary,
             tasks: List[str],
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             decoders: Dict[str, Model],
             post_encoder_embedder: TextFieldEmbedder = None,
             dropout: float = 0.0,
             word_dropout: float = 0.0,
             mix_embedding: int = None,
             layer_dropout: int = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(UdifyModel, self).__init__(vocab, regularizer)
    self.tasks = sorted(tasks)
    self.vocab = vocab
    self.bert_vocab = BertTokenizer.from_pretrained("config/archive/bert-base-multilingual-cased/vocab.txt").vocab
    self.text_field_embedder = text_field_embedder
    self.post_encoder_embedder = post_encoder_embedder
    self.shared_encoder = encoder
    self.word_dropout = word_dropout
    self.dropout = torch.nn.Dropout(p=dropout)
    self.decoders = torch.nn.ModuleDict(decoders)
    if mix_embedding:
        self.scalar_mix = torch.nn.ModuleDict({
            task: ScalarMixWithDropout(mix_embedding,
                                       do_layer_norm=False,
                                       dropout=layer_dropout)
            for task in self.decoders
        })
    else:
        self.scalar_mix = None
    self.metrics = {}
    for task in self.tasks:
        if task not in self.decoders:
            raise ConfigurationError(f"Task {task} has no corresponding decoder. Make sure their names match.")
    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           "text field embedding dim", "encoder input dim")
    initializer(self)
    self._count_params()
Example #14
Source File: multee_esim.py From multee with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             similarity_function: SimilarityFunction,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss = None,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             optimize_coverage_for: List = ["entailment", "neutral"],
             dropout: float = 0.5,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self._label2idx = self.vocab.get_token_to_index_vocabulary('labels')
    self._text_field_embedder = text_field_embedder
    self._entailment_comparator_layer_1 = EsimComparatorLayer1(encoder, dropout)
    self._entailment_comparator_layer_2 = EsimComparatorLayer2(similarity_function)
    self._td_entailment_comparator_layer_1 = TimeDistributed(self._entailment_comparator_layer_1)
    self._td_entailment_comparator_layer_2 = TimeDistributed(self._entailment_comparator_layer_2)
    self._entailment_comparator_layer_3plus_local = EsimComparatorLayer3Plus(projection_feedforward,
                                                                             inference_encoder,
                                                                             output_feedforward,
                                                                             dropout)
    self._td_entailment_comparator_layer_3plus_local = EachOutputTimeDistributed(self._entailment_comparator_layer_3plus_local)
    self._entailment_comparator_layer_3plus_global = copy.deepcopy(self._entailment_comparator_layer_3plus_local)
    self._contextualize_pair_comparators = contextualize_pair_comparators
    if not self._contextualize_pair_comparators:
        self._output_logit = output_logit
        self._td_output_logit = TimeDistributed(self._output_logit)
    self._final_feedforward = final_feedforward
    self._td_final_feedforward = TimeDistributed(final_feedforward)
    linear = torch.nn.Linear(2 * self._entailment_comparator_layer_3plus_local.get_output_dim(),
                             self._final_feedforward.get_input_dim())
    self._local_global_projection = torch.nn.Sequential(linear, torch.nn.ReLU())
    if self._contextualize_pair_comparators:
        self._pair_context_encoder = pair_context_encoder
        self._td_pair_feedforward = TimeDistributed(pair_feedforward)
    self._coverage_loss = coverage_loss
    # Do not apply initializer. If you do, make sure it doesn't reinitialize transferred parameters.
Example #15
Source File: seperate_slqa.py From SLQA with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             elmo_embedder: TextFieldEmbedder,
             tokens_embedder: TextFieldEmbedder,
             features_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             ):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
    self.elmo_embedder = elmo_embedder
    self.tokens_embedder = tokens_embedder
    self.features_embedder = features_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse_p = FusionLayer(self._encoding_dim)
    self.fuse_q = FusionLayer(self._encoding_dim)
    self.fuse_s = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    # self._self_attention = LinearMatrixAttention(self._encoding_dim, self._encoding_dim, 'x,y,x*y')
    self._self_attention = BilinearMatrixAttention(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = FeedForward(self._encoding_dim, self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()
    self._max_span_length = 30
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #16
Source File: tag_decoder.py From udify with MIT License
def __init__(self,
             vocab: Vocabulary,
             task: str,
             encoder: Seq2SeqEncoder,
             label_smoothing: float = 0.0,
             dropout: float = 0.0,
             adaptive: bool = False,
             features: List[str] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(TagDecoder, self).__init__(vocab, regularizer)
    self.task = task
    self.encoder = encoder
    self.output_dim = encoder.get_output_dim()
    self.label_smoothing = label_smoothing
    self.num_classes = self.vocab.get_vocab_size(task)
    self.adaptive = adaptive
    self.features = features if features else []
    self.metrics = {
        "acc": CategoricalAccuracy(),
        # "acc3": CategoricalAccuracy(top_k=3)
    }
    if self.adaptive:
        # TODO
        adaptive_cutoffs = [round(self.num_classes / 15), 3 * round(self.num_classes / 15)]
        self.task_output = AdaptiveLogSoftmaxWithLoss(self.output_dim,
                                                      self.num_classes,
                                                      cutoffs=adaptive_cutoffs,
                                                      div_value=4.0)
    else:
        self.task_output = TimeDistributed(Linear(self.output_dim, self.num_classes))
    self.feature_outputs = torch.nn.ModuleDict()
    self.features_metrics = {}
    for feature in self.features:
        self.feature_outputs[feature] = TimeDistributed(Linear(self.output_dim,
                                                               vocab.get_vocab_size(feature)))
        self.features_metrics[feature] = {
            "acc": CategoricalAccuracy(),
        }
    initializer(self)
Example #17
Source File: slqa.py From SLQA with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             ):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse_p = FusionLayer(self._encoding_dim)
    self.fuse_q = FusionLayer(self._encoding_dim)
    self.fuse_s = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    # self.bilinear_self_align = BilinearSelfAlign(self._encoding_dim)
    # self._self_attention = LinearMatrixAttention(self._encoding_dim, self._encoding_dim, 'x,y,x*y')
    self._self_attention = BilinearMatrixAttention(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = torch.nn.Linear(self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()
    self._max_span_length = 30
    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #18
Source File: model.py From glyce with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             sentence_encoder: Seq2VecEncoder,
             classifier_feedforward: FeedForward,
             label_weight: Dict[str, float] = None,
             use_label_distribution: bool = False,
             image_classification_ratio: float = 0.0,
             decay_every_i_step=100000,
             decay_ratio=0.8,
             instance_count=100000,
             max_epoch=10,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None
             ) -> None:
    super(BasicClassifier, self).__init__(vocab, regularizer)
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size("labels")
    self.sentence_encoder = sentence_encoder
    self.classifier_feedforward = classifier_feedforward
    if text_field_embedder.get_output_dim() != sentence_encoder.get_input_dim():
        raise ConfigurationError("The output dimension of the text_field_embedder must match the "
                                 "input dimension of the title_encoder. Found {} and {}, "
                                 "respectively.".format(text_field_embedder.get_output_dim(),
                                                        sentence_encoder.get_input_dim()))
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "cnn_loss": Average()
    }
    if not use_label_distribution:
        self.loss = torch.nn.CrossEntropyLoss()
    else:
        self.loss = torch.nn.CrossEntropyLoss()
    self.image_classification_ratio = image_classification_ratio
    self.decay_every_i_step = decay_every_i_step
    self.decay_ratio = decay_ratio
    self.training_step = 0
    self.current_ratio = image_classification_ratio
    self.total_steps = max_epoch * instance_count // 64
    self.step_every_epoch = instance_count // 64
    print("number of steps per epoch:", self.step_every_epoch)
    initializer(self)