Python allennlp.training.metrics.CategoricalAccuracy() Examples
The following are 30 code examples of allennlp.training.metrics.CategoricalAccuracy(), collected from open-source projects. Each example notes its original project, source file, and license so you can trace it back to its full context. You may also want to check out all available functions and classes of the module allennlp.training.metrics.
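Before the project-specific examples, here is a minimal usage sketch of the metric itself, following the patterns in the test cases below: construct the metric once, call it on each batch of predictions and gold labels, then read (and optionally reset) the accumulated value with get_metric().

import torch
from allennlp.training.metrics import CategoricalAccuracy

# Minimal sketch: predictions are (batch_size, num_classes) class scores,
# targets are (batch_size,) gold label indices.
accuracy = CategoricalAccuracy()
predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                            [0.1, 0.6, 0.1, 0.2, 0.0]])
targets = torch.Tensor([0, 3])

accuracy(predictions, targets)          # accumulate counts for this batch
print(accuracy.get_metric(reset=True))  # 0.5; reset=True clears the running counts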
Example #1
Source File: bert_text_classifier.py From scibert with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             verbose_metrics: bool = False,
             dropout: float = 0.2,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             ) -> None:
    super(TextClassifier, self).__init__(vocab, regularizer)

    self.text_field_embedder = text_field_embedder
    self.dropout = torch.nn.Dropout(dropout)
    self.num_classes = self.vocab.get_vocab_size("labels")
    self.classifier_feedforward = torch.nn.Linear(self.text_field_embedder.get_output_dim(), self.num_classes)

    self.label_accuracy = CategoricalAccuracy()
    self.label_f1_metrics = {}
    self.verbose_metrics = verbose_metrics
    for i in range(self.num_classes):
        self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
    self.loss = torch.nn.CrossEntropyLoss()

    initializer(self)
Example #2
Source File: simple_tagger.py From magnitude with MIT License
def __init__(self, vocab,
             text_field_embedder,
             encoder,
             initializer=InitializerApplicator(),
             regularizer=None):
    super(SimpleTagger, self).__init__(vocab, regularizer)

    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size(u"labels")
    self.encoder = encoder
    self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                       self.num_classes))

    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           u"text field embedding dim", u"encoder input dim")
    self.metrics = {
        u"accuracy": CategoricalAccuracy(),
        u"accuracy3": CategoricalAccuracy(top_k=3)
    }

    initializer(self)

#overrides
Example #3
Source File: lstm_swag.py From swagaf with MIT License
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             # binary_feature_dim: int,
             embedding_dropout: float = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(LstmSwag, self).__init__(vocab, regularizer)

    self.text_field_embedder = text_field_embedder

    # For the span based evaluation, we don't want to consider labels
    # for verb, because the verb index is provided to the model.
    self.encoder = encoder
    self.embedding_dropout = Dropout(p=embedding_dropout)
    self.output_prediction = Linear(self.encoder.get_output_dim(), 1, bias=False)

    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           "text embedding dim", "eq encoder input dim")

    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #4
Source File: knowbert.py From kb with Apache License 2.0
def __init__(self, vocab: Vocabulary, regularizer: RegularizerApplicator = None):
    super().__init__(vocab, regularizer)

    self.nsp_loss_function = torch.nn.CrossEntropyLoss(ignore_index=-1)
    self.lm_loss_function = torch.nn.CrossEntropyLoss(ignore_index=0)

    self._metrics = {
        "total_loss_ema": ExponentialMovingAverage(alpha=0.5),
        "nsp_loss_ema": ExponentialMovingAverage(alpha=0.5),
        "lm_loss_ema": ExponentialMovingAverage(alpha=0.5),
        "total_loss": Average(),
        "nsp_loss": Average(),
        "lm_loss": Average(),
        "lm_loss_wgt": WeightedAverage(),
        "mrr": MeanReciprocalRank(),
    }
    self._accuracy = CategoricalAccuracy()
Example #5
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_top_k_categorical_accuracy_works_for_sequences(self):
    accuracy = CategoricalAccuracy(top_k=2)
    predictions = torch.Tensor([[[0.35, 0.25, 0.1, 0.1, 0.2],
                                 [0.1, 0.6, 0.1, 0.2, 0.0],
                                 [0.1, 0.6, 0.1, 0.2, 0.0]],
                                [[0.35, 0.25, 0.1, 0.1, 0.2],
                                 [0.1, 0.6, 0.1, 0.2, 0.0],
                                 [0.1, 0.6, 0.1, 0.2, 0.0]]])
    targets = torch.Tensor([[0, 3, 4],
                            [0, 1, 4]])
    accuracy(predictions, targets)
    actual_accuracy = accuracy.get_metric(reset=True)
    numpy.testing.assert_almost_equal(actual_accuracy, 0.6666666)

    # Test the same thing but with a mask:
    mask = torch.Tensor([[0, 1, 1],
                         [1, 0, 1]])
    accuracy(predictions, targets, mask)
    actual_accuracy = accuracy.get_metric(reset=True)
    numpy.testing.assert_almost_equal(actual_accuracy, 0.50)
Example #6
Source File: categorical_accuracy_test.py From allennlp with Apache License 2.0
def test_top_k_categorical_accuracy_works_for_sequences(self, device: str):
    accuracy = CategoricalAccuracy(top_k=2)
    predictions = torch.tensor(
        [
            [[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]],
            [[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0], [0.1, 0.6, 0.1, 0.2, 0.0]],
        ],
        device=device,
    )
    targets = torch.tensor([[0, 3, 4], [0, 1, 4]], device=device)
    accuracy(predictions, targets)
    actual_accuracy = accuracy.get_metric(reset=True)
    assert_allclose(actual_accuracy, 0.6666666)

    # Test the same thing but with a mask:
    mask = torch.tensor([[False, True, True], [True, False, True]], device=device)
    accuracy(predictions, targets, mask)
    actual_accuracy = accuracy.get_metric(reset=True)
    assert_allclose(actual_accuracy, 0.50)
Example #7
Source File: prolocal_model.py From propara with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             seq2seq_encoder: Seq2SeqEncoder,
             initializer: InitializerApplicator) -> None:
    super(ProLocalModel, self).__init__(vocab)

    self.text_field_embedder = text_field_embedder
    self.seq2seq_encoder = seq2seq_encoder

    self.attention_layer = \
        Attention(similarity_function=BilinearSimilarity(2 * seq2seq_encoder.get_output_dim(),
                                                         seq2seq_encoder.get_output_dim()),
                  normalize=True)

    self.num_types = self.vocab.get_vocab_size("state_change_type_labels")
    self.aggregate_feedforward = Linear(seq2seq_encoder.get_output_dim(),
                                        self.num_types)

    self.span_metric = SpanBasedF1Measure(vocab,
                                          tag_namespace="state_change_tags")  # by default "O" is ignored in metric computation
    self.num_tags = self.vocab.get_vocab_size("state_change_tags")

    self.tag_projection_layer = TimeDistributed(Linear(self.seq2seq_encoder.get_output_dim() + 2,
                                                       self.num_tags))

    self._type_accuracy = CategoricalAccuracy()
    self.type_f1_metrics = {}
    self.type_labels_vocab = self.vocab.get_index_to_token_vocabulary("state_change_type_labels")
    for type_label in self.type_labels_vocab.values():
        self.type_f1_metrics["type_" + type_label] = F1Measure(self.vocab.get_token_index(type_label, "state_change_type_labels"))

    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #8
Source File: decomposable_attention.py From magnitude with MIT License
def __init__(self, vocab,
             text_field_embedder,
             attend_feedforward,
             similarity_function,
             compare_feedforward,
             aggregate_feedforward,
             premise_encoder=None,
             hypothesis_encoder=None,
             initializer=InitializerApplicator(),
             regularizer=None):
    super(DecomposableAttention, self).__init__(vocab, regularizer)

    self._text_field_embedder = text_field_embedder
    self._attend_feedforward = TimeDistributed(attend_feedforward)
    self._matrix_attention = LegacyMatrixAttention(similarity_function)
    self._compare_feedforward = TimeDistributed(compare_feedforward)
    self._aggregate_feedforward = aggregate_feedforward
    self._premise_encoder = premise_encoder
    self._hypothesis_encoder = hypothesis_encoder or premise_encoder

    self._num_labels = vocab.get_vocab_size(namespace=u"labels")

    check_dimensions_match(text_field_embedder.get_output_dim(), attend_feedforward.get_input_dim(),
                           u"text field embedding dim", u"attend feedforward input dim")
    check_dimensions_match(aggregate_feedforward.get_output_dim(), self._num_labels,
                           u"final output dimension", u"number of labels")

    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()

    initializer(self)
Example #9
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_categorical_accuracy(self):
    accuracy = CategoricalAccuracy()
    predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                [0.1, 0.6, 0.1, 0.2, 0.0]])
    targets = torch.Tensor([0, 3])
    accuracy(predictions, targets)
    actual_accuracy = accuracy.get_metric()
    assert actual_accuracy == 0.50
Example #10
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_top_k_categorical_accuracy(self):
    accuracy = CategoricalAccuracy(top_k=2)
    predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                [0.1, 0.6, 0.1, 0.2, 0.0]])
    targets = torch.Tensor([0, 3])
    accuracy(predictions, targets)
    actual_accuracy = accuracy.get_metric()
    assert actual_accuracy == 1.0
Example #11
Source File: model.py From ConvLab with MIT License
def __init__(self, vocab: Vocabulary,
             input_dim: int,
             num_classes: int,
             label_namespace: str = "labels",
             feedforward: Optional[FeedForward] = None,
             dropout: Optional[float] = None,
             verbose_metrics: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)

    self.label_namespace = label_namespace
    self.input_dim = input_dim
    self.num_classes = num_classes
    self._verbose_metrics = verbose_metrics
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
    else:
        self.dropout = None
    self._feedforward = feedforward

    if self._feedforward is not None:
        self.projection_layer = Linear(feedforward.get_output_dim(), self.num_classes)
    else:
        self.projection_layer = Linear(self.input_dim, self.num_classes)

    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3),
        "accuracy5": CategoricalAccuracy(top_k=5)
    }
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #12
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_top_k_categorical_accuracy_respects_mask(self):
    accuracy = CategoricalAccuracy(top_k=2)
    predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                [0.1, 0.6, 0.1, 0.2, 0.0],
                                [0.1, 0.2, 0.5, 0.2, 0.0]])
    targets = torch.Tensor([0, 3, 0])
    mask = torch.Tensor([0, 1, 1])
    accuracy(predictions, targets, mask)
    actual_accuracy = accuracy.get_metric()
    assert actual_accuracy == 0.50
Example #13
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_tie_break_categorical_accuracy(self):
    accuracy = CategoricalAccuracy(tie_break=True)
    predictions = torch.Tensor([[0.35, 0.25, 0.35, 0.35, 0.35],
                                [0.1, 0.6, 0.1, 0.2, 0.2],
                                [0.1, 0.0, 0.1, 0.2, 0.2]])

    # Test without mask:
    targets = torch.Tensor([2, 1, 4])
    accuracy(predictions, targets)
    assert accuracy.get_metric(reset=True) == (0.25 + 1 + 0.5) / 3.0

    # Test with mask
    mask = torch.Tensor([1, 0, 1])
    targets = torch.Tensor([2, 1, 4])
    accuracy(predictions, targets, mask)
    assert accuracy.get_metric(reset=True) == (0.25 + 0.5) / 2.0

    # Test tie-break with sequence
    predictions = torch.Tensor([[[0.35, 0.25, 0.35, 0.35, 0.35],
                                 [0.1, 0.6, 0.1, 0.2, 0.2],
                                 [0.1, 0.0, 0.1, 0.2, 0.2]],
                                [[0.35, 0.25, 0.35, 0.35, 0.35],
                                 [0.1, 0.6, 0.1, 0.2, 0.2],
                                 [0.1, 0.0, 0.1, 0.2, 0.2]]])
    targets = torch.Tensor([[0, 1, 3],   # 0.25 + 1 + 0.5
                            [0, 3, 4]])  # 0.25 + 0 + 0.5 = 2.5
    accuracy(predictions, targets)
    actual_accuracy = accuracy.get_metric(reset=True)
    numpy.testing.assert_almost_equal(actual_accuracy, 2.5 / 6.0)
Example #14
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_top_k_and_tie_break_together_catches_exceptions(self):
    with pytest.raises(ConfigurationError):
        CategoricalAccuracy(top_k=2, tie_break=True)
Example #15
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_incorrect_top_k_catches_exceptions(self):
    with pytest.raises(ConfigurationError):
        CategoricalAccuracy(top_k=0)
Example #16
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_does_not_divide_by_zero_with_no_count(self):
    accuracy = CategoricalAccuracy()
    self.assertAlmostEqual(accuracy.get_metric(), 0.0)
Example #17
Source File: categorical_accuracy_test.py From magnitude with MIT License
def test_top_k_categorical_accuracy_accumulates_and_resets_correctly(self):
    accuracy = CategoricalAccuracy(top_k=2)
    predictions = torch.Tensor([[0.35, 0.25, 0.1, 0.1, 0.2],
                                [0.1, 0.6, 0.1, 0.2, 0.0]])
    targets = torch.Tensor([0, 3])
    accuracy(predictions, targets)
    accuracy(predictions, targets)
    accuracy(predictions, torch.Tensor([4, 4]))
    accuracy(predictions, torch.Tensor([4, 4]))
    actual_accuracy = accuracy.get_metric(reset=True)
    assert actual_accuracy == 0.50
    assert accuracy.correct_count == 0.0
    assert accuracy.total_count == 0.0
Example #18
Source File: single_correct_mcq_multee_esim.py From multee with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss,
             similarity_function: SimilarityFunction = DotProductSimilarity(),
             dropout: float = 0.5,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    # Need to send it verbatim because otherwise FromParams doesn't work appropriately.
    super().__init__(vocab=vocab,
                     text_field_embedder=text_field_embedder,
                     encoder=encoder,
                     similarity_function=similarity_function,
                     projection_feedforward=projection_feedforward,
                     inference_encoder=inference_encoder,
                     output_feedforward=output_feedforward,
                     output_logit=output_logit,
                     final_feedforward=final_feedforward,
                     contextualize_pair_comparators=contextualize_pair_comparators,
                     coverage_loss=coverage_loss,
                     pair_context_encoder=pair_context_encoder,
                     pair_feedforward=pair_feedforward,
                     dropout=dropout,
                     initializer=initializer,
                     regularizer=regularizer)
    self._answer_loss = torch.nn.CrossEntropyLoss()
    self._accuracy = CategoricalAccuracy()
Example #19
Source File: multiple_correct_mcq_multee_esim.py From multee with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             final_feedforward: FeedForward,
             coverage_loss: CoverageLoss,
             similarity_function: SimilarityFunction = DotProductSimilarity(),
             dropout: float = 0.5,
             contextualize_pair_comparators: bool = False,
             pair_context_encoder: Seq2SeqEncoder = None,
             pair_feedforward: FeedForward = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab=vocab,
                     text_field_embedder=text_field_embedder,
                     encoder=encoder,
                     similarity_function=similarity_function,
                     projection_feedforward=projection_feedforward,
                     inference_encoder=inference_encoder,
                     output_feedforward=output_feedforward,
                     output_logit=output_logit,
                     final_feedforward=final_feedforward,
                     coverage_loss=coverage_loss,
                     contextualize_pair_comparators=contextualize_pair_comparators,
                     pair_context_encoder=pair_context_encoder,
                     pair_feedforward=pair_feedforward,
                     dropout=dropout,
                     initializer=initializer,
                     regularizer=regularizer)
    self._ignore_index = -1
    self._answer_loss = torch.nn.CrossEntropyLoss(ignore_index=self._ignore_index)
    self._coverage_loss = coverage_loss

    self._accuracy = CategoricalAccuracy()
    self._entailment_f1 = F1Measure(self._label2idx["entailment"])
Example #20
Source File: slqa_h.py From SLQA with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_layer: Seq2SeqEncoder,
             projected_layer: Seq2SeqEncoder,
             flow_layer: Seq2SeqEncoder,
             contextual_passage: Seq2SeqEncoder,
             contextual_question: Seq2SeqEncoder,
             dropout: float = 0.2,
             regularizer: Optional[RegularizerApplicator] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             ):
    super(MultiGranularityHierarchicalAttentionFusionNetworks, self).__init__(vocab, regularizer)

    self._text_field_embedder = text_field_embedder
    self._phrase_layer = phrase_layer
    self._encoding_dim = self._phrase_layer.get_output_dim()
    self.projected_layer = torch.nn.Linear(self._encoding_dim + 1024, self._encoding_dim)
    self.fuse = FusionLayer(self._encoding_dim)
    self.projected_lstm = projected_layer
    self.flow = flow_layer
    self.contextual_layer_p = contextual_passage
    self.contextual_layer_q = contextual_question
    self.linear_self_align = torch.nn.Linear(self._encoding_dim, 1)
    self.bilinear_layer_s = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.bilinear_layer_e = BilinearSeqAtt(self._encoding_dim, self._encoding_dim)
    self.yesno_predictor = torch.nn.Linear(self._encoding_dim, 3)
    self.relu = torch.nn.ReLU()

    self._max_span_length = 30

    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()
    self._span_yesno_accuracy = CategoricalAccuracy()
    self._official_f1 = Average()
    self._variational_dropout = InputVariationalDropout(dropout)

    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #21
Source File: bert_cls.py From transformer-kernel-ranking with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             bert_model: Union[str, BertModel],
             dropout: float = 0.0,
             num_labels: int = None,
             index: str = "tokens",
             label_namespace: str = "labels",
             trainable: bool = True,
             initializer: InitializerApplicator = InitializerApplicator()) -> None:
    super().__init__(vocab)

    if isinstance(bert_model, str):
        self.bert_model = PretrainedBertModel.load(bert_model)
    else:
        self.bert_model = bert_model

    self.bert_model.requires_grad = trainable

    in_features = self.bert_model.config.hidden_size

    #if num_labels:
    out_features = 1
    #else:
    #    out_features = vocab.get_vocab_size(label_namespace)

    self._dropout = torch.nn.Dropout(p=dropout)

    self._classification_layer = torch.nn.Linear(in_features, out_features)
    #self._accuracy = CategoricalAccuracy()
    #self._loss = torch.nn.CrossEntropyLoss()
    self._index = index
    #initializer(self._classification_layer)
Example #22
Source File: rnet.py From R-net with MIT License
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             question_encoder: Seq2SeqEncoder,
             passage_encoder: Seq2SeqEncoder,
             pair_encoder: AttentionEncoder,
             self_encoder: AttentionEncoder,
             output_layer: QAOutputLayer,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None,
             share_encoder: bool = False):
    super().__init__(vocab, regularizer)

    self.text_field_embedder = text_field_embedder
    self.question_encoder = question_encoder
    self.passage_encoder = passage_encoder
    self.pair_encoder = pair_encoder
    self.self_encoder = self_encoder
    self.output_layer = output_layer

    self._span_start_accuracy = CategoricalAccuracy()
    self._span_end_accuracy = CategoricalAccuracy()
    self._span_accuracy = BooleanAccuracy()
    self._squad_metrics = SquadEmAndF1()

    self.share_encoder = share_encoder
    self.loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #23
Source File: classifier.py From vampire with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             input_embedder: TextFieldEmbedder,
             encoder: Encoder = None,
             dropout: float = None,
             initializer: InitializerApplicator = InitializerApplicator()
             ) -> None:
    """
    Parameters
    ----------
    vocab: `Vocabulary`
        vocab to use
    input_embedder: `TextFieldEmbedder`
        generic embedder of tokens
    encoder: `Encoder`, optional (default = None)
        Seq2Vec or Seq2Seq Encoder wrapper. If no encoder is provided,
        assume that the input is a bag of word counts, for linear classification.
    dropout: `float`, optional (default = None)
        if set, will apply dropout to output of encoder.
    initializer: `InitializerApplicator`
        generic initializer
    """
    super().__init__(vocab)
    self._input_embedder = input_embedder
    if dropout:
        self._dropout = torch.nn.Dropout(dropout)
    else:
        self._dropout = None
    self._encoder = encoder
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    if self._encoder:
        self._clf_input_dim = self._encoder.get_output_dim()
    else:
        self._clf_input_dim = self._input_embedder.get_output_dim()
    self._classification_layer = torch.nn.Linear(self._clf_input_dim,
                                                 self._num_labels)
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #24
Source File: simple_tagger.py From HIT-SCIR-CoNLL2019 with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             calculate_span_f1: bool = None,
             label_encoding: Optional[str] = None,
             label_namespace: str = "labels",
             verbose_metrics: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(SimpleTagger, self).__init__(vocab, regularizer)

    self.label_namespace = label_namespace
    self.text_field_embedder = text_field_embedder
    self.num_classes = self.vocab.get_vocab_size(label_namespace)
    self.encoder = encoder
    self._verbose_metrics = verbose_metrics
    self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),
                                                       self.num_classes))

    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           "text field embedding dim", "encoder input dim")

    # We keep calculate_span_f1 as a constructor argument for API consistency with
    # the CrfTagger, even it is redundant in this class
    # (label_encoding serves the same purpose).
    if calculate_span_f1 and not label_encoding:
        raise ConfigurationError("calculate_span_f1 is True, but "
                                 "no label_encoding was specified.")
    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3)
    }

    if calculate_span_f1 or label_encoding:
        self._f1_metric = SpanBasedF1Measure(vocab,
                                             tag_namespace=label_namespace,
                                             label_encoding=label_encoding)
    else:
        self._f1_metric = None

    initializer(self)
Example #25
Source File: tree_attention.py From ARC-Solvers with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_probability: FeedForward,
             edge_probability: FeedForward,
             premise_encoder: Seq2SeqEncoder,
             edge_embedding: Embedding,
             use_encoding_for_node: bool,
             ignore_edges: bool,
             attention_similarity: SimilarityFunction,
             initializer: InitializerApplicator = InitializerApplicator()) -> None:
    super(TreeAttention, self).__init__(vocab)

    self._text_field_embedder = text_field_embedder
    self._premise_encoder = premise_encoder
    self._nodes_attention = SingleTimeDistributed(MatrixAttention(attention_similarity), 0)
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    self._phrase_probability = TimeDistributed(phrase_probability)
    self._ignore_edges = ignore_edges
    if not self._ignore_edges:
        self._num_edges = vocab.get_vocab_size(namespace="edges")
        self._edge_probability = TimeDistributed(edge_probability)
        self._edge_embedding = edge_embedding
    self._use_encoding_for_node = use_encoding_for_node
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #26
Source File: seq2labels_model.py From NLP_Toolkit with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             predictor_dropout=0.0,
             labels_namespace: str = "labels",
             detect_namespace: str = "d_tags",
             verbose_metrics: bool = False,
             label_smoothing: float = 0.0,
             confidence: float = 0.0,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super(Seq2Labels, self).__init__(vocab, regularizer)

    self.label_namespaces = [labels_namespace, detect_namespace]
    self.text_field_embedder = text_field_embedder
    self.num_labels_classes = self.vocab.get_vocab_size(labels_namespace)
    self.num_detect_classes = self.vocab.get_vocab_size(detect_namespace)
    self.label_smoothing = label_smoothing
    self.confidence = confidence
    self.incorr_index = self.vocab.get_token_index("INCORRECT",
                                                   namespace=detect_namespace)
    self._verbose_metrics = verbose_metrics
    self.predictor_dropout = TimeDistributed(torch.nn.Dropout(predictor_dropout))

    self.tag_labels_projection_layer = TimeDistributed(
        Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_labels_classes))

    self.tag_detect_projection_layer = TimeDistributed(
        Linear(text_field_embedder._token_embedders['bert'].get_output_dim(), self.num_detect_classes))

    self.metrics = {"accuracy": CategoricalAccuracy()}

    initializer(self)
Example #27
Source File: simple_overlap.py From scitail with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             classifier: FeedForward,
             initializer: InitializerApplicator = InitializerApplicator()) -> None:
    super(SimpleOverlap, self).__init__(vocab)
    self.linear_mlp = classifier
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #28
Source File: tree_attention.py From scitail with Apache License 2.0
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_probability: FeedForward,
             edge_probability: FeedForward,
             premise_encoder: Seq2SeqEncoder,
             edge_embedding: Embedding,
             use_encoding_for_node: bool,
             ignore_edges: bool,
             attention_similarity: SimilarityFunction,
             initializer: InitializerApplicator = InitializerApplicator()) -> None:
    super(TreeAttention, self).__init__(vocab)

    self._text_field_embedder = text_field_embedder
    self._premise_encoder = premise_encoder
    self._nodes_attention = SingleTimeDistributed(MatrixAttention(attention_similarity), 0)
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    self._phrase_probability = TimeDistributed(phrase_probability)
    self._ignore_edges = ignore_edges
    if not self._ignore_edges:
        self._num_edges = vocab.get_vocab_size(namespace="edges")
        self._edge_probability = TimeDistributed(edge_probability)
        self._edge_embedding = edge_embedding
    self._use_encoding_for_node = use_encoding_for_node
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example #29
Source File: categorical_accuracy_test.py From allennlp with Apache License 2.0
def test_incorrect_top_k_catches_exceptions(self, device: str):
    with pytest.raises(ConfigurationError):
        CategoricalAccuracy(top_k=0)
Example #30
Source File: categorical_accuracy_test.py From allennlp with Apache License 2.0
def test_top_k_and_tie_break_together_catches_exceptions(self, device: str):
    with pytest.raises(ConfigurationError):
        CategoricalAccuracy(top_k=2, tie_break=True)