Python allennlp.modules.ConditionalRandomField() Examples
The following are 13 code examples of allennlp.modules.ConditionalRandomField(), drawn from open-source projects; the original project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the allennlp.modules module.
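Before the examples: ConditionalRandomField is a torch.nn.Module that layers a linear-chain CRF over per-token tag logits. Below is a minimal usage sketch; the shapes, values, and mask handling are illustrative assumptions (the exact mask dtype has varied across AllenNLP releases), not taken from any example on this page.

import torch
from allennlp.modules import ConditionalRandomField

num_tags = 5
crf = ConditionalRandomField(num_tags)

logits = torch.randn(2, 3, num_tags)             # (batch, seq_len, num_tags)
tags = torch.LongTensor([[2, 3, 4], [3, 2, 2]])  # gold tag indices
mask = torch.tensor([[True, True, True], [True, True, False]])

# forward() returns the summed log-likelihood of the gold tag sequences,
# so the usual training loss is its negation.
loss = -crf(logits, tags, mask)

# viterbi_tags() returns one (tag_sequence, score) pair per batch element.
decoded = crf.viterbi_tags(logits, mask)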
Example #1
Source File: evaluation.py From DE-CNN with MIT License
def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5, crf=False):
    super(Model, self).__init__()
    self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
    self.gen_embedding.weight = torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
    self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
    self.domain_embedding.weight = torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)
    self.conv1 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 5, padding=2)
    self.conv2 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 3, padding=1)
    self.dropout = torch.nn.Dropout(dropout)
    self.conv3 = torch.nn.Conv1d(256, 256, 5, padding=2)
    self.conv4 = torch.nn.Conv1d(256, 256, 5, padding=2)
    self.conv5 = torch.nn.Conv1d(256, 256, 5, padding=2)
    self.linear_ae = torch.nn.Linear(256, num_classes)
    self.crf_flag = crf
    if self.crf_flag:
        from allennlp.modules import ConditionalRandomField
        self.crf = ConditionalRandomField(num_classes)
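The DE-CNN forward pass is not reproduced in this snippet. The sketch below shows one plausible way a model with this crf_flag could use self.crf at train and test time; the helper functions are hypothetical, not DE-CNN code.

def crf_loss(model, scores, tags, mask):
    # scores: (batch, seq_len, num_classes) emitted by model.linear_ae.
    # The CRF returns a log-likelihood, so negate it for a loss.
    return -model.crf(scores, tags, mask)

def crf_decode(model, scores, mask):
    # viterbi_tags() yields (tag_path, score) pairs; keep only the paths.
    return [path for path, score in model.crf.viterbi_tags(scores, mask)]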
Example #2
Source File: train.py From DE-CNN with MIT License
def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5, crf=False):
    super(Model, self).__init__()
    self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
    self.gen_embedding.weight = torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
    self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
    self.domain_embedding.weight = torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)
    self.conv1 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 5, padding=2)
    self.conv2 = torch.nn.Conv1d(gen_emb.shape[1] + domain_emb.shape[1], 128, 3, padding=1)
    self.dropout = torch.nn.Dropout(dropout)
    self.conv3 = torch.nn.Conv1d(256, 256, 5, padding=2)
    self.conv4 = torch.nn.Conv1d(256, 256, 5, padding=2)
    self.conv5 = torch.nn.Conv1d(256, 256, 5, padding=2)
    self.linear_ae = torch.nn.Linear(256, num_classes)
    self.crf_flag = crf
    if self.crf_flag:
        from allennlp.modules import ConditionalRandomField
        self.crf = ConditionalRandomField(num_classes)
Example #3
Source File: sentiment_modeling.py From SpanABSA with Apache License 2.0
def __init__(self, config, use_crf=False):
    super(BertForBIOAspectExtraction, self).__init__()
    self.bert = BertModel(config)
    self.use_crf = use_crf
    # TODO check with Google if it's normal there is no dropout on the token
    # classifier of SQuAD in the TF version
    # self.dropout = nn.Dropout(config.hidden_dropout_prob)
    self.affine = nn.Linear(config.hidden_size, 3)
    if self.use_crf:
        self.crf = ConditionalRandomField(3)

    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=config.initializer_range)
        elif isinstance(module, BERTLayerNorm):
            module.beta.data.normal_(mean=0.0, std=config.initializer_range)
            module.gamma.data.normal_(mean=0.0, std=config.initializer_range)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()

    self.apply(init_weights)
Example #4
Source File: sentiment_modeling.py From SpanABSA with Apache License 2.0
def __init__(self, config, use_crf=False):
    super(BertForBIOAspectClassification, self).__init__()
    self.bert = BertModel(config)
    self.use_crf = use_crf
    # TODO check with Google if it's normal there is no dropout on the token
    # classifier of SQuAD in the TF version
    # self.dropout = nn.Dropout(config.hidden_dropout_prob)
    self.affine = nn.Linear(config.hidden_size, 5)
    if self.use_crf:
        self.crf = ConditionalRandomField(5)

    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=config.initializer_range)
        elif isinstance(module, BERTLayerNorm):
            module.beta.data.normal_(mean=0.0, std=config.initializer_range)
            module.gamma.data.normal_(mean=0.0, std=config.initializer_range)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()

    self.apply(init_weights)
Example #5
Source File: sentiment_modeling.py From SpanABSA with Apache License 2.0
def __init__(self, config, use_crf=False):
    super(BertForJointBIOExtractAndClassification, self).__init__()
    self.bert = BertModel(config)
    self.use_crf = use_crf
    # TODO check with Google if it's normal there is no dropout on the token
    # classifier of SQuAD in the TF version
    # self.dropout = nn.Dropout(config.hidden_dropout_prob)
    self.bio_affine = nn.Linear(config.hidden_size, 3)
    self.cls_affine = nn.Linear(config.hidden_size, 5)
    if self.use_crf:
        self.cls_crf = ConditionalRandomField(5)

    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=config.initializer_range)
        elif isinstance(module, BERTLayerNorm):
            module.beta.data.normal_(mean=0.0, std=config.initializer_range)
            module.gamma.data.normal_(mean=0.0, std=config.initializer_range)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()

    self.apply(init_weights)
Example #6
Source File: sentiment_modeling.py From SpanABSA with Apache License 2.0
def __init__(self, config, use_crf=False):
    super(BertForCollapsedBIOAspectExtractionAndClassification, self).__init__()
    self.bert = BertModel(config)
    self.use_crf = use_crf
    # TODO check with Google if it's normal there is no dropout on the token
    # classifier of SQuAD in the TF version
    # self.dropout = nn.Dropout(config.hidden_dropout_prob)
    self.affine = nn.Linear(config.hidden_size, 7)
    if self.use_crf:
        self.crf = ConditionalRandomField(7)

    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal
            # for initialization; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=config.initializer_range)
        elif isinstance(module, BERTLayerNorm):
            module.beta.data.normal_(mean=0.0, std=config.initializer_range)
            module.gamma.data.normal_(mean=0.0, std=config.initializer_range)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()

    self.apply(init_weights)
Example #7
Source File: lstm_crf.py From allennlp_tutorial with MIT License
def __init__(self,
             vocab: Vocabulary,
             embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder) -> None:
    super().__init__(vocab)
    self._embedder = embedder
    self._encoder = encoder
    self._classifier = torch.nn.Linear(
        in_features=encoder.get_output_dim(),
        out_features=vocab.get_vocab_size('labels')
    )
    self._crf = ConditionalRandomField(
        vocab.get_vocab_size('labels')
    )
    self._f1 = SpanBasedF1Measure(vocab, 'labels')
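The tutorial's forward method is omitted from this snippet. The following is a minimal sketch of how such a tagger typically wires the CRF in; the output-dictionary layout and the get_text_field_mask import are assumptions, not the tutorial's exact code, and the F1 metric update is left out.

from allennlp.nn.util import get_text_field_mask

def forward(self, tokens, label=None):
    mask = get_text_field_mask(tokens)                  # (batch, seq_len)
    encoded = self._encoder(self._embedder(tokens), mask)
    scores = self._classifier(encoded)                  # (batch, seq_len, num_labels)

    # Decode with Viterbi; each entry is a (tag_path, score) pair.
    best_paths = self._crf.viterbi_tags(scores, mask)
    output = {"tags": [path for path, score in best_paths]}

    # During training, the loss is the negated CRF log-likelihood.
    if label is not None:
        output["loss"] = -self._crf(scores, label, mask)
    return output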
Example #8
Source File: pico_crf_tagger.py From scibert with Apache License 2.0
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             include_start_end_transitions: bool = True,
             dropout: Optional[float] = None,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: Optional[RegularizerApplicator] = None) -> None:
    super().__init__(vocab, regularizer)
    self.label_namespace = 'labels'
    self.num_tags = self.vocab.get_vocab_size(self.label_namespace)

    # encode text
    self.text_field_embedder = text_field_embedder
    self.encoder = encoder
    self.dropout = torch.nn.Dropout(dropout) if dropout else None

    # crf
    output_dim = self.encoder.get_output_dim()
    self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))
    self.crf = ConditionalRandomField(self.num_tags,
                                      constraints=None,
                                      include_start_end_transitions=include_start_end_transitions)

    self.metrics = {
        "accuracy": CategoricalAccuracy(),
        "accuracy3": CategoricalAccuracy(top_k=3)
    }
    for index, label in self.vocab.get_index_to_token_vocabulary(self.label_namespace).items():
        self.metrics['F1_' + label] = F1Measure(positive_label=index)

    initializer(self)
Example #9
Source File: conditional_random_field_test.py From allennlp with Apache License 2.0
def setup_method(self):
    super().setup_method()
    self.logits = torch.Tensor(
        [
            [[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3, 0.1], [0, 0, 0.9, 10, 1]],
            [[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, 0.3, 0.1], [0, 0, 0.9, 1, 1]],
        ]
    )
    self.tags = torch.LongTensor([[2, 3, 4], [3, 2, 2]])
    self.transitions = torch.Tensor(
        [
            [0.1, 0.2, 0.3, 0.4, 0.5],
            [0.8, 0.3, 0.1, 0.7, 0.9],
            [-0.3, 2.1, -5.6, 3.4, 4.0],
            [0.2, 0.4, 0.6, -0.3, -0.4],
            [1.0, 1.0, 1.0, 1.0, 1.0],
        ]
    )
    self.transitions_from_start = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.6])
    self.transitions_to_end = torch.Tensor([-0.1, -0.2, 0.3, -0.4, -0.4])

    # Use the CRF Module with fixed transitions to compute the log_likelihood
    self.crf = ConditionalRandomField(5)
    self.crf.transitions = torch.nn.Parameter(self.transitions)
    self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
    self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
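For reference, forward() computes, per sequence, the score of the gold path (start transition, emissions, pairwise transitions, end transition) minus the log partition function over all possible paths. The brute-force restatement below, for a single unmasked sequence, is my own illustration of that quantity, not part of the test file.

import itertools
import torch

def brute_force_log_likelihood(logits, tags, transitions, start, end):
    # logits: (seq_len, num_tags) for one sequence; tags: the gold path.
    def path_score(path):
        # Start and end transitions, plus emissions and pairwise transitions.
        score = start[path[0]] + end[path[-1]]
        for i, tag in enumerate(path):
            score = score + logits[i, tag]
        for prev, nxt in zip(path, path[1:]):
            score = score + transitions[prev, nxt]
        return score

    seq_len, num_tags = logits.shape
    all_scores = torch.stack([
        path_score(list(path))
        for path in itertools.product(range(num_tags), repeat=seq_len)
    ])
    # log P(tags) = score(tags) - logsumexp over every possible path.
    return path_score(list(tags)) - torch.logsumexp(all_scores, dim=0)

# e.g. brute_force_log_likelihood(self.logits[0], self.tags[0],
#                                 self.transitions,
#                                 self.transitions_from_start,
#                                 self.transitions_to_end)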
Example #10
Source File: conditional_random_field_test.py From allennlp with Apache License 2.0
def test_constrained_viterbi_tags(self):
    constraints = {
        (0, 0), (0, 1),
        (1, 1), (1, 2),
        (2, 2), (2, 3),
        (3, 3), (3, 4),
        (4, 4), (4, 0),
    }

    # Add the transitions to the end tag
    # and from the start tag.
    for i in range(5):
        constraints.add((5, i))
        constraints.add((i, 6))

    crf = ConditionalRandomField(num_tags=5, constraints=constraints)
    crf.transitions = torch.nn.Parameter(self.transitions)
    crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
    crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)

    mask = torch.tensor([[True, True, True], [True, True, False]])

    viterbi_path = crf.viterbi_tags(self.logits, mask)

    # Get just the tags from each tuple of (tags, score).
    viterbi_tags = [x for x, y in viterbi_path]

    # Now the tags should respect the constraints
    assert viterbi_tags == [[2, 3, 3], [2, 3]]
Example #11
Source File: conditional_random_field_test.py From magnitude with MIT License
def setUp(self):
    super(TestConditionalRandomField, self).setUp()
    self.logits = torch.Tensor([
        [[0, 0, .5, .5, .2], [0, 0, .3, .3, .1], [0, 0, .9, 10, 1]],
        [[0, 0, .2, .5, .2], [0, 0, 3, .3, .1], [0, 0, .9, 1, 1]],
    ])
    self.tags = torch.LongTensor([
        [2, 3, 4],
        [3, 2, 2]
    ])
    self.transitions = torch.Tensor([
        [0.1, 0.2, 0.3, 0.4, 0.5],
        [0.8, 0.3, 0.1, 0.7, 0.9],
        [-0.3, 2.1, -5.6, 3.4, 4.0],
        [0.2, 0.4, 0.6, -0.3, -0.4],
        [1.0, 1.0, 1.0, 1.0, 1.0]
    ])
    self.transitions_from_start = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.6])
    self.transitions_to_end = torch.Tensor([-0.1, -0.2, 0.3, -0.4, -0.4])

    # Use the CRF Module with fixed transitions to compute the log_likelihood
    self.crf = ConditionalRandomField(5)
    self.crf.transitions = torch.nn.Parameter(self.transitions)
    self.crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
    self.crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)
Example #12
Source File: conditional_random_field_test.py From magnitude with MIT License
def test_constrained_viterbi_tags(self):
    constraints = set([(0, 0), (0, 1),
                       (1, 1), (1, 2),
                       (2, 2), (2, 3),
                       (3, 3), (3, 4),
                       (4, 4), (4, 0)])

    # Add the transitions to the end tag
    # and from the start tag.
    for i in range(5):
        constraints.add((5, i))
        constraints.add((i, 6))

    crf = ConditionalRandomField(num_tags=5, constraints=constraints)
    crf.transitions = torch.nn.Parameter(self.transitions)
    crf.start_transitions = torch.nn.Parameter(self.transitions_from_start)
    crf.end_transitions = torch.nn.Parameter(self.transitions_to_end)

    mask = torch.LongTensor([
        [1, 1, 1],
        [1, 1, 0]
    ])

    viterbi_path = crf.viterbi_tags(self.logits, mask)

    # Get just the tags from each tuple of (tags, score).
    viterbi_tags = [x for x, y in viterbi_path]

    # Now the tags should respect the constraints
    assert viterbi_tags == [
        [2, 3, 3],
        [2, 3]
    ]
Example #13
Source File: crf_tagger.py From magnitude with MIT License
def __init__(self, vocab,
             text_field_embedder,
             encoder,
             label_namespace=u"labels",
             constraint_type=None,
             feedforward=None,
             include_start_end_transitions=True,
             dropout=None,
             verbose_metrics=False,
             initializer=InitializerApplicator(),
             regularizer=None):
    super(CrfTagger, self).__init__(vocab, regularizer)

    self.label_namespace = label_namespace
    self.text_field_embedder = text_field_embedder
    self.num_tags = self.vocab.get_vocab_size(label_namespace)
    self.encoder = encoder
    self._verbose_metrics = verbose_metrics
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
    else:
        self.dropout = None
    self._feedforward = feedforward

    if feedforward is not None:
        output_dim = feedforward.get_output_dim()
    else:
        output_dim = self.encoder.get_output_dim()
    self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_tags))

    if constraint_type is not None:
        labels = self.vocab.get_index_to_token_vocabulary(label_namespace)
        constraints = allowed_transitions(constraint_type, labels)
    else:
        constraints = None

    self.crf = ConditionalRandomField(
        self.num_tags, constraints,
        include_start_end_transitions=include_start_end_transitions
    )

    self.span_metric = SpanBasedF1Measure(vocab,
                                          tag_namespace=label_namespace,
                                          label_encoding=constraint_type or u"BIO")

    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           u"text field embedding dim", u"encoder input dim")
    if feedforward is not None:
        check_dimensions_match(encoder.get_output_dim(), feedforward.get_input_dim(),
                               u"encoder output dim", u"feedforward input dim")
    initializer(self)

    #overrides
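Here allowed_transitions is what turns a constraint_type such as "BIO" or "BIOUL", plus the label vocabulary, into the (from_index, to_index) pairs the CRF accepts. A minimal sketch of calling it directly follows; the three-label toy vocabulary is my own example, not from crf_tagger.py.

from allennlp.modules.conditional_random_field import allowed_transitions

# Hypothetical toy vocabulary (index -> label) in the BIO scheme.
labels = {0: "O", 1: "B-PER", 2: "I-PER"}
constraints = allowed_transitions("BIO", labels)
# Returns (from_index, to_index) pairs; indices len(labels) and
# len(labels) + 1 denote the special start and end tags, the same
# convention as the hand-built constraints in Examples #10 and #12.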