Python pytorch_pretrained_bert.modeling.BertModel() Examples

The following are 30 code examples of pytorch_pretrained_bert.modeling.BertModel(). You can go to the original project or source file by following the links above each example. You may also want to check out all the available functions and classes of the module pytorch_pretrained_bert.modeling.
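As a quick orientation before the examples, here is a minimal sketch of how BertModel is typically instantiated and called in pytorch_pretrained_bert; the config values and input tensors below are made up for illustration, and real projects usually load weights with BertModel.from_pretrained(...) instead.

import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertModel

config = BertConfig(vocab_size_or_config_json_file=30522)  # toy config
model = BertModel(config)
model.eval()

input_ids = torch.tensor([[101, 7592, 2088, 102]])  # [batch_size, seq_len]
token_type_ids = torch.zeros_like(input_ids)
attention_mask = torch.ones_like(input_ids)

# encoded_layers: a list of [batch_size, seq_len, hidden_size] tensors,
# one per layer; pooled_output: [batch_size, hidden_size] from [CLS].
encoded_layers, pooled_output = model(
    input_ids, token_type_ids, attention_mask,
    output_all_encoded_layers=True)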
Example #1
Source File: seq2seq_bert_encoder.py    From stog with MIT License
def forward(self, input_ids,
                token_type_ids=None,
                attention_mask=None,
                output_all_encoded_layers=True,
                token_subword_index=None):
        """
        :param input_ids: same as it in BertModel
        :param token_type_ids: same as it in BertModel
        :param attention_mask: same as it in BertModel
        :param output_all_encoded_layers: same as it in BertModel
        :param token_subword_index: [batch_size, num_tokens, num_subwords]
        :return:
        """
        # encoded_layers: [batch_size, num_subword_pieces, hidden_size]
        encoded_layers, pooled_output = super(Seq2SeqBertEncoder, self).forward(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers)
        if token_subword_index is None:
            return encoded_layers, pooled_output
        else:
            return self.average_pooling(encoded_layers, token_subword_index), pooled_output 
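average_pooling is defined elsewhere in the project and is not shown above. As a rough sketch of what such a method plausibly does (the padding convention and the tensor manipulation below are assumptions, not the project's actual code), it averages each token's subword-piece vectors into a single token vector:

def average_pooling(self, encoded_layers, token_subword_index):
    # encoded_layers: [batch_size, num_subword_pieces, hidden_size]
    # token_subword_index: [batch_size, num_tokens, num_subwords], holding
    # subword positions per token; 0 is assumed to mark padding slots.
    batch_size, num_tokens, num_subwords = token_subword_index.size()
    flat_index = token_subword_index.view(batch_size, num_tokens * num_subwords)
    gathered = torch.gather(
        encoded_layers, 1,
        flat_index.unsqueeze(-1).expand(-1, -1, encoded_layers.size(-1)))
    gathered = gathered.view(batch_size, num_tokens, num_subwords, -1)
    mask = (token_subword_index != 0).float().unsqueeze(-1)
    # Average only over real subwords; clamp avoids division by zero.
    return (gathered * mask).sum(2) / mask.sum(2).clamp(min=1)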
Example #2
Source File: seq2seq_bert_encoder.py    From gtos with MIT License
def forward(self, input_ids,
                token_type_ids=None,
                attention_mask=None,
                output_all_encoded_layers=True,
                token_subword_index=None):
        """
        :param input_ids: same as it in BertModel
        :param token_type_ids: same as it in BertModel
        :param attention_mask: same as it in BertModel
        :param output_all_encoded_layers: same as it in BertModel
        :param token_subword_index: [batch_size, num_tokens, num_subwords]
        :return:
        """
        # encoded_layers: [batch_size, num_subword_pieces, hidden_size]
        encoded_layers, pooled_output = super(Seq2SeqBertEncoder, self).forward(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers)
        if token_subword_index is None:
            return encoded_layers, pooled_output
        else:
            return self.average_pooling(encoded_layers, token_subword_index), pooled_output 
Example #3
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertCNNForTripletNet, self).__init__(config)

        filters = [3, 4, 5]

        self.bert = BertModel(config)
        self.embedding_dropout = SpatialDropout1D(config.hidden_dropout_prob)

        self.conv_layers = nn.ModuleList()
        for filter_size in filters:
            conv_block = nn.Sequential(
                nn.Conv1d(
                    config.hidden_size,
                    CHANNEL_UNITS,
                    kernel_size=filter_size,
                    padding=1,
                ),
                # nn.BatchNorm1d(CHANNEL_UNITS),
                # nn.ReLU(inplace=True),
            )
            self.conv_layers.append(conv_block)

        self.apply(self.init_bert_weights) 
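The conv blocks above expect channels-first input, so a matching forward pass has to transpose BERT's [batch, seq_len, hidden_size] output before convolving. A hedged sketch of that step (the method name and the max-over-time pooling are assumptions beyond this snippet):

def _cnn_features(self, input_ids, token_type_ids=None, attention_mask=None):
    encoded_layers, _ = self.bert(
        input_ids, token_type_ids, attention_mask,
        output_all_encoded_layers=False)
    x = self.embedding_dropout(encoded_layers)
    x = x.transpose(1, 2)  # [batch, hidden_size, seq_len] for nn.Conv1d
    # Max-pool each filter size over time, then concatenate.
    pooled = [conv(x).max(dim=2)[0] for conv in self.conv_layers]
    return torch.cat(pooled, dim=1)  # [batch, CHANNEL_UNITS * len(filters)]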
Example #4
Source File: CailModel.py    From cail2019 with Apache License 2.0
def __init__(self, config, answer_verification=True, hidden_dropout_prob=0.3):
        super(CailModel, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.qa_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size*4, 2)
        self.apply(self.init_bert_weights)
        self.answer_verification = answer_verification
        if self.answer_verification:
            self.retionale_outputs = nn.Linear(config.hidden_size*4, 1)
            self.unk_ouputs = nn.Linear(config.hidden_size, 1)
            self.doc_att = nn.Linear(config.hidden_size*4, 1)
            self.yes_no_ouputs = nn.Linear(config.hidden_size*4, 2)
            self.ouputs_cls_3 = nn.Linear(config.hidden_size*4, 3)

            self.beta = 100
        else:
            # self.unk_yes_no_outputs_dropout = nn.Dropout(config.hidden_dropout_prob)
            self.unk_yes_no_outputs = nn.Linear(config.hidden_size, 3) 
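The hidden_size*4 input dimensions on the linear heads suggest that the forward pass concatenates the last four encoder layers along the hidden axis. A hedged sketch of that step (not the project's actual forward code):

encoded_layers, _ = self.bert(
    input_ids, token_type_ids, attention_mask,
    output_all_encoded_layers=True)
sequence_output = torch.cat(encoded_layers[-4:], dim=-1)
# sequence_output: [batch_size, seq_len, hidden_size * 4]
start_logits, end_logits = self.qa_outputs(sequence_output).split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)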
Example #5
Source File: run_ner.py    From bert-ner with MIT License
def __init__(self, config, num_labels):
        super(BertForNER, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(0.4)
        self.hidden2label = torch.nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
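A matching forward pass for this NER head classifies every token of the final layer. A minimal sketch (the masking and loss handling below are assumptions):

def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
    sequence_output, _ = self.bert(
        input_ids, token_type_ids, attention_mask,
        output_all_encoded_layers=False)
    logits = self.hidden2label(self.dropout(sequence_output))
    if labels is not None:
        loss_fct = torch.nn.CrossEntropyLoss()
        active = attention_mask.view(-1) == 1  # score real tokens only
        return loss_fct(logits.view(-1, self.num_labels)[active],
                        labels.view(-1)[active])
    return logits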
Example #6
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        self.apply(self.init_bert_weights) 
Example #7
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertNormForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        self.norm = nn.BatchNorm1d(config.hidden_size)
        self.apply(self.init_bert_weights) 
Example #8
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertPoolForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        self.apply(self.init_bert_weights) 
Example #9
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertEmbeddingForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        self.apply(self.init_bert_weights) 
Example #10
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertTwoForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        self.bert2 = BertModel(config)
        self.apply(self.init_bert_weights) 
Example #11
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config, num_fts=19):
        super(BertFtsForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        self.fc = nn.Linear(num_fts, 19)
        self.apply(self.init_bert_weights) 
Example #12
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertLSTMGRUForTripletNet, self).__init__(config)

        self.bert = BertModel(config)
        # self.embedding_dropout = SpatialDropout1D(config.hidden_dropout_prob)

        self.lstm = nn.LSTM(
            config.hidden_size, LSTM_UNITS, bidirectional=True, batch_first=True
        )
        self.gru = nn.GRU(
            LSTM_UNITS * 2, LSTM_UNITS, bidirectional=True, batch_first=True
        )
        self.apply(self.init_bert_weights) 
Example #13
Source File: net.py    From CAIL2019 with MIT License
def __init__(self, config):
        super(BertLSTMForTripletNet, self).__init__(config)

        self.bert = BertModel(config)

        self.lstm = nn.LSTM(
            config.hidden_size, 30, bidirectional=True, batch_first=True
        )
        self.apply(self.init_bert_weights) 
Example #14
Source File: san.py    From MT-DNN with MIT License
def __init__(
        self,
        init_checkpoint_model: Union[BertModel, FairseqRobertModel],
        pooler,
        config: MTDNNConfig,
    ):
        super(SANBERTNetwork, self).__init__()
        self.config = config
        self.bert = init_checkpoint_model
        self.pooler = pooler
        self.dropout_list = nn.ModuleList()
        self.encoder_type = config.encoder_type
        self.hidden_size = self.config.hidden_size

        # If only dumping features, skip building the task-specific heads below
        if config.dump_feature:
            return

        # Freeze BERT parameters when update_bert_opt is set
        if config.update_bert_opt > 0:
            for param in self.bert.parameters():
                param.requires_grad = False

        # Set decoder and scoring list parameters
        self.decoder_opts = config.decoder_opts
        self.scoring_list = nn.ModuleList()

        # Set task specific parameters
        self.task_types = config.task_types
        self.task_dropout_p = config.tasks_dropout_p
        self.tasks_nclass_list = config.tasks_nclass_list

        # TODO - Move to training
        # Generate tasks decoding and scoring lists
        self._generate_tasks_decoding_scoring_options()

        # Initialize weights

        # self._my_init() 
Example #15
Source File: model.py    From neutralizing-bias with MIT License
def __init__(self, config, cls_num_labels=2, tok_num_labels=2, tok2id=None):
        super(BertForMultitaskWithFeaturesOnTop, self).__init__(config)
        global ARGS
        
        self.bert = BertModel(config)
        
        self.featurizer = features.Featurizer(
            tok2id, lexicon_feature_bits=ARGS.lexicon_feature_bits) 
        # TODO -- don't hardcode this...
        nfeats = 90 if ARGS.lexicon_feature_bits == 1 else 118

        if ARGS.extra_features_method == 'concat':
            self.tok_classifier = ConcatCombine(
                config.hidden_size, nfeats, tok_num_labels, 
                ARGS.combiner_layers, config.hidden_dropout_prob,
                ARGS.small_waist, pre_enrich=ARGS.pre_enrich,
                activation=ARGS.activation_hidden,
                include_categories=ARGS.concat_categories,
                category_emb=ARGS.category_emb,
                add_category_emb=ARGS.add_category_emb)
        else:
            self.tok_classifier = AddCombine(
                config.hidden_size, nfeats, ARGS.combiner_layers,
                config.hidden_dropout_prob, ARGS.small_waist,
                out_dim=tok_num_labels, pre_enrich=ARGS.pre_enrich,
                include_categories=ARGS.concat_categories,
                category_emb=ARGS.category_emb,
                add_category_emb=ARGS.add_category_emb)

        self.cls_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.cls_classifier = nn.Linear(config.hidden_size, cls_num_labels)

        self.category_emb = ARGS.category_emb
        if ARGS.category_emb:
            self.category_embeddings = nn.Embedding(43, nfeats)

        self.apply(self.init_bert_weights) 
Example #16
Source File: model.py    From neutralizing-bias with MIT License
def __init__(self, config, cls_num_labels=2, tok_num_labels=2, tok2id=None):
        super(BertForMultitask, self).__init__(config)
        self.bert = BertModel(config)

        self.cls_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.cls_classifier = nn.Linear(config.hidden_size, cls_num_labels)
        
        self.tok_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.tok_classifier = nn.Linear(config.hidden_size, tok_num_labels)
        
        self.apply(self.init_bert_weights) 
Example #17
Source File: bert_pytorch.py    From MAX-Toxic-Comment-Classifier with Apache License 2.0
def __init__(self, config, num_labels=2):
        super(BertForMultiLabelSequenceClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
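Unlike single-label classification, a multi-label head like this one is usually trained with a sigmoid-based loss so that each label is scored independently. A hedged sketch of the training step (the labels tensor is an assumption):

_, pooled_output = self.bert(
    input_ids, token_type_ids, attention_mask,
    output_all_encoded_layers=False)
logits = self.classifier(self.dropout(pooled_output))
loss_fct = torch.nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels.float())  # labels: [batch, num_labels] in {0, 1}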
Example #18
Source File: sumbt.py    From tatk with Apache License 2.0
def __init__(self, config):
        super(BertForUtteranceEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config) 
Example #19
Source File: run_smooth.py    From curriculum with GNU General Public License v3.0
def __init__(self, config, num_labels=2):
        super(BertForSmooth, self).__init__(config)
        self.dropout = torch.nn.Dropout(0.2)
        self.bert = BertModel(config)
        self.classifier = torch.nn.Linear(config.hidden_size, num_labels)
        self.loss = torch.nn.CrossEntropyLoss(torch.FloatTensor([1.0, 12.5]))
        self.apply(self.init_bert_weights) 
Example #20
Source File: BertForLabelEncoding.py    From SUMBT with MIT License
def __init__(self, config, trainable=False):
        super(BertForLabelEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config)
        # self.apply(self.init_bert_weights)  # not needed: pre-trained parameters are loaded

        if not trainable:
            for p in self.bert.parameters():
                p.requires_grad = False 
Example #21
Source File: bert_qa.py    From mrc-for-flat-nested-ner with Apache License 2.0
def __init__(self, config):
        super(BertQA, self).__init__(config)
        self.bert = BertModel(config)

        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights) 
Example #22
Source File: ner_model.py    From Doc2EDAG with MIT License
def __init__(self, config, num_entity_labels):
        super(BertForBasicNER, self).__init__(config)
        self.bert = BertModel(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_entity_labels)
        self.apply(self.init_bert_weights)

        self.num_entity_labels = num_entity_labels 
Example #23
Source File: BeliefTrackerSlotQueryMultiSlot.py    From ConvLab with MIT License
def __init__(self, config):
        super(BertForUtteranceEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config) 
Example #24
Source File: bert.py    From SemEval2019Task3 with MIT License
def __init__(self, config):
        super(BERT_classifer, self).__init__(config)

        self.num_labels = NUM_EMO
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(0.1)
        self.apply(self.init_bert_weights)
        self.bert_out_dim = None
        self.out2label = None
        self.out2binary = None
        self.out2emo = None 
Example #25
Source File: probert.py    From gap with MIT License
def __init__(self, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)

        self.pooler = BertPooler(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(1*config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
Example #26
Source File: grep.py    From gap with MIT License
def __init__(self, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)

        self.pooler = BertPooler(config)

        self.evidence_pooler_p = EvidencePooler(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(2 * config.hidden_size, num_labels)

        torch.nn.init.xavier_uniform_(self.classifier.weight)

        self.apply(self.init_bert_weights) 
Example #27
Source File: matcher.py    From gobbli with Apache License 2.0
def __init__(self, opt, bert_config=None):
        super(SANBertNetwork, self).__init__()
        self.dropout_list = nn.ModuleList()
        self.bert_config = BertConfig.from_dict(opt)
        self.bert = BertModel(self.bert_config)
        if opt['update_bert_opt'] > 0:
            for p in self.bert.parameters():
                p.requires_grad = False
        mem_size = self.bert_config.hidden_size
        self.decoder_opt = opt['answer_opt']
        self.scoring_list = nn.ModuleList()
        labels = [int(ls) for ls in opt['label_size'].split(',')]
        task_dropout_p = opt['tasks_dropout_p']
        self.bert_pooler = None

        for task, lab in enumerate(labels):
            decoder_opt = self.decoder_opt[task]
            dropout = DropoutWrapper(task_dropout_p[task], opt['vb_dropout'])
            self.dropout_list.append(dropout)
            if decoder_opt == 1:
                out_proj = SANClassifier(mem_size, mem_size, lab, opt, prefix='answer', dropout=dropout)
                self.scoring_list.append(out_proj)
            else:
                out_proj = nn.Linear(self.bert_config.hidden_size, lab)
                self.scoring_list.append(out_proj)

        self.opt = opt
        self._my_init()
        self.set_embed(opt) 
Example #28
Source File: BeliefTrackerSlotQueryMultiSlot.py    From SUMBT with MIT License
def __init__(self, config):
        super(BertForUtteranceEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config) 
Example #29
Source File: BeliefTrackerSlotQueryMultiSlotTransformer.py    From SUMBT with MIT License
def __init__(self, config):
        super(BertForUtteranceEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config) 
Example #30
Source File: run_cmrc2019_baseline.py    From cmrc2019 with Creative Commons Attribution Share Alike 4.0 International
def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights)