Python text.symbols.symbols() Examples

The following are 26 code examples of text.symbols.symbols(), collected from open-source text-to-speech projects. The project and source file for each example are listed above it.
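In the Tacotron-family repositories below, symbols is the master list of characters the model can emit, and the text package's __init__.py builds lookup tables from it. The following is a minimal illustrative sketch of how such a module is typically laid out; the exact character set and the ARPAbet handling vary per project, so the contents here are assumptions, not any one repository's file:

# text/symbols.py -- illustrative sketch only
_pad = '_'
_eos = '~'
_characters = "abcdefghijklmnopqrstuvwxyz!'(),-.:;? "

# Every model in these examples indexes into this list,
# so its length fixes the embedding vocabulary size.
symbols = [_pad, _eos] + list(_characters)

# text/__init__.py then builds the two lookup tables used throughout the examples:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}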
Example #1
Source File: Models.py From FastSpeech with MIT License

def __init__(self,
             n_src_vocab=len(symbols)+1,
             len_max_seq=hp.max_sep_len,
             d_word_vec=hp.word_vec_dim,
             n_layers=hp.encoder_n_layer,
             n_head=hp.encoder_head,
             d_k=64,
             d_v=64,
             d_model=hp.word_vec_dim,
             d_inner=hp.encoder_conv1d_filter_size,
             dropout=hp.dropout):
    super(Encoder, self).__init__()

    n_position = len_max_seq + 1

    self.src_word_emb = nn.Embedding(
        n_src_vocab, d_word_vec, padding_idx=Constants.PAD)

    self.position_enc = nn.Embedding.from_pretrained(
        get_sinusoid_encoding_table(n_position, d_word_vec, padding_idx=0),
        freeze=True)

    self.layer_stack = nn.ModuleList([FFTBlock(
        d_model, d_inner, n_head, d_k, d_v,
        dropout=dropout) for _ in range(n_layers)])
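Note the n_src_vocab=len(symbols)+1: the embedding table is sized to the symbol list plus one extra row so the padding index (Constants.PAD in this project) gets its own embedding. A stand-alone sketch of that sizing, assuming padding index 0 and an arbitrary 256-dimensional embedding in place of hp.word_vec_dim:

import torch.nn as nn
from text.symbols import symbols

PAD = 0  # assumed padding index; the example above reads it from Constants.PAD
src_word_emb = nn.Embedding(len(symbols) + 1, 256, padding_idx=PAD)
print(src_word_emb.weight.shape)  # (len(symbols) + 1, 256)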
Example #2
Source File: __init__.py From Tacotron-Wavenet-Vocoder-Korean with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #3
Source File: __init__.py From Tacotron2-Wavenet-Korean-TTS with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #4
Source File: __init__.py From Tacotron2-Wavenet-Korean-TTS with MIT License

def _text_to_sequence(text, cleaner_names, as_token):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id[EOS])  # [14, 29, 45, 2, 27, 62, 20, 21, 4, 39, 45, 1]

    if as_token:
        return sequence_to_text(sequence, combine_jamo=True)
    else:
        return np.array(sequence, dtype=np.int32)
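The while loop relies on a module-level regex, _curly_re, that splits the text into the plain text before the braces, the ARPAbet chunk inside them, and the remainder. In the Tacotron-derived frontends these repositories share, the pattern is typically the one sketched below (shown as an assumption; a fork may define it differently):

import re

# group(1): text before the braces, group(2): ARPAbet inside, group(3): the rest
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')

m = _curly_re.match('Turn left on {HH AW1 S S T AH0 N} Street.')
print(m.group(1))  # 'Turn left on '
print(m.group(2))  # 'HH AW1 S S T AH0 N'
print(m.group(3))  # ' Street.'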
Example #5
Source File: __init__.py From Tacotron2-Wavenet-Korean-TTS with MIT License

def convert_to_en_symbols():
    '''Converts built-in korean symbols to english, to be used for english training'''
    global _symbol_to_id, _id_to_symbol, isEn
    if not isEn:
        print(" [!] Converting to english mode")
        _symbol_to_id = {s: i for i, s in enumerate(en_symbols)}
        _id_to_symbol = {i: s for i, s in enumerate(en_symbols)}
        isEn = True
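These Korean forks initialize the lookup tables from the Korean symbol list and swap the module-level globals over to en_symbols once this function runs. A hedged usage sketch; the leading-underscore import and the 'english_cleaners' name are assumptions about this fork's layout, not confirmed by the listing:

from text import convert_to_en_symbols, _text_to_sequence

convert_to_en_symbols()  # swap the global tables to en_symbols before English training
ids = _text_to_sequence('hello world.', ['english_cleaners'], as_token=False)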
Example #6
Source File: __init__.py From Tacotron2-PyTorch with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #7
Source File: __init__.py From Tacotron2-PyTorch with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    return sequence
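A typical call, assuming the project registers its English cleaner pipeline under the name 'english_cleaners' as the Keith Ito-derived frontends usually do:

from text import text_to_sequence

# Plain text with an ARPAbet chunk in curly braces:
ids = text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.', ['english_cleaners'])
print(ids)  # a list of integer symbol IDs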
Example #8
Source File: __init__.py From FastSpeech with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #9
Source File: __init__.py From FastSpeech with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    return sequence
Example #10
Source File: __init__.py From tacotron with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #11
Source File: __init__.py From tacotron with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id['~'])

    return sequence
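This variant appends '~' as the end-of-sequence symbol. The same module usually also ships the inverse, sequence_to_text, which maps IDs back to a readable string and re-wraps ARPAbet symbols (stored with a leading '@') in curly braces. A sketch following the upstream tacotron layout, relying on the module-level _id_to_symbol table:

def sequence_to_text(sequence):
    '''Converts a sequence of symbol IDs back to a string.'''
    result = ''
    for symbol_id in sequence:
        if symbol_id in _id_to_symbol:
            s = _id_to_symbol[symbol_id]
            # Re-enclose ARPAbet symbols (stored as '@HH', '@AW1', ...) in curly braces:
            if len(s) > 1 and s[0] == '@':
                s = '{%s}' % s[1:]
            result += s
    return result.replace('}{', ' ')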
Example #12
Source File: __init__.py From LightSpeech with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #13
Source File: __init__.py From LightSpeech with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    return sequence
Example #14
Source File: network.py From Tacotron-pytorch with Apache License 2.0

def __init__(self, embedding_size):
    """
    :param embedding_size: dimension of embedding
    """
    super(Encoder, self).__init__()
    self.embedding_size = embedding_size
    self.embed = nn.Embedding(len(symbols), embedding_size)
    self.prenet = Prenet(embedding_size, hp.hidden_size * 2, hp.hidden_size)
    self.cbhg = CBHG(hp.hidden_size)
Example #15
Source File: __init__.py From Tacotron-Wavenet-Vocoder-Korean with MIT License

def _text_to_sequence(text, cleaner_names, as_token):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id[EOS])  # [14, 29, 45, 2, 27, 62, 20, 21, 4, 39, 45, 1]

    if as_token:
        return sequence_to_text(sequence, combine_jamo=True)
    else:
        return np.array(sequence, dtype=np.int32)
Example #16
Source File: __init__.py From Tacotron-Wavenet-Vocoder-Korean with MIT License

def convert_to_en_symbols():
    '''Converts built-in korean symbols to english, to be used for english training'''
    global _symbol_to_id, _id_to_symbol, isEn
    if not isEn:
        print(" [!] Converting to english mode")
        _symbol_to_id = {s: i for i, s in enumerate(en_symbols)}
        _id_to_symbol = {i: s for i, s in enumerate(en_symbols)}
        isEn = True
Example #17
Source File: __init__.py From MelNet with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #18
Source File: __init__.py From MelNet with MIT License

def _text_to_sequence(text, cleaner_names, as_token):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id[EOS])  # [14, 29, 45, 2, 27, 62, 20, 21, 4, 39, 45, 1]

    if as_token:
        return sequence_to_text(sequence, combine_jamo=True)
    else:
        return np.array(sequence, dtype=np.int32)
Example #19
Source File: __init__.py From MelNet with MIT License

def convert_to_en_symbols():
    '''Converts built-in korean symbols to english, to be used for english training'''
    global _symbol_to_id, _id_to_symbol, isEn
    if not isEn:
        print(" [!] Converting to english mode")
        _symbol_to_id = {s: i for i, s in enumerate(en_symbols)}
        _id_to_symbol = {i: s for i, s in enumerate(en_symbols)}
        isEn = True
Example #20
Source File: __init__.py From arabic-tacotron-tts with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #21
Source File: __init__.py From arabic-tacotron-tts with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id['~'])

    return sequence
Example #22
Source File: module.py From Transformer-TTS with MIT License

def __init__(self, embedding_size, num_hidden):
    super(EncoderPrenet, self).__init__()
    self.embedding_size = embedding_size
    self.embed = nn.Embedding(len(symbols), embedding_size, padding_idx=0)

    self.conv1 = Conv(in_channels=embedding_size,
                      out_channels=num_hidden,
                      kernel_size=5,
                      padding=int(np.floor(5 / 2)),
                      w_init='relu')
    self.conv2 = Conv(in_channels=num_hidden,
                      out_channels=num_hidden,
                      kernel_size=5,
                      padding=int(np.floor(5 / 2)),
                      w_init='relu')
    self.conv3 = Conv(in_channels=num_hidden,
                      out_channels=num_hidden,
                      kernel_size=5,
                      padding=int(np.floor(5 / 2)),
                      w_init='relu')

    self.batch_norm1 = nn.BatchNorm1d(num_hidden)
    self.batch_norm2 = nn.BatchNorm1d(num_hidden)
    self.batch_norm3 = nn.BatchNorm1d(num_hidden)

    self.dropout1 = nn.Dropout(p=0.2)
    self.dropout2 = nn.Dropout(p=0.2)
    self.dropout3 = nn.Dropout(p=0.2)
    self.projection = Linear(num_hidden, num_hidden)
Example #23
Source File: __init__.py From Transformer-TTS with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #24
Source File: __init__.py From Transformer-TTS with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id['~'])

    return sequence
Example #25
Source File: __init__.py From vae_tacotron with MIT License

def _symbols_to_sequence(symbols):
    return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
Example #26
Source File: __init__.py From vae_tacotron with MIT License

def text_to_sequence(text, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.

    The text can optionally have ARPAbet sequences enclosed in curly braces embedded in it.
    For example, "Turn left on {HH AW1 S S T AH0 N} Street."

    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through

    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []

    # Check for curly braces and treat their contents as ARPAbet:
    while len(text):
        m = _curly_re.match(text)
        if not m:
            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
            break
        sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
        sequence += _arpabet_to_sequence(m.group(2))
        text = m.group(3)

    # Append EOS token
    sequence.append(_symbol_to_id['~'])

    return sequence