Python langdetect.detect() Examples
The following are 18 code examples of langdetect.detect().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the langdetect module.
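Before the examples, it may help to see the module's core API in isolation. The sketch below is minimal and the sample strings are arbitrary: detect() returns the single most likely language code, detect_langs() returns ranked candidates with probabilities, LangDetectException is raised when the text carries no usable signal, and DetectorFactory.seed makes the otherwise non-deterministic detector reproducible.

from langdetect import detect, detect_langs, DetectorFactory
from langdetect.lang_detect_exception import LangDetectException

# langdetect is non-deterministic by default; seeding makes runs reproducible.
DetectorFactory.seed = 0

print(detect("This is an English sentence."))          # 'en'
print(detect_langs("Ceci est une phrase française."))  # e.g. [fr:0.999...]

try:
    detect("12345 67890")  # digits alone carry no language features
except LangDetectException:
    print("no language could be detected")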
Example #1
Source File: google_translator.py From B.E.N.J.I. with MIT License
def google_translate(link):
    translator = Translator()  # Google Translate API
    pystring = " ".join(link[1:-2])
    lang = detect(pystring)
    if link[-1] == "english":
        id = "en"
    elif link[-1] == "spanish":
        id = "es"
    elif link[-1] == "french":
        id = "fr"
    elif link[-1] == "german":
        id = "de"
    elif link[-1] == "italian":
        id = "it"
    elif link[-1] == "portugese" or link[-1] == "portuguese":
        id = "pt"
    else:
        id = "en"
    # To translate the given language to the required language
    translated = translator.translate(pystring, src=lang, dest=id)
    print(translated.text)  # Print the translated script
    try:
        speak.say("The translated text is " + translated.text)
        speak.runAndWait()
    except:
        print("Error speaking, here is the translated text: {}".format(translated.text))
Example #2
Source File: reviews_preprocessor.py From yelp with GNU Lesser General Public License v2.1
def tag_reviews_language(self):
    print('%s: tag reviews language' % time.strftime("%Y/%m/%d-%H:%M:%S"))

    if os.path.exists(Constants.LANGUAGE_RECORDS_FILE):
        print('Records have already been tagged with language field')
        self.records = \
            ETLUtils.load_json_file(Constants.LANGUAGE_RECORDS_FILE)
        return

    DetectorFactory.seed = 0
    for record in self.records:
        try:
            language = langdetect.detect(record[Constants.TEXT_FIELD])
        except LangDetectException:
            language = 'unknown'
        record[Constants.LANGUAGE_FIELD] = language

    ETLUtils.save_json_file(Constants.LANGUAGE_RECORDS_FILE, self.records)
Example #3
Source File: chatbot.py From messenger-maid-chan with MIT License
def get_response(self, query):
    try:
        probable_language = detect(query)
    except LangDetectException:
        probable_language = ""
    logging.info("Probable language is {}".format(probable_language))
    return self.get_response_from_chatbot(query, probable_language)
Example #4
Source File: text.py From MLPrimitives with MIT License
def _remove_stopwords(self, text):
    if text == '':
        return text

    if self.language_code:
        language_code = self.language_code
    elif self.language == 'multi':
        language_code = langdetect.detect(text)

    sw = self.get_stopwords(language_code)
    return ' '.join(word for word in text.split() if word not in sw)
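Here, get_stopwords() is the class's own helper and is not shown. As a hedged sketch of the same idea in standalone form, one might map langdetect's ISO codes to NLTK's stopword list names; the mapping below is partial and invented for illustration, and it requires the NLTK stopwords corpus to be downloaded.

from langdetect import detect
from nltk.corpus import stopwords  # requires: nltk.download('stopwords')

# Hypothetical mapping from langdetect ISO codes to NLTK list names;
# only a few entries shown, extend as needed.
NLTK_NAMES = {'en': 'english', 'fr': 'french', 'de': 'german', 'es': 'spanish'}

def remove_stopwords(text):
    code = detect(text)
    sw = set(stopwords.words(NLTK_NAMES.get(code, 'english')))
    return ' '.join(w for w in text.split() if w not in sw)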
Example #5
Source File: text.py From MLPrimitives with MIT License
def detect_language(texts):
    texts = pd.Series(texts)
    language_codes = texts.apply(langdetect.detect).value_counts()
    return language_codes.index[0]
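As a usage note, this function takes the majority vote across the whole collection, so a few misdetected outliers do not change the result. A hypothetical call, with sample strings invented for illustration:

# Hypothetical usage of detect_language() from Example #5;
# the input strings are invented for illustration.
texts = [
    "The weather is nice today.",
    "I would like a cup of coffee.",
    "Je voudrais un café.",
]
print(detect_language(texts))  # most frequent language code; here likely 'en'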
Example #6
Source File: extractor.py From metadoc with MIT License
def detect_language(self):
    """Langdetect is non-deterministic, so to achieve a higher probability
    we attempt detection multiple times and only report success if we get
    identical results.
    """
    if self.language:
        return

    try:
        nondet_attempts = [detect(self.fulltext) for i in range(0, 2)]
        is_unique = len(set(nondet_attempts)) == 1
        self.language = nondet_attempts[0] if is_unique else False
    except:
        pass
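The retry-and-compare approach above treats langdetect as a black box. An alternative the library itself documents is to fix the random seed once, which makes detect() reproducible and removes the need for repeated attempts; a minimal sketch:

from langdetect import DetectorFactory, detect

# Fixing the factory seed once makes every subsequent detect() call
# reproducible; any constant value works.
DetectorFactory.seed = 0

# With the seed set, repeated calls on the same text agree, so the
# retry-and-compare loop above becomes unnecessary.
results = {detect("Guten Morgen, wie geht es dir?") for _ in range(5)}
print(results)  # a single code, e.g. {'de'}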
Example #7
Source File: preprocessing.py From open-solution-toxic-comments with MIT License
def _safe_detect(text):
    try:
        lang = detect(text)
    except Exception:
        lang = 'en'
    return lang
Example #8
Source File: rebuild_comments.py From mehdix.ir with Creative Commons Zero v1.0 Universal
def detect_language(text):
    from langdetect import DetectorFactory, detect
    # Stay consistent between builds
    DetectorFactory.seed = 0
    return detect(text)
Example #9
Source File: translator.py From pyconjpbot with MIT License
def translate(message, cmd, option, text):
    """
    Translate the given string
    http://docs.microsofttranslator.com/text-translate.html#!/default/get_Translate
    """
    if text in ('help', 'list'):
        return

    lang = 'ja'
    if option:
        # translate into the specified language
        _, lang = option.split('-', 1)
    elif detect(text) in ('ja', 'ko'):
        # kanji-heavy text gets detected as ko even when it is Japanese
        # if the text is Japanese, translate it into English
        lang = 'en'

    url = API_BASE_URL + 'Translate'
    headers = {
        'Ocp-Apim-Subscription-Key': settings.TRANSLATOR_API_KEY,
    }
    query = {
        'to': lang,
        'text': text,
    }
    r = requests.get(url, headers=headers, params=query)
    if r.status_code == 400:
        # an error occurred, so show its contents
        error_message = r.text
        if "Message: 'to' must be a valid language" in error_message:
            botsend(message, '`{}` は無効な言語です'.format(lang))  # "`{}` is not a valid language"
        else:
            botsend(message, 'エラーが発生しました\n```\n{}\n```'.format(r.text))  # "An error occurred" plus the response body
        return

    tree = ET.fromstring(r.text)
    botsend(message, tree.text)
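Example #9 documents a real quirk: kanji-heavy Japanese text is sometimes labelled ko. Rather than hard-coding the ('ja', 'ko') pair, one could inspect the full candidate list from detect_langs(); the helper below is a sketch, and its name and the 0.2 threshold are invented for illustration.

from langdetect import detect_langs
from langdetect.lang_detect_exception import LangDetectException

def probable_japanese(text, threshold=0.2):
    # Hypothetical helper: treat the text as Japanese if 'ja' shows up
    # anywhere among the candidates with non-trivial probability.
    try:
        candidates = detect_langs(text)
    except LangDetectException:
        return False
    return any(c.lang == 'ja' and c.prob >= threshold for c in candidates)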
Example #10
Source File: Utilities.py From delft with Apache License 2.0
def detect_lang(x):
    try:
        language = langdetect.detect(x)
    except:
        language = 'unk'
    return language

# language detection with textblob package
Example #11
Source File: clean_chinese.py From nanigonet with MIT License
def main():
    parent_dir = TRAIN_DIR / 'cmn'

    for target_file in ['tatoeba.txt', 'w2c.txt']:
        hant_file = open(TRAIN_DIR / 'cmn-hant' / target_file, mode='w')
        hans_file = open(TRAIN_DIR / 'cmn-hans' / target_file, mode='w')

        with open(parent_dir / target_file) as f:
            for line in f:
                text = line.rstrip()
                if not text:
                    continue

                try:
                    lang = langdetect.detect(text)
                except langdetect.lang_detect_exception.LangDetectException:
                    continue

                if lang in {'zh-tw', 'ko'}:
                    text = HanziConv.toTraditional(text)
                    hant_file.write(text)
                    hant_file.write('\n')
                elif lang == 'zh-cn':
                    text = HanziConv.toSimplified(text)
                    hans_file.write(text)
                    hans_file.write('\n')

        hant_file.close()
        hans_file.close()
Example #12
Source File: clean_data.py From UNMT-SPR with MIT License
def main(args):
    with open(args.input[0], "r", errors='ignore') as frs:
        with open(args.input[1], "r", errors='ignore') as frt:
            data1 = [line for line in frs]
            data2 = [line for line in frt]

    if len(data1) != len(data2):
        print(len(data1), len(data2))
        raise ValueError("length of two files are not equal")

    src_lang = args.lang[0]
    trg_lang = args.lang[1]

    with open(args.input[0] + "." + args.suffix, "w") as fws:
        with open(args.input[1] + "." + args.suffix, "w") as fwt:
            for i in range(len(data1)):
                if i % 10000 == 0:
                    print(i, end='\r')
                try:
                    line1 = data1[i].replace("@@ ", "")
                    line2 = data2[i].replace("@@ ", "")
                    length1 = len(line1.split(' '))
                    length2 = len(line2.split(' '))
                    if length1 / length2 < 0.7 or length2 / length1 < 0.7:
                        continue
                    if detect(line1) != src_lang or detect(line2) != trg_lang:
                        continue
                    fws.write(data1[i])
                    fwt.write(data2[i])
                except:
                    continue
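Since detect() returns only a single best guess, it can be noisy on the short, subword-segmented lines this script filters. A stricter variant could require the top candidate on both sides to match the expected language with high confidence; the sketch below is illustrative, and keep_pair and the 0.9 threshold are assumptions, not part of the original script.

from langdetect import detect_langs
from langdetect.lang_detect_exception import LangDetectException

def keep_pair(line1, src_lang, line2, trg_lang, min_prob=0.9):
    # Hypothetical filter: accept the sentence pair only when each side's
    # top candidate matches the expected language with prob >= min_prob.
    try:
        best1 = detect_langs(line1)[0]
        best2 = detect_langs(line2)[0]
    except LangDetectException:
        return False
    return (best1.lang == src_lang and best1.prob >= min_prob and
            best2.lang == trg_lang and best2.prob >= min_prob)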
Example #13
Source File: text_lang_detect_transformer.py From driverlessai-recipes with Apache License 2.0
def detectLanguageAndEncode(s):
    # mod = importlib.import_module("langdetect")
    # detect_method = getattr(mod, "detect")
    # code = detect_method(s)
    from langdetect import detect
    code = detect(s)
    code_index = TextLangDetectTransformer.language_codes.index(
        code) if code in TextLangDetectTransformer.language_codes else -1
    return code_index
Example #14
Source File: translation.py From pst-extraction with Apache License 2.0
def language(text, override_language=None):
    if override_language:
        return override_language
    try:
        return detect(text)
    except LangDetectException:
        return 'en'
Example #15
Source File: simple_cipher_cracker.py From Awesome-Scripts with MIT License
def main():
    sentences = {}
    ignores = ''
    ciphered_sentance = input('Enter the ciphered sentence: ')
    answer = 0
    while answer != 4:
        try:
            print(
                f'''
Enter the numbers you want to ignore in the deciphering process:
0. Ignore small letters. {SMALL_ALPHABEIT}
1. Ignore capital letters. {CAPITAL_ALPHABEIT}
2. Ignore numbers. {NUMBERS}
3. Custom symbols.
Currently ignoring: {''.join(set(ignores))}
Press Return to continue.
'''
            )
            answer = input('Enter your choice: ')
            if answer == '':  # Check if it's the exit sign (nothing entered)
                print('Deciphering...')
                break
            ignores += ANSWERS[int(answer)]
        except ValueError:  # If it's not an integer
            print('Invalid choice.')
        except KeyError as e:  # If it's not from the ANSWERS dictionary
            if e.args[0] == 3:  # Check if it's a custom ignore
                custom_ignore = input('Enter the symbols to ignore: ')
                ignores += custom_ignore
            else:
                print('Invalid choice.')

    ignore = ''.join(set([SPECIAL, ignores]))

    # Get all the possible combinations
    for key in range(1, 26):
        sentences[key] = decipher(ciphered_sentance, key, ignore)

    # Check whether each candidate looks like English, and print it if so
    found = 0
    for key in sentences.keys():
        try:
            if detect(sentences[key]) == 'en':
                found += 1
                print(f'Maybe {key} is a possibility because I got: "{sentences[key]}"')
        except Exception:
            pass

    # Tell the user if nothing was found
    if found == 0:
        print('I didn\'t find anything...')
        print('Do you want to see every possibility? [y/n]')
        answer = input('')
        if answer == 'y':
            for key in sentences.keys():
                print(f'{key}: {sentences[key]}')
Example #16
Source File: lang_detect_extractor.py From news-please with Apache License 2.0
def _language(self, item):
    """Returns the language of the extracted article by analyzing metatags
    and inspecting the visible text with langdetect"""
    response = item['spider_response'].body
    root = html.fromstring(response)

    # Check for lang-attributes
    lang = root.get('lang')
    if lang is None:
        lang = root.get('xml:lang')

    # Check for general meta tags
    if lang is None:
        meta = root.cssselect('meta[name="language"]')
        if len(meta) > 0:
            lang = meta[0].get('content')

    # Check for open graph tags
    if lang is None:
        meta = root.cssselect('meta[property="og:locale"]')
        if len(meta) > 0:
            lang = meta[0].get('content')

    # Look for <article> elements and inspect the one with the largest payload with langdetect
    if lang is None:
        article_list = []
        for article in root.xpath('//article'):
            article_list.append(re.sub(r'\s+', ' ', article.text_content().strip()))
        if len(article_list) > 0:
            lang = detect(max(article_list))

    # Analyze the whole body with langdetect
    if lang is None:
        try:
            lang = detect(root.text_content().strip())
        except LangDetectException:
            pass

    # Try to normalize output
    if lang is not None:
        # First search for suitable locale in the original output
        matches = self.langcode_pattern.search(lang)
        if matches is not None:
            lang = matches.group(0)
        else:
            # If no match was found, normalize the original output and search again
            normalized = locale.normalize(re.split(r'\s|;|,', lang.strip())[0])
            matches = self.langcode_pattern.search(normalized)
            if matches is not None:
                lang = matches.group(0)

    return lang
Example #17
Source File: precendent_directory_cleaner.py From JusticeAI with MIT License
def remove_files(directory_path):
    """
    Removes precedents that match the regex list or are written in English

    :param directory_path: directory where the precedents are located
    :return: (names of files removed due to regex match, names of files that were in English)
    """
    files_matching_regexes = []
    files_in_english = []
    files_parse = 0
    nb_of_files = len(os.listdir(directory_path))
    Log.write('Filtering precedents')
    for filename in os.listdir(directory_path):
        percent = float(files_parse / nb_of_files) * 100
        stdout.write("\rINFO: Filtering: %f " % percent + "%")
        stdout.flush()
        files_parse += 1
        if filename.endswith(".txt"):
            precedent_file = open(directory_path + filename, "r", encoding="ISO-8859-1")
            file_removed = False

            # remove precedents that match regexes
            for line in precedent_file.readlines():
                for reg in regexes:
                    if reg.search(line):
                        os.remove(directory_path + filename)
                        file_removed = True
                        files_matching_regexes.append(filename)
                        break
                if file_removed:
                    break
            if file_removed:
                precedent_file.close()
                continue

            # remove English precedents
            precedent_file.seek(0)
            file_content = precedent_file.read()
            if detect(file_content) == 'en':
                os.remove(directory_path + filename)
                files_in_english.append(filename)
            precedent_file.close()

    print('')
    Log.write('Done filtering precedents')
    Log.write('Removed {} files in English'.format(str(len(files_in_english))))
    Log.write('Removed {} files without value'.format(str(len(files_matching_regexes))))
    return files_in_english, files_matching_regexes
Example #18
Source File: util.py From linkedevents with MIT License
def separate_scripts(text, scripts):
    """
    Takes in a string and an iterable of language tags and returns an array
    of string paragraphs separated by language. The first language in scripts
    is the default. The paragraphs may be either html (separated by <p> or
    triple <br><br><br> tags) or text (separated by \n or dash).
    :param text: The plain text or html to separate paragraphs in by language.
    :param scripts: Iterable of allowed languages.
    :return:
    """
    # separate the text by paragraphs, matching to select html and plain text delimiters in data
    paragraphs = re.split(r'(</p><p>|\n|</p>|<p>| – |<br><br><br>)+', text)
    separated = {script: '' for script in scripts}
    # the first language given is the default one
    last_language = scripts[0]
    last_paragraph = ''
    for paragraph in paragraphs:
        if paragraph in (r'</p><p>', r'</p>' r'\n', r'<p>', r'<br><br><br>'):
            # skip paragraph breaks to prevent misdetection
            separated[last_language] += paragraph
            last_paragraph = paragraph
            continue
        # replace any misleading tags left
        paragraph_stripped = re.sub(r'(<(/)?strong>)|(<br>)+|&|<a href=.*">|</a>', ' ', paragraph)
        try:
            language = detect(paragraph_stripped)
        except LangDetectException:
            # an exception means no language could be detected
            language = last_language
        # langdetect maps "Simplified Chinese" to "zh-cn"
        # However, we store it as "zh_hans"
        if language == "zh-cn":
            language = "zh_hans"
        if language not in scripts:
            # only detect allowed languages, no exceptions
            language = last_language
        if language != last_language:
            # fix html paragraph breaks after language change
            logger.debug('supported language detected: ' + language)
            if last_paragraph in (r'</p><p>', r'</p>', r'<p>'):
                separated[last_language] = re.sub(r'<p>$', '', separated[last_language])
                separated[language] += r'<p>'
            # remove useless dashes after language change
            if last_paragraph in (r' – ',):
                separated[last_language] = re.sub(r' – $', '', separated[last_language])
            # replace the awful triple-<br>
            if last_paragraph in (r'<br><br><br>',):
                separated[last_language] = re.sub(r'<br><br><br>$', '', separated[last_language])
                separated[last_language] += r'</p>'
                separated[language] += r'<p>'
        separated[language] += paragraph
        last_language = language
        last_paragraph = paragraph
    return separated