Python spacy.__version__ Examples
The following are 8 code examples of the attribute spacy.__version__.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module spacy, or try the search function.
Example #1
Source File: test_spacy_udpipe.py From spacy-udpipe with MIT License | 6 votes |
def test_morph_exception() -> None:
    """Verify that loading RO falls back to ``ignore_tag_map`` on a tag-map error.

    Downloads the Romanian udpipe model, loads it, and parses a short
    sentence; if loading raises ``ValueError``, retries with
    ``ignore_tag_map=True`` and parses again. The parse must yield a doc.
    """
    # NOTE(review): this compares version strings lexicographically, not
    # numerically — confirm SPACY_VERSION is formatted so the check holds.
    assert spacy.__version__ <= SPACY_VERSION

    language = RO
    sample = "Ce mai faci?"
    download(lang=language)

    try:
        pipeline = load(lang=language)
        assert pipeline._meta["lang"] == f"udpipe_{language}"
        parsed = pipeline(sample)
    except ValueError:
        # Fall back: skip the tag map entirely and parse again.
        pipeline = load(lang=language, ignore_tag_map=True)
        assert pipeline._meta["lang"] == f"udpipe_{language}"
        parsed = pipeline(sample)

    assert parsed
Example #2
Source File: sentence_splitter.py From allennlp with Apache License 2.0 | 5 votes |
def __init__(self, language: str = "en_core_web_sm", rule_based: bool = False) -> None:
    """Build a spaCy-backed sentence splitter.

    :param language: name of the spaCy model to load.
    :param rule_based: if True, use spaCy's rule-based sentencizer instead
        of the dependency parser for sentence boundary detection.
    """
    # The dependency parser is only needed when we are NOT rule-based.
    needs_parser = not rule_based
    self.spacy = get_spacy_model(language, parse=needs_parser, ner=False, pos_tags=False)

    if not rule_based:
        return

    # The built-in rule-based component was renamed across spaCy releases.
    # NOTE(review): lexicographic string comparison of versions — works for
    # "2.0.x" vs "2.1" but is not a general version compare; confirm.
    if spacy.__version__ < "2.1":
        pipe_name = "sbd"
    else:
        pipe_name = "sentencizer"

    if not self.spacy.has_pipe(pipe_name):
        component = self.spacy.create_pipe(pipe_name)
        self.spacy.add_pipe(component)
Example #3
Source File: base_pipeline.py From medaCy with GNU General Public License v3.0 | 5 votes |
def get_report(self):
    """
    Generates a report about the pipeline class's configuration
    :return: str
    """
    # Gather the components the report describes.
    learner_name, learner = self.get_learner()
    tokenizer = self.get_tokenizer()
    feature_extractor = self.get_feature_extractor()
    spacy_metadata = self.spacy_pipeline.meta

    # Assemble the report as a list of segments and join once at the end.
    parts = [
        f"{type(self).__name__}\n{self.__doc__}\n\n",
        f"Report created at {time.asctime()}\n\n",
        f"MedaCy Version: {medacy.__version__}\nSpaCy Version: {spacy.__version__}\n",
        f"SpaCy Model: {spacy_metadata['name']}, version {spacy_metadata['version']}\n",
        f"Entities: {self.entities}\n",
        f"Constructor arguments: {self._kwargs}\n\n",
    ]

    # Feature overlayers are optional; each contributes its own sub-report.
    if self.overlayers:
        parts.append("Feature Overlayers:\n\n")
        parts.append("\n\n".join(o.get_report() for o in self.overlayers) + '\n\n')

    # Feature extractor details.
    parts.append(f"Feature Extractor: {type(feature_extractor).__name__} at {inspect.getfile(type(feature_extractor))}\n")
    parts.append(f"\tWindow Size: {feature_extractor.window_size}\n")
    parts.append(f"\tSpaCy Features: {feature_extractor.spacy_features}\n")

    # Name and location of the remaining components.
    parts.append(f"Learner: {learner_name} at {inspect.getfile(type(learner))}\n")
    if self.get_tokenizer():
        parts.append(f"Tokenizer: {type(tokenizer).__name__} at {inspect.getfile(type(tokenizer))}\n")
    else:
        parts.append(f"Tokenizer: spaCy pipeline default\n")

    return "".join(parts)
Example #4
Source File: 20_TestLemmatizer.py From LemmInflect with MIT License | 5 votes |
def __init__(self):
    """Wrap LemmInflect as a lemmatizer under test and warm its caches."""
    global lemminflect
    import lemminflect
    self.name = 'LemmInflect'
    self.version_string = 'LemmInflect version: %s' % lemminflect.__version__
    # Force loading dictionary and model so lazy loading doesn't show up in run times
    lemminflect.getAllLemmas('testing', 'VERB')
    lemminflect.getAllLemmasOOV('xxtesting', 'VERB')  # Use only the dictionary methods
Example #5
Source File: 20_TestLemmatizer.py From LemmInflect with MIT License | 5 votes |
def __init__(self, smodel):
    """Wrap spaCy's internal lemmatizer from the given model name *smodel*."""
    import spacy
    # Load the model once and keep only its lemmatizer callable.
    model = spacy.load(smodel)
    self.lemmatizer = model.vocab.morphology.lemmatizer
    self.name = 'Spacy'
    self.version_string = 'Spacy version: %s' % spacy.__version__
    # get the lemmas for every upos (pos_type='a' will have adv and adj)
Example #6
Source File: 20_TestLemmatizer.py From LemmInflect with MIT License | 5 votes |
def __init__(self):
    """Wrap pattern.en's ``lemma`` function as a lemmatizer under test."""
    global pattern_lemmatize
    # Bind the lemma function at module level so other methods can reach it.
    from pattern.en import lemma as pattern_lemmatize
    import pattern
    self.name = 'PatternEN'
    self.version_string = 'Pattern.en version: %s' % pattern.__version__
    # get the lemmas for every upos (pos_type='a' will have adv and adj)
Example #7
Source File: 20_TestLemmatizer.py From LemmInflect with MIT License | 5 votes |
def __init__(self):
    """Wrap NLTK's WordNet lemmatizer as a lemmatizer under test."""
    import nltk
    self.name = 'NLTK'
    self.version_string = 'NLTK version: %s' % nltk.__version__
    self.lemmatizer = nltk.stem.WordNetLemmatizer()
    # get the lemmas for every upos (pos_type='a' will have adv and adj)
Example #8
Source File: spacy.py From mlflow with Apache License 2.0 | 5 votes |
def get_default_conda_env():
    """
    :return: The default Conda environment for MLflow Models produced by calls to
             :func:`save_model()` and :func:`log_model()`.
    """
    import spacy
    # Pin the exact spacy version so the logged model reloads consistently.
    pip_deps = ["spacy=={}".format(spacy.__version__)]
    return _mlflow_conda_env(
        additional_conda_deps=None,
        additional_pip_deps=pip_deps,
        additional_conda_channels=None,
    )