Python sqlalchemy.ext.declarative.DeclarativeMeta() Examples
The following are 30
code examples of sqlalchemy.ext.declarative.DeclarativeMeta().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
sqlalchemy.ext.declarative
, or try the search function
.
Example #1
Source File: mixins.py From my-dev-space with MIT License | 6 votes |
def to_dict(self, rel=None, backref=None):
    """Serialize this model instance to a dict.

    :param rel: whether to include relationships; defaults to the
        class-level ``RELATIONSHIPS_TO_DICT`` flag when not given.
    :param backref: table of the parent object, used to stop the
        parent <-> child recursion when following relationships.
    :return: dict mapping column/relationship keys to encoded values.
    """
    if rel is None:
        rel = self.RELATIONSHIPS_TO_DICT
    # Plain columns go through the custom encoder.
    res = {
        column.key: self.custom_encoding(attr)
        for attr, column in self.__mapper__.c.items()
    }
    # BUG FIX: the original computed `rel` but then gated on the mere
    # presence of relationships, so RELATIONSHIPS_TO_DICT was ignored.
    # Gate on `rel`, consistent with the sibling to_dict() implementations.
    if rel:
        for attr, relation in self.__mapper__.relationships.items():
            # Avoid a recursive loop between two tables.
            if backref == relation.table:
                continue
            value = getattr(self, attr)
            if value is None:
                res[relation.key] = None
            elif isinstance(value.__class__, DeclarativeMeta):
                # Scalar related object (many-to-one / one-to-one).
                res[relation.key] = value.to_dict(backref=self.__table__)
            else:
                # Collection of related objects (x-to-many).
                res[relation.key] = [
                    i.to_dict(backref=self.__table__) for i in value
                ]
    return res
Example #2
Source File: serializer.py From tache with MIT License | 6 votes |
def default(self, o):
    """JSON-encode datetimes, decimals, and SQLAlchemy result/model objects."""
    if isinstance(o, datetime.datetime):
        # d = datetime_safe.new_datetime(o)
        # return d.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
        return o.strftime("%s %s" % (self.DATE_FORMAT, self.TIME_FORMAT))
    if isinstance(o, datetime.date):
        # d = datetime_safe.new_date(o)
        return o.strftime(self.DATE_FORMAT)
    if isinstance(o, datetime.time):
        return o.strftime(self.TIME_FORMAT)
    if isinstance(o, decimal.Decimal):
        return str(o)
    if isinstance(o, ResultProxy):
        return list(o)
    if isinstance(o, RowProxy):
        return dict(o)
    if isinstance(o.__class__, DeclarativeMeta):
        # SQLAlchemy model instance: dump its non-private instance attributes.
        return {
            name: value
            for name, value in o.__dict__.items()
            if not name.startswith('_')
        }
    return super(AwareJSONEncoder, self).default(o)
Example #3
Source File: mongoquery_settings_handler.py From py-mongosql with BSD 2-Clause "Simplified" License | 6 votes |
def validate_related_settings(self, bags: mongosql.ModelPropertyBags):
    """Validate the settings for related entities.

    Only the keys of "related" and "related_models" are checked.

    :raises KeyError: Invalid keys
    """
    # "related": every key must name a relationship (or be the '*' wildcard)
    unknown_relations = (set(self._nested_relation_settings.keys())
                         - bags.relations.names
                         - {'*'})
    if unknown_relations:
        raise KeyError('Invalid relationship name provided to "related": {!r}'
                       .format(list(unknown_relations)))

    # "related_models": every key must be a model class (or the '*' wildcard)
    unknown_models = {key
                      for key in self._nested_model_settings.keys()
                      if not isinstance(key, DeclarativeMeta)} - {'*'}
    if unknown_models:
        raise KeyError('Invalid related model object provided to "related_models": {!r}'
                       .format(list(unknown_models)))
Example #4
Source File: outputmixin.py From maniwani with MIT License | 6 votes |
def to_dict(self, rel=None, backref=None):
    """Serialize this model to a dict of columns, optionally with relationships.

    :param rel: include relationships? Defaults to RELATIONSHIPS_TO_DICT.
    :param backref: parent table, used to break recursion between two tables.
    """
    if rel is None:
        rel = self.RELATIONSHIPS_TO_DICT
    out = {col.key: getattr(self, name)
           for name, col in self.__mapper__.c.items()}
    if rel:
        for name, relation in self.__mapper__.relationships.items():
            # Avoid a recursive loop between two tables.
            if backref == relation.table:
                continue
            related = getattr(self, name)
            if related is None:
                out[relation.key] = None
            elif isinstance(related.__class__, DeclarativeMeta):
                out[relation.key] = related.to_dict(backref=self.__table__)
            else:
                out[relation.key] = [item.to_dict(backref=self.__table__)
                                     for item in related]
    return out
Example #5
Source File: bulk.py From py-mongosql with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, Model: DeclarativeMeta, entity_dict: dict, *,
             ordinal_number: int = None, pk_names: Sequence[str] = None):
    """Wrap an entity dict, remembering its model, position, and primary key.

    :param Model: the model class this dict belongs to
    :param entity_dict: the entity dict to wrap
    :param ordinal_number: position of this dict in the incoming list
    :param pk_names: primary key field names (looked up from Model if omitted)
    """
    super().__init__(entity_dict)
    self.Model = Model
    self.ordinal_number = ordinal_number

    # Primary key names: use the provided list; look them up if not provided.
    if not pk_names:
        _, pk_names = model_primary_key_columns_and_names(Model)

    # Assemble the primary key tuple; any missing field means "no PK given".
    try:
        self.primary_key_tuple = tuple(entity_dict[name] for name in pk_names)
    except KeyError:
        self.has_primary_key = False
        self.primary_key_tuple = None
    else:
        self.has_primary_key = True
Example #6
Source File: bag.py From py-mongosql with BSD 2-Clause "Simplified" License | 6 votes |
def for_model(cls, model: DeclarativeMeta) -> 'ModelPropertyBags':
    """Get the bags for a model.

    Prefer this over __init__(): it builds a ModelPropertyBags only once
    per model class.
    """
    # Every model class must get its very own ModelPropertyBags, never an
    # inherited one.  model.__dict__ would give that isolation, but Python 3
    # classes use an immutable `mappingproxy` there, so we keep our own cache.
    try:
        return cls.__bags_per_model_cache[model]
    except KeyError:
        bags = cls.__bags_per_model_cache[model] = cls(model)
        return bags
Example #7
Source File: crudhelper.py From py-mongosql with BSD 2-Clause "Simplified" License | 6 votes |
def __init__(self, model: DeclarativeMeta, writable_properties=True, **handler_settings):
    """Init CRUD helper

    :param model: The model to work with
    :param writable_properties: whether @property attributes accept input
    :param handler_settings: Settings for the MongoQuery used to make queries
    """
    self.model = model
    self.handler_settings = handler_settings
    self.bags = self._MODEL_PROPERTY_BAGS_CLS.for_model(model)
    self.reusable_mongoquery = Reusable(
        self._MONGOQUERY_CLS(self.model, handler_settings))  # type: MongoQuery

    # Settings
    self.writable_properties = writable_properties

    # `legacy_fields` are remembered so they can be ignored in the input.
    self.legacy_fields = self.reusable_mongoquery.handler_project.legacy_fields
Example #8
Source File: api.py From zvt with MIT License | 6 votes |
def get_schemas(provider: str) -> List[DeclarativeMeta]:
    """
    get domain schemas supported by the provider

    :param provider: provider name
    :return: schema classes registered for all of the provider's databases
    """
    schemas = []
    for candidate, db_names in zvt_context.provider_map_dbnames.items():
        if candidate != provider:
            continue
        for db_name in db_names:
            found = zvt_context.dbname_map_schemas.get(db_name)
            if found:
                schemas += found
    return schemas
Example #9
Source File: alchemy_json_encoder.py From AppServer with MIT License | 6 votes |
def decode(obj):
    """Recursively convert a SQLAlchemy model instance into a JSON-able dict.

    Non-model objects (and falsy values) are returned unchanged.
    """
    if not (obj and isinstance(obj.__class__, DeclarativeMeta)):
        return obj
    fields = {}
    public_names = [name for name in dir(obj)
                    if not name.startswith('_')
                    and not name.endswith('_')
                    and name != 'metadata']
    for name in public_names:
        data = obj.__getattribute__(name)
        # NOTE: datetime must be tested before date (datetime subclasses date).
        if isinstance(data, datetime.datetime):
            fields[name] = data.timestamp()
        elif isinstance(data, datetime.date):
            fields[name] = data.isoformat()
        elif isinstance(data, datetime.timedelta):
            fields[name] = (datetime.datetime.min + data).time().isoformat()
        elif isinstance(data, int) or isinstance(data, float) or isinstance(data, str):
            fields[name] = data
        elif isinstance(data, enum.Enum):
            fields[name] = data.value
        elif isinstance(data.__class__, DeclarativeMeta):
            # Nested model instance: recurse.
            fields[name] = AlchemyEncoder.decode(data)
        elif isinstance(data, list):
            fields[name] = [AlchemyEncoder.decode(d) for d in data]
    return fields
Example #10
Source File: io.py From spandex with BSD 3-Clause "New" or "Revised" License | 5 votes |
def update_df(df, column, table):
    """
    Add or update column in DataFrame from database table.

    Database table must contain column with the same name as
    DataFrame's index (df.index.name).

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame to return an updated copy of.
    column : sqlalchemy.orm.attributes.InstrumentedAttribute
        Column ORM object to update DataFrame with.
    table : sqlalchemy.ext.declarative.DeclarativeMeta
        Table ORM class containing columns to update with and index on.

    Returns
    -------
    df : pandas.DataFrame

    """
    # The table column matching the DataFrame's index joins the two sides.
    index_column = getattr(table, df.index.name)
    # Query the index column together with the column of interest.
    with db.session() as sess:
        q = sess.query(index_column, column)
    # Align on the index and overwrite (or create) the target column.
    new_df = db_to_df(q, index_col=df.index.name)
    df[column.name] = new_df[column.name]
    return df
Example #11
Source File: io.py From spandex with BSD 3-Clause "New" or "Revised" License | 5 votes |
def duplicate(self, table, new_table_name, schema_name='public'): """ Duplicate a PostgreSQL table, including indexes and constraints. Parameters ---------- table : sqlalchemy.ext.declarative.api.DeclarativeMeta Table ORM class to duplicate. new_table_name : str Name of new table. schema_name : str, optional Name of schema to contain the new table. Default is 'public'. Returns ------- new_table : sqlalchemy.ext.declarative.api.DeclarativeMeta Duplicated ORM table class. """ # Copy schema including constraints and indexes, then insert values. # This may be inefficient, unfortunately. t = table.__table__ with db.cursor() as cur: cur.execute(""" CREATE TABLE {nschema}.{ntable} (LIKE {oschema}.{otable} INCLUDING ALL); INSERT INTO {nschema}.{ntable} SELECT * FROM {oschema}.{otable}; """.format(nschema=schema_name, ntable=new_table_name, oschema=t.schema, otable=t.name)) # Refresh ORM and return table class. db.refresh() return getattr(getattr(db.tables, schema_name), new_table_name)
Example #12
Source File: io.py From spandex with BSD 3-Clause "New" or "Revised" License | 5 votes |
def add_column(table, column_name, type_name, default=None):
    """
    Add column to table.

    Parameters
    ----------
    table : sqlalchemy.ext.declarative.DeclarativeMeta
        Table ORM class to add column to.
    column_name : str
        Name of column to add to table.
    type_name : str
        Name of column type.
    default : str, optional
        Default value for column. Must include quotes if string.

    Returns
    -------
    column : sqlalchemy.orm.attributes.InstrumentedAttribute
        Column ORM object that was added.

    """
    # Build the optional DEFAULT clause.
    if default:
        default_str = "DEFAULT {}".format(default)
    else:
        default_str = ""
    t = table.__table__
    # NOTE(review): identifiers and the default value are interpolated
    # straight into SQL; assumes trusted callers, not user input — confirm.
    with db.cursor() as cur:
        cur.execute("""
            ALTER TABLE {schema}.{table}
            ADD COLUMN {column} {type} {default_str};
        """.format(
            schema=t.schema, table=t.name, column=column_name,
            type=type_name, default_str=default_str))
    # Reload the ORM so the new column is visible, then hand it back.
    db.refresh()
    return getattr(table, column_name)
Example #13
Source File: db_utils.py From IBATS_HuobiFeeder_old with GNU General Public License v3.0 | 5 votes |
def default(self, obj):
    """JSON-encode SQLAlchemy model instances and date values.

    Model instances become a dict of their public attributes; attributes
    that json cannot encode fall back to ISO strings (for date/time types)
    or None. Anything else defers to the base JSONEncoder.
    """
    if isinstance(obj.__class__, DeclarativeMeta):
        # an SQLAlchemy class
        fields = {}
        for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
            data = obj.__getattribute__(field)
            try:
                json.dumps(data)  # this will fail on non-encodable values, like other classes
                fields[field] = data
            except TypeError:
                # FIX: removed a leftover debug `print(data)` call here.
                # Special handling for date/time values (original comment,
                # in Chinese: "added handling for datetime").
                if isinstance(data, datetime):
                    fields[field] = data.isoformat()
                elif isinstance(data, date):
                    fields[field] = data.isoformat()
                elif isinstance(data, timedelta):
                    fields[field] = (datetime.min + data).time().isoformat()
                else:
                    fields[field] = None
        # a json-encodable dict
        return fields
    elif isinstance(obj, date):
        return json.dumps(date_2_str(obj))
    return json.JSONEncoder.default(self, obj)
Example #14
Source File: datastore.py From eventsourcing with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(
    self,
    settings: SQLAlchemySettings,
    base: DeclarativeMeta = Base,
    tables: Optional[Sequence] = None,
    connection_strategy: str = "plain",
    session: Optional[Union[Session, scoped_session]] = None,
):
    """Init the datastore, borrowing the given session's engine if provided."""
    super(SQLAlchemyDatastore, self).__init__(settings=settings)
    self._was_session_created_here = False
    self._base = base
    self._tables = tables
    self._connection_strategy = connection_strategy
    self._session = session
    # When a session is supplied, reuse its engine; otherwise it is created
    # later on demand.
    if session:
        self._engine: Optional[Engine] = session.get_bind()
    else:
        self._engine = None
Example #15
Source File: io.py From spandex with BSD 3-Clause "New" or "Revised" License | 5 votes |
def db_to_db(query, table_name, schema=None, view=False, pk='id'):
    """
    Create a table or view from Query, table, or ORM objects, like columns.

    Do not use to duplicate a table. The new table will not contain
    the same indexes or constraints.

    Parameters
    ----------
    query : sqlalchemy.orm.Query,
            sqlalchemy.ext.declarative.DeclarativeMeta,
            or iterable
        Query ORM object, table ORM class, or list of ORM objects to query,
        like columns.
    table_name : str
        Name of table or view to create.
    schema : schema class, optional
        Schema of table to create. Defaults to public.
    view : bool, optional
        Whether to create a view instead of a table. Defaults to False.
    pk : str, optional
        Name of the serial primary key column added to the new table.

    Returns
    -------
    None

    """
    # Resolve the schema-qualified destination name.
    if schema:
        schema_name = schema.__name__
    else:
        schema_name = 'public'
    qualified_name = schema_name + "." + table_name
    # Normalize the input into a Query object.
    q = db_to_query(query)
    # Create new table from results of the query.
    with db.session() as sess:
        sess.execute(CreateTableAs(qualified_name, q, view))
        if pk:
            # NOTE(review): names are interpolated straight into SQL;
            # assumes trusted identifiers, not user input — confirm.
            sess.execute("""
                ALTER TABLE {} ADD COLUMN {} serial PRIMARY KEY;
            """.format(qualified_name, pk))
    # Refresh the ORM so the new table is visible.
    db.refresh()
Example #16
Source File: customjsonencoder.py From maniwani with MIT License | 5 votes |
def default(self, obj):
    """Serialize SQLAlchemy model instances via to_dict(); defer otherwise."""
    if not isinstance(obj.__class__, DeclarativeMeta):
        return super(CustomJSONEncoder, self).default(obj)
    return obj.to_dict()
Example #17
Source File: extensions.py From flask-shop with BSD 3-Clause "New" or "Revised" License | 5 votes |
def make_declarative_base(self, model, metadata=None):
    """Build (or adapt) the declarative base class all models inherit from."""
    if not isinstance(model, DeclarativeMeta):
        # `model` is a plain mixin class: wrap it into a declarative base.
        model = declarative_base(
            cls=model, name="Model", metadata=metadata, metaclass=CombinedMeta
        )

    # Honour an explicitly supplied MetaData even on a pre-built base.
    if metadata is not None and model.metadata is not metadata:
        model.metadata = metadata

    # Attach the Flask-SQLAlchemy query interface.
    if not getattr(model, "query_class", None):
        model.query_class = self.Query
    model.query = _QueryProperty(self)
    return model
Example #18
Source File: io.py From spandex with BSD 3-Clause "New" or "Revised" License | 5 votes |
def db_to_df(query, index_col=None):
    """
    Return DataFrame from Query, table, or ORM objects, like columns.

    Parameters
    ----------
    query : sqlalchemy.orm.Query,
            sqlalchemy.ext.declarative.DeclarativeMeta,
            or iterable
        Query ORM object, table ORM class, or list of ORM objects to query,
        like columns.
    index_col : str, optional
        Name of column to use as DataFrame index. If provided, column
        must be contained in query.

    Returns
    -------
    df : pandas.DataFrame

    """
    q = db_to_query(query)

    # Work out the column names for the resulting frame.
    entities = q.column_descriptions
    if len(entities) == 1 and isinstance(entities[0]['type'], DeclarativeMeta):
        # Whole-table query: column_descriptions describes the table itself,
        # not its columns, so read the names off the table definition.
        table = entities[0]['type']
        column_names = table.__table__.columns.keys()
    else:
        column_names = [desc['name'] for desc in entities]

    # Stream query records into a DataFrame.
    records = (row.__dict__ for row in q.all())
    df = pd.DataFrame.from_records(records, columns=column_names,
                                   coerce_float=True)
    if index_col:
        df.set_index(index_col, inplace=True)
    return df
Example #19
Source File: task_hierarchy_io.py From anima with MIT License | 5 votes |
def default(self, obj):
    """JSON-encode SQLAlchemy model instances, guarding against cycles.

    Visited objects are tracked in ``self._visited_objs`` so a cyclic object
    graph serializes each object once; anything unencodable becomes None.
    """
    from sqlalchemy.ext.declarative import DeclarativeMeta
    if isinstance(obj.__class__, DeclarativeMeta):
        # don't re-visit self
        if obj in self._visited_objs:
            return None
        # do not append if this is a type instance
        # (presumably Type instances are shared and safe to repeat — confirm)
        if obj.entity_type != 'Type':
            self._visited_objs.append(obj)

        # an SQLAlchemy class: collect its public, non-callable attributes
        fields = {}
        for field in [x for x in dir(obj)
                      if not x.startswith('_') and x != 'metadata']:
            # skip ignore fields
            if field in self.ignore_fields:
                continue
            # skip callables
            if callable(obj.__getattribute__(field)):
                continue
            try:
                fields[field] = obj.__getattribute__(field)
            except (AttributeError, TypeError, NotImplementedError,
                    RuntimeError):
                # best-effort: some attributes raise on access; skip them
                pass
        # a json-encodable dict
        return fields
    try:
        return json.JSONEncoder.default(self, obj)
    except TypeError:
        # unknown/unencodable objects degrade to null rather than failing
        return None
Example #20
Source File: model.py From planespotter with MIT License | 5 votes |
def should_set_tablename(cls):
    """Determine whether ``__tablename__`` should be automatically generated
    for a model.

    * If no class in the MRO sets a name, one should be generated.
    * If a declared attr is found, it should be used instead.
    * If a name is found, it should be used if the class is a mixin,
      otherwise one should be generated.
    * Abstract models should not have one generated.

    Later, :meth:`._BoundDeclarativeMeta.__table_cls__` will determine if the
    model looks like single or joined-table inheritance. If no primary key is
    found, the name will be unset.
    """
    # Abstract models, and classes with no declarative base anywhere in
    # their MRO, never get an auto-generated name.
    if (
        cls.__dict__.get('__abstract__', False)
        or not any(isinstance(b, DeclarativeMeta) for b in cls.__mro__[1:])
    ):
        return False

    # Walk the MRO for the closest explicitly set __tablename__.
    for base in cls.__mro__:
        if '__tablename__' not in base.__dict__:
            continue

        # A declared_attr computes the name later — don't override it.
        if isinstance(base.__dict__['__tablename__'], declared_attr):
            return False

        # NOTE: returns on the FIRST base defining __tablename__.
        # Generate a name only when the found definition is inherited from a
        # concrete declarative model (single/joined-table inheritance) —
        # not when this class set it itself, the base is abstract, or the
        # base is a plain mixin (not a DeclarativeMeta instance).
        return not (
            base is cls
            or base.__dict__.get('__abstract__', False)
            or not isinstance(base, DeclarativeMeta)
        )

    # No __tablename__ anywhere in the MRO: generate one.
    return True
Example #21
Source File: __init__.py From planespotter with MIT License | 5 votes |
def make_declarative_base(self, model, metadata=None):
    """Creates the declarative base that all models will inherit from.

    :param model: base model class (or a tuple of base classes) to pass
        to :func:`~sqlalchemy.ext.declarative.declarative_base`. Or a class
        returned from ``declarative_base``, in which case a new base class
        is not created.
    :param metadata: :class:`~sqlalchemy.MetaData` instance to use, or
        None to use SQLAlchemy's default.

    .. versionchanged 2.3.0::
        ``model`` can be an existing declarative base in order to support
        complex customization such as changing the metaclass.
    """
    if not isinstance(model, DeclarativeMeta):
        # Plain mixin class (or tuple): wrap it into a declarative base.
        model = declarative_base(
            cls=model,
            name='Model',
            metadata=metadata,
            metaclass=DefaultMeta,
        )

    # if user passed in a declarative base and a metaclass for some reason,
    # make sure the base uses the metaclass
    if metadata is not None and model.metadata is not metadata:
        model.metadata = metadata

    # Wire up the Flask-SQLAlchemy query interface on the base.
    if not getattr(model, 'query_class', None):
        model.query_class = self.Query
    model.query = _QueryProperty(self)
    return model
Example #22
Source File: serializer.py From tache with MIT License | 5 votes |
def _encode_object(o):
    """Encode date/time and decimal types, and also ResultProxy/RowProxy
    of SQLAlchemy.

    Lists/tuples are encoded recursively; unknown objects pass through.
    """
    DATE_FORMAT = "%Y-%m-%d"
    TIME_FORMAT = "%H:%M:%S"
    if type(o) in (list, tuple):
        return [_encode_object(i) for i in o]
    # BUG FIX: the original tuple contained `None` instead of `type(None)`,
    # so the membership test could never match a None value.
    # (`long`/`unicode` indicate this module targets Python 2.)
    elif type(o) in (int, long, float, str, unicode, bool, dict, type(None)):
        return o
    elif isinstance(o, datetime.datetime):
        return o.strftime("%s %s" % (DATE_FORMAT, TIME_FORMAT))
    elif isinstance(o, datetime.date):
        return o.strftime(DATE_FORMAT)
    elif isinstance(o, datetime.time):
        return o.strftime(TIME_FORMAT)
    elif isinstance(o, decimal.Decimal):
        return str(o)
    elif isinstance(o, ResultProxy):
        return _encode_object(list(o))
    elif isinstance(o, RowProxy):
        return dict(o)
    elif isinstance(o.__class__, DeclarativeMeta):
        # SQLAlchemy model instance: dump non-private instance attributes.
        fields = {}
        instance_dict = o.__dict__
        for field in instance_dict:
            if not field.startswith('_'):
                fields[field] = instance_dict[field]
        return fields
    else:
        return o
Example #23
Source File: models.py From backend with GNU General Public License v2.0 | 5 votes |
def default(self, obj):
    """Best-effort JSON encoding for SQLAlchemy model instances."""
    if not isinstance(obj.__class__, DeclarativeMeta):
        return json.JSONEncoder.default(self, obj)
    # an SQLAlchemy class: dump every public, JSON-encodable attribute
    fields = {}
    attr_names = [name for name in dir(obj)
                  if not name.startswith('_') and name != 'metadata']
    for name in attr_names:
        value = obj.__getattribute__(name)
        try:
            json.dumps(value)  # this will fail on non-encodable values, like other classes
        except TypeError:
            fields[name] = None
        else:
            fields[name] = value
    # a json-encodable dict
    return fields
Example #24
Source File: query.py From py-mongosql with BSD 2-Clause "Simplified" License | 5 votes |
def aliased(self, model: DeclarativeMeta) -> 'MongoQuery': """ Make a query to an aliased model instead. This is used by MongoJoin handler to issue subqueries. Note that the method modifies the current object and does not make a copy! Note: should always be called after as_relation_of(), not before! :param model: Aliased model """ # Aliased bags self.bags = self.bags.aliased(model) self.model = model # Aliased loader interface # Currently, our join path looks like this: [..., User] # Now, when we're using an alias instead, we have to replace that last element with an alias too # SqlAlchemy 1.2.x used to work well without doing it; # SqlAlchemy 1.3.x now requires adapting a relationship by using of_type() on it. # See: https://github.com/sqlalchemy/sqlalchemy/issues/4566 if self._join_path: # not empty # Okay. First. Replace the last element on the join path with the aliased model's relationship new_join_path = self._join_path[0:-1] + (self._join_path[-1].of_type(model),) # Second. Apply the new join path self.as_relation(new_join_path) else: # empty self._as_relation = Load(self.model) # use the alias # Aliased handlers for handler_name in self.HANDLER_ATTR_NAMES: setattr(self, handler_name, getattr(self, handler_name).aliased(model)) return self
Example #25
Source File: json_encoder.py From safrs with GNU General Public License v3.0 | 5 votes |
def default(self, obj, **kwargs):
    """
    override the default json encoding

    :param obj: object to be encoded
    :return: encoded/serialized object
    """
    # jsonapi "included" handling comes first
    if obj is Included:
        return Included.encode()
    if isinstance(obj, Included):
        return obj.encode()
    if isinstance(obj, SAFRSBase):
        return obj._s_jsonapi_encode()
    # plain value types
    if isinstance(obj, datetime.datetime):
        return obj.isoformat(" ")
    if isinstance(obj, (datetime.date, datetime.time)):
        return obj.isoformat()
    if isinstance(obj, set):
        return list(obj)
    if isinstance(obj, DeclarativeMeta):
        return self.sqla_encode(obj)
    if isinstance(obj, SAFRSFormattedResponse):
        return obj.to_dict()
    if isinstance(obj, UUID):  # pragma: no cover
        return str(obj)
    if isinstance(obj, decimal.Decimal):  # pragma: no cover
        return str(obj)
    if isinstance(obj, bytes):  # pragma: no cover
        safrs.log.warning("bytes obj, override SAFRSJSONEncoder")
        return obj.hex()

    # We shouldn't get here in a normal setup
    # getting here means we already abused safrs... and we're no longer jsonapi compliant
    if not is_debug():  # pragma: no cover
        # only continue if in debug mode
        safrs.log.warning('JSON Encoding Error: Unknown object type "{}" for {}'.format(type(obj), obj))
        return {"error": "SAFRSJSONEncoder invalid object"}

    return self.ghetto_encode(obj)
Example #26
Source File: bulk.py From py-mongosql with BSD 2-Clause "Simplified" License | 5 votes |
def model_primary_key_columns_and_names(Model: DeclarativeMeta) -> 'Tuple[Sequence[Column], List[str]]':
    """ Get the list of primary key columns and their names as two separate tuples

    BUG FIX: the return annotation used a tuple literal
    ``(Sequence[Column], List[str])``, which is not a valid type annotation.
    A string forward reference to ``Tuple[...]`` is used instead; it is not
    evaluated at runtime, so no extra import is required.

    Example:

        pk_columns, pk_names = model_primary_key_columns_and_names(models.User)
        pk_columns  # -> (models.User.id, )
        pk_names  # -> ('id', )
    """
    # Ask the SQLAlchemy inspector for the PK columns, then pluck their keys.
    pk_columns: Sequence[Column] = inspect(Model).primary_key
    pk_names: List[str] = [col.key for col in pk_columns]
    return pk_columns, pk_names
Example #27
Source File: bulk.py From py-mongosql with BSD 2-Clause "Simplified" License | 5 votes |
def filter_many_objects_by_list_of_primary_keys(Model: DeclarativeMeta, entity_dicts: Sequence[dict]) -> BinaryExpression:
    """ Build an expression to load many objects from the database by their primary keys

    This function uses SQL tuples to build an expression which looks like this:

        SELECT * FROM users WHERE (uid, login) IN ((1, 'vdmit11'), (2, 'kolypto'));

    Example:

        entity_dicts = [
            {'id': 1, ...},
            {'id': 2, ...},
            ...
        ]
        ssn.query(models.User).filter(
            filter_many_objects_by_list_of_primary_keys(models.User, entity_dicts)
        )

    Args:
        Model: the model to query
        entity_dicts: list of entity dicts to pluck the PK values from

    Returns:
        The condition for filter()

    Raises:
        KeyError: one of `entity_dicts` did not contain a full primary key set of fields
    """
    pk_columns, pk_names = model_primary_key_columns_and_names(Model)

    # Every object is represented by its primary key tuple.
    pk_value_tuples = (
        tuple(entity_dict[name] for name in pk_names)
        for entity_dict in entity_dicts
    )

    # Build: (pk_col_a, pk_col_b, ...) IN ((val1, val2, ...), ...)
    # Thanks @vdmit11 for this beautiful approach!
    return sql_tuple(*pk_columns).in_(pk_value_tuples)
Example #28
Source File: bulk.py From py-mongosql with BSD 2-Clause "Simplified" License | 5 votes |
def from_entity_dicts(cls, Model: DeclarativeMeta, entity_dicts: Sequence[dict], *,
                      preprocess: callable = None,
                      pk_names: Sequence[str] = None) -> Sequence['EntityDictWrapper']:
    """ Given a list of entity dicts, create a list of EntityDictWrappers with ordinal numbers

    If any dicts are already wrapped with EntityDictWrapper, they are not
    re-wrapped; but be careful to maintain their ordinal numbers, or the
    client will have difficulties!

    Example:

        _, pk_names = model_primary_key_columns_and_names(Model)
        entity_dicts = EntityDictWrapper.from_entity_dicts(models.User, [
            {'id': 1, 'login': 'kolypto'},
            {          'login': 'vdmit11'},
        ], pk_names=pk_names)
    """
    # Resolve primary key names once, up front.
    if not pk_names:
        _, pk_names = model_primary_key_columns_and_names(Model)

    # Wrap every plain dict; keep already-wrapped entries untouched.
    wrapped = []
    for i, entity_dict in enumerate(entity_dicts):
        if isinstance(entity_dict, EntityDictWrapper):
            wrapped.append(entity_dict)
        else:
            wrapped.append(cls(Model, entity_dict,
                               ordinal_number=i, pk_names=pk_names))
    return wrapped

# Object states
Example #29
Source File: api.py From zvt with MIT License | 5 votes |
def get_db_name(data_schema: DeclarativeMeta) -> str:
    """
    get db name of the domain schema

    :param data_schema: the domain schema class
    :return: the matching db name (None when no registered base matches)
    """
    # A schema belongs to the db whose declarative base it subclasses.
    return next(
        (db_name
         for db_name, base in zvt_context.dbname_map_base.items()
         if issubclass(data_schema, base)),
        None,
    )
Example #30
Source File: api.py From zvt with MIT License | 5 votes |
def table_name_to_domain_name(table_name: str) -> str:
    """
    the rules for table_name -> domain_class name

    BUG FIX: the return annotation claimed ``DeclarativeMeta``, but the
    function builds and returns the CamelCase *name* (a str), not the class.

    :param table_name: snake_case table name, e.g. 'stock_day_kdata'
    :return: CamelCase domain class name, e.g. 'StockDayKdata'
    """
    # Capitalize each underscore-separated part and glue them together.
    return ''.join(part.capitalize() for part in table_name.split('_'))