Python sqlalchemy.sql.select() Examples
The following are 30 code examples of sqlalchemy.sql.select().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module sqlalchemy.sql, or try the search function.
Example #1
Source File: state.py From drydock with Apache License 2.0 | 8 votes |
def get_boot_action(self, action_id):
    """Look up a single boot action row by its ULID.

    :param action_id: base32-encoded ULID string identifying the boot action
    :return: dict of the row with binary columns coerced to bytes/UUID,
             or None when no row matches (also None after a logged error)
    """
    try:
        with self.db_engine.connect() as conn:
            stmt = self.ba_status_tbl.select().where(
                self.ba_status_tbl.c.action_id ==
                ulid2.decode_ulid_base32(action_id))
            row = conn.execute(stmt).fetchone()
            if row is None:
                return None
            result = dict(row)
            # Normalize driver-specific binary column types.
            result['action_id'] = bytes(result['action_id'])
            result['identity_key'] = bytes(result['identity_key'])
            result['task_id'] = uuid.UUID(bytes=result['task_id'])
            return result
    except Exception as ex:
        # Best effort: log the failure and fall through (implicit None).
        self.logger.error(
            "Error querying boot action %s" % action_id, exc_info=ex)
Example #2
Source File: relations.py From blitzdb with MIT License | 7 votes |
def append(self, obj):
    """Link *obj* to this object through the relationship table.

    Saves *obj* first if it has no primary key yet, and inserts the link
    row only when it does not already exist.
    """
    with self.obj.backend.transaction(implicit=True):
        # Persist the related object first so it has a primary key.
        if obj.pk is None:
            self.obj.backend.save(obj)
        rel_table = self.params['relationship_table']
        link_condition = and_(
            rel_table.c[self.params['related_pk_field_name']] == obj.pk,
            rel_table.c[self.params['pk_field_name']] == self.obj.pk,
        )
        count_stmt = select([func.count(text('*'))]).where(link_condition)
        existing = self.obj.backend.connection.execute(count_stmt).first()[0]
        if existing:
            # The link row is already present; nothing to do.
            return
        link_values = {
            self.params['pk_field_name']: self.obj.pk,
            self.params['related_pk_field_name']: obj.pk,
        }
        self.obj.backend.connection.execute(
            rel_table.insert().values(**link_values))
        # Invalidate any cached query results for this relation.
        self._queryset = None
Example #3
Source File: portfolio_manager.py From Titan with GNU General Public License v3.0 | 7 votes |
def get_profit_for_pair(exchange, pair):
    """Compute cumulative profit for an exchange/pair over all trades.

    Rows alternate buy/sell, buy first (even index = buy, odd = sell);
    buys are subtracted and sells added to arrive at the profit.
    """
    total = 0
    rows = conn.execute(
        select([database.TradingPositions]).where(
            and_(database.TradingPositions.c.Exchange == exchange,
                 database.TradingPositions.c.Pair == pair)))
    for index, row in enumerate(rows):
        # Column 5 holds the position's value.
        if index % 2 == 0:
            total -= row[5]
        else:
            total += row[5]
    return total
Example #4
Source File: 016_uuids_everywhere.py From daf-recipes with GNU General Public License v3.0 | 7 votes |
def create_uuids(migrate_engine, primary_table_name, revision_table_name):
    """Replace integer ids with fresh uuids in *primary_table_name* and
    resync the revision table (if any) from its continuity ids."""
    # Column types were changed earlier in the migration, so reflect afresh.
    metadata = MetaData(migrate_engine)
    primary_table = Table(primary_table_name, metadata, autoload=True)
    if revision_table_name:
        revision_table = Table(revision_table_name, metadata, autoload=True)
    # fetchall wouldn't be optimal with really large data sets,
    # but fewer than ~20k ids are expected here.
    rows = migrate_engine.execute(select([primary_table.c.id])).fetchall()
    for old_id in (row[0] for row in rows):
        new_id = make_uuid()
        migrate_engine.execute(
            primary_table.update()
            .where(primary_table.c.id == old_id)
            .values(id=new_id))
    if revision_table_name:
        # Ensure each id in the revision table matches its continuity id.
        migrate_engine.execute(
            revision_table.update().values(id=revision_table.c.continuity_id))
Example #5
Source File: 5795c29b2c7a_.py From evesrp with BSD 2-Clause "Simplified" License | 7 votes |
def downgrade():
    """Convert relative_modifier.value back from Numeric(8, 5) to Float."""
    mod_table = table(
        'relative_modifier',
        column('id', sa.Integer),
        column('value', sa.Numeric(precision=8, scale=5)),
        column('float_value', sa.Float))
    # Stage the converted data in a temporary column.
    op.add_column('relative_modifier', sa.Column('float_value', sa.Float))
    conn = op.get_bind()
    rows = conn.execute(select([mod_table.c.id, mod_table.c.value]))
    for row_id, numeric_value in rows:
        stmt = (update(mod_table)
                .where(mod_table.c.id == row_id)
                .values({'float_value': float(numeric_value)}))
        conn.execute(stmt)
    # Swap the staged column into place under the original name.
    op.drop_column('relative_modifier', 'value')
    op.alter_column('relative_modifier', 'float_value', nullable=True,
                    new_column_name='value', existing_type=sa.Float)
Example #6
Source File: 5795c29b2c7a_.py From evesrp with BSD 2-Clause "Simplified" License | 7 votes |
def upgrade():
    """Convert relative_modifier.value from Float to Numeric(8, 5)."""
    mod_table = table(
        'relative_modifier',
        column('id', sa.Integer),
        column('value', sa.Float),
        column('numeric_value', sa.Numeric(precision=8, scale=5)))
    # Stage the converted data in a temporary column.
    op.add_column(
        'relative_modifier',
        sa.Column('numeric_value', sa.Numeric(precision=8, scale=5)))
    conn = op.get_bind()
    rows = conn.execute(select([mod_table.c.id, mod_table.c.value]))
    # Quantize to the five decimal places the new column stores.
    quantum = Decimal(10) ** -5
    for row_id, float_value in rows:
        converted = Decimal(float_value).quantize(quantum)
        stmt = (update(mod_table)
                .where(mod_table.c.id == row_id)
                .values({'numeric_value': converted}))
        conn.execute(stmt)
    # Swap the staged column into place under the original name.
    op.drop_column('relative_modifier', 'value')
    op.alter_column('relative_modifier', 'numeric_value', nullable=True,
                    new_column_name='value',
                    existing_type=sa.Numeric(precision=8, scale=5))
Example #7
Source File: test_batch.py From alembic with MIT License | 7 votes |
def test_no_net_change_timestamp(self):
    """Altering a DateTime column to the same type must keep row data."""
    t = self._timestamp_fixture()
    import datetime
    stamp = datetime.datetime(2012, 5, 18, 15, 32, 5)
    self.conn.execute(t.insert(), {"x": stamp})
    with self.op.batch_alter_table("hasts") as batch_op:
        batch_op.alter_column("x", type_=DateTime())
    # The stored timestamp must survive the no-op type change.
    eq_(
        self.conn.execute(select([t.c.x])).fetchall(),
        [(stamp,)],
    )
Example #8
Source File: state.py From drydock with Apache License 2.0 | 7 votes |
def _assemble_tasks(self, task_list=None):
    """Attach all the appropriate result messages to the tasks in the list.

    :param task_list: a list of objects.Task instances to attach result
        messages to; None is a no-op
    """
    if task_list is None:
        return None
    with self.db_engine.connect() as conn:
        # One reusable statement: all messages for a task, bound later via
        # the 'task_id' bindparam, ordered by message sequence.
        query = sql.select([
            self.result_message_tbl
        ]).where(self.result_message_tbl.c.task_id == sql.bindparam(
            'task_id')).order_by(self.result_message_tbl.c.sequence.asc())
        # NOTE(review): the compile() result is discarded; presumably this
        # is meant to warm/validate the statement before the loop — confirm.
        query.compile(self.db_engine)
        for t in task_list:
            rs = conn.execute(query, task_id=t.task_id.bytes)
            error_count = 0
            for r in rs:
                msg = objects.TaskStatusMessage.from_db(dict(r))
                if msg.error:
                    error_count = error_count + 1
                t.result.message_list.append(msg)
            # Summarize how many attached messages were errors.
            t.result.error_count = error_count
Example #9
Source File: state.py From drydock with Apache License 2.0 | 7 votes |
def get_tasks(self):
    """Fetch all tasks from the database.

    :return: list of objects.Task instances (empty list on query failure)
    """
    try:
        with self.db_engine.connect() as conn:
            result = conn.execute(sql.select([self.tasks_tbl]))
            tasks = [objects.Task.from_db(dict(row)) for row in result]
            self._assemble_tasks(task_list=tasks)
            # Give each task a reference back to this state manager.
            for task in tasks:
                task.statemgr = self
            return tasks
    except Exception as ex:
        self.logger.error("Error querying task list: %s" % str(ex))
        return []
Example #10
Source File: queryset.py From blitzdb with MIT License | 6 votes |
def intersect(self, qs):
    """Return a new QuerySet restricted to pks present in both querysets.

    Each side becomes SELECT pk FROM <aliased bare select>; the two are
    combined with SQL INTERSECT (aliasing keeps the grouping correct) and
    this table's pk is filtered by membership in that result.
    """
    left_alias = self.get_bare_select(
        columns=[self.table.c.pk.label('pk')]).alias()
    right_alias = qs.get_bare_select(
        columns=[qs.table.c.pk.label('pk')]).alias()
    left_pks = select([column('pk')]).select_from(left_alias)
    right_pks = select([column('pk')]).select_from(right_alias)
    condition = self.table.c.pk.in_(
        expression.intersect(left_pks, right_pks))
    return QuerySet(self.backend, self.table, self.cls,
                    condition=condition,
                    order_bys=self.order_bys,
                    raw=self.raw,
                    include=self.include,
                    only=self.only)
Example #11
Source File: test_limit_offset.py From sqlalchemy-teradata with MIT License | 6 votes |
def test_limit_offset(self):
    """Build every LIMIT/OFFSET/DISTINCT combination; compile the last."""
    t1 = table('t1', column('c1'), column('c2'), column('c3'))
    # Statement construction is side-effect free; only the final variant
    # is actually compiled against the engine.
    variants = [
        select([t1]).limit(3).offset(5),
        select([t1]).limit(3),
        select([t1]).limit(3).distinct(),
        select([t1]).order_by(t1.c.c2).limit(3).offset(5).distinct(),
        select([t1]).order_by(t1.c.c2).limit(3).offset(5),
        select([t1]).order_by(t1.c.c2).offset(5),
        select([t1]).order_by(t1.c.c2).limit(3),
    ]
    stmt = variants[-1].compile(self.engine)
Example #12
Source File: notes.py From CloudBot with GNU General Public License v3.0 | 6 votes |
def add_note(db, server, user, text):
    """Insert a note for *user*, numbered after their current highest id."""
    username = user.lower()
    max_query = select([
        sqlalchemy.sql.expression.func.max(table.c.note_id).label("maxid")
    ]).where(table.c.user == username)
    current_max = db.execute(max_query).scalar()
    # The first note for a user starts at 1.
    note_id = 1 if current_max is None else current_max + 1
    db.execute(table.insert().values(
        note_id=note_id,
        connection=server,
        user=username,
        text=text,
        deleted=False,
        added=datetime.today(),
    ))
    db.commit()
Example #13
Source File: tell.py From CloudBot with GNU General Public License v3.0 | 5 votes |
def load_cache(db):
    """Rebuild the in-memory cache of unread tells.

    :type db: sqlalchemy.orm.Session
    """
    unread = db.execute(table.select().where(not_(table.c.is_read)))
    fresh = [(row["connection"], row["target"]) for row in unread]
    # Swap in the rebuilt cache atomically enough for our purposes.
    tell_cache.clear()
    tell_cache.extend(fresh)
Example #14
Source File: dialect.py From sqlalchemy-teradata with MIT License | 5 votes |
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
    """
    Override
    TODO: Check if we need PRIMARY Indices or PRIMARY KEY Indices
    TODO: Check for border cases (No PK Indices)
    """
    if schema is None:
        schema = self.default_schema_name
    stmt = select(
        [column('ColumnName'), column('IndexName')],
        from_obj=[text('dbc.Indices')]
    ).where(
        and_(text('DatabaseName = :schema'),
             text('TableName=:table'),
             text('IndexType=:indextype'))
    ).order_by(asc(column('IndexNumber')))
    # IndexType 'K' selects primary-key indexes.
    rows = connection.execute(
        stmt, schema=schema, table=table_name, indextype='K').fetchall()
    constrained = [self.normalize_name(r['ColumnName']) for r in rows]
    # Every row carries the same IndexName; there should be just one.
    pk_name = self.normalize_name(rows[-1]['IndexName']) if rows else None
    return {
        "constrained_columns": constrained,
        "name": pk_name
    }
Example #15
Source File: dialect.py From sqlalchemy-teradata with MIT License | 5 votes |
def _get_default_schema_name(self, connection): return self.normalize_name( connection.execute('select database').scalar())
Example #16
Source File: dialect.py From sqlalchemy-teradata with MIT License | 5 votes |
def get_columns(self, connection, table_name, schema=None, **kw):
    """Return raw column metadata rows for *table_name* from the dbc views.

    Pre-16 servers read dbc.ColumnsV and fall back to per-column help for
    views; version 16+ servers read dbc.ColumnsQV directly.
    """
    helpView=False
    if schema is None:
        schema = self.default_schema_name
    # NOTE(review): server_version_info is treated as a dotted string here;
    # confirm the dialect stores it that way rather than as a tuple.
    if int(self.server_version_info.split('.')[0])<16:
        dbc_columninfo='dbc.ColumnsV'
        # Check whether the object is a view (tablekind 'V').
        stmt = select([column('tablekind')],\
                      from_obj=[text('dbc.tablesV')]).where(\
                      and_(text('DatabaseName=:schema'),\
                           text('TableName=:table_name'),\
                           text("tablekind='V'")))
        res = connection.execute(stmt, schema=schema, table_name=table_name).rowcount
        helpView = (res==1)
    else:
        dbc_columninfo='dbc.ColumnsQV'
    stmt = select([column('columnname'), column('columntype'),\
                   column('columnlength'), column('chartype'),\
                   column('decimaltotaldigits'), column('decimalfractionaldigits'),\
                   column('columnformat'),\
                   column('nullable'), column('defaultvalue'), column('idcoltype')],\
                  from_obj=[text(dbc_columninfo)]).where(\
                  and_(text('DatabaseName=:schema'),\
                       text('TableName=:table_name')))
    res = connection.execute(stmt, schema=schema, table_name=table_name).fetchall()
    # If this is a view on a pre-16 server, dbc.ColumnsV lacks type info:
    # fetch it per column via the help fallback instead.
    if helpView:
        res=[self._get_column_help(connection, schema,table_name,r['columnname']) for r in res]
    return [self._get_column_info(row) for row in res]
Example #17
Source File: dialect.py From sqlalchemy-teradata with MIT License | 5 votes |
def has_table(self, connection, table_name, schema=None):
    """Return True when *table_name* exists in *schema* (or the default)."""
    target_schema = self.default_schema_name if schema is None else schema
    stmt = select(
        [column('tablename')],
        from_obj=[text('dbc.tablesvx')]
    ).where(and_(text('DatabaseName=:schema'),
                 text('TableName=:table_name')))
    row = connection.execute(
        stmt, schema=target_schema, table_name=table_name).fetchone()
    return row is not None
Example #18
Source File: test_dialect.py From sqlalchemy-teradata with MIT License | 5 votes |
def setup(self):
    """Open a connection and create a throwaway test table.

    Table/view names are derived from the current user so tests do not
    collide; the user needs rights to create tables.
    """
    self.conn = testing.db.connect()
    self.engine = self.conn.engine
    self.dialect = self.conn.dialect
    self.metadata = MetaData()
    self.user_name = self.engine.execute('sel user').scalar()
    self.db_schema = self.engine.execute('sel database').scalar()
    self.tbl_name = self.user_name + '_test'
    self.view_name = self.user_name + '_test_view'
    self.test_table = Table(
        self.tbl_name, self.metadata,
        Column('id', Integer, primary_key=True),
        PrimaryKeyConstraint('id', name='my_pk'))
    # Create the table(s); view creation is intentionally not done here.
    self.metadata.create_all(self.engine)
Example #19
Source File: dialect.py From sqlalchemy-teradata with MIT License | 5 votes |
def get_schema_names(self, connection, **kw):
    """List all user (schema) names from dbc.UsersV, normalized."""
    stmt = select([column('username')],
                  from_obj=[text('dbc.UsersV')],
                  order_by=[text('username')])
    return [self.normalize_name(row['username'])
            for row in connection.execute(stmt).fetchall()]
Example #20
Source File: tell.py From CloudBot with GNU General Public License v3.0 | 5 votes |
def get_unread(db, server, target):
    """Fetch unread tells for *target* on *server*, oldest first.

    :type db: sqlalchemy.orm.Session
    :type server: str
    :type target: str
    """
    query = (
        select([table.c.sender, table.c.message, table.c.time_sent])
        .where(table.c.connection == server.lower())
        .where(table.c.target == target.lower())
        .where(not_(table.c.is_read))
        .order_by(table.c.time_sent)
    )
    return db.execute(query).fetchall()
Example #21
Source File: tell.py From CloudBot with GNU General Public License v3.0 | 5 votes |
def list_disabled(db, conn):
    """Yield (conn, target, setter, set_at.ctime()) for disabled targets.

    :type db: sqlalchemy.orm.Session
    :type conn: cloudbot.client.Client
    """
    stmt = disable_table.select().where(
        disable_table.c.conn == conn.name.lower())
    for entry in db.execute(stmt):
        yield (entry['conn'], entry['target'], entry['setter'],
               entry['set_at'].ctime())
Example #22
Source File: tell.py From CloudBot with GNU General Public License v3.0 | 5 votes |
def load_ignores(db):
    """Rebuild the ignore cache: conn -> nick -> list of masks.

    :type db: sqlalchemy.orm.Session
    """
    rebuilt = defaultdict(lambda: defaultdict(list))
    for entry in db.execute(ignore_table.select()):
        conn_key = entry['conn'].lower()
        nick_key = entry['nick'].lower()
        rebuilt[conn_key][nick_key].append(entry['mask'])
    ignore_cache.clear()
    ignore_cache.update(rebuilt)
Example #23
Source File: tell.py From CloudBot with GNU General Public License v3.0 | 5 votes |
def load_disabled(db):
    """Rebuild the disabled-targets cache: conn -> set of targets.

    :type db: sqlalchemy.orm.Session
    """
    rebuilt = defaultdict(set)
    for entry in db.execute(disable_table.select()):
        rebuilt[entry['conn']].add(entry['target'].lower())
    disable_cache.clear()
    disable_cache.update(rebuilt)
Example #24
Source File: 4198a248c8a_.py From evesrp with BSD 2-Clause "Simplified" License | 5 votes |
def upgrade():
    """Migrate request.base_payout from Float to Numeric(15, 2).

    Data is copied into a staging numeric column (scaled by 1,000,000 —
    the matching downgrade divides by the same factor), then the old
    column is dropped and the staging column renamed into place.
    """
    op.add_column(
        'request',
        sa.Column('numeric_base_payout',
                  sa.Numeric(precision=15, scale=2), default=0.0))
    request = table(
        'request',
        column('id', sa.Integer),
        column('base_payout', sa.Float),
        column('numeric_base_payout', sa.Numeric(precision=15, scale=2)),
    )
    conn = op.get_bind()
    rows = conn.execute(select([request.c.id, request.c.base_payout]))
    for request_id, float_payout in rows:
        scaled = Decimal.from_float(float_payout) * 1000000
        conn.execute(update(request)
                     .where(request.c.id == request_id)
                     .values({'numeric_base_payout': scaled}))
    rows.close()
    op.drop_column('request', 'base_payout')
    op.alter_column('request',
                    column_name='numeric_base_payout',
                    new_column_name='base_payout',
                    existing_type=sa.Numeric(precision=15, scale=2),
                    existing_server_default=0.0)
Example #25
Source File: duckhunt.py From CloudBot with GNU General Public License v3.0 | 5 votes |
def load_optout(db):
    """Load channels where duckhunt is disabled, keyed by network.

    Right now this does not differentiate beyond the network name;
    should be cleaned up later.
    """
    rebuilt = defaultdict(list)
    for row in db.execute(optout.select()):
        rebuilt[row["network"].casefold()].append(row["chan"].casefold())
    opt_out.clear()
    opt_out.update(rebuilt)
Example #26
Source File: 2976d59f286_.py From evesrp with BSD 2-Clause "Simplified" License | 5 votes |
def downgrade():
    """Collapse the absolute/relative modifier subclass tables back into
    type_/value columns on modifier, then drop the subclass tables."""
    # Add type_ and value columns back
    op.add_column('modifier', sa.Column('type_', sa.Enum('absolute',
        'percentage', name='modifier_type')))
    op.add_column('modifier', sa.Column('value', sa.Float, nullable=True))
    # Populate type_ and value with data from the subclass tables.
    abs_select = select([abs_table.c.id, abs_table.c.value])
    rel_select = select([rel_table.c.id, rel_table.c.value])
    conn = op.get_bind()
    for select_stmt in (abs_select, rel_select):
        modifier_rows = conn.execute(select_stmt)
        for modifier_id, modifier_value in modifier_rows:
            # FIX: identity check instead of ==. SQLAlchemy selectables are
            # not meant to be compared with the equality operator; the
            # original only worked because Select falls back to object
            # identity for ==.
            if select_stmt is abs_select:
                # Absolute values were stored scaled by 1,000,000.
                modifier_value = float(modifier_value / 1000000)
                type_ = 'absolute'
            else:
                type_ = 'percentage'
            update_stmt = update(modifier)\
                .where(modifier.c.id == modifier_id)\
                .values({
                    'value': modifier_value,
                    'type_': type_
                })
            conn.execute(update_stmt)
        modifier_rows.close()
    # Drop the old _type column and the subclass tables
    op.drop_column('modifier', '_type')
    op.drop_table('absolute_modifier')
    op.drop_table('relative_modifier')
    # Add the not-null constraint back to type_
    op.alter_column('modifier', column_name='type_', nullable=False,
        existing_type=sa.Enum('absolute', 'percentage',
                              name='modifier_type'))
Example #27
Source File: 2f22504b1e6_.py From evesrp with BSD 2-Clause "Simplified" License | 5 votes |
def upgrade():
    """Add system/constellation/region columns to request and backfill
    them from each request's killmail location before enforcing NOT NULL."""
    # Add the columns as nullable first so existing rows don't violate them.
    for col_name in ('constellation', 'region', 'system'):
        op.add_column('request',
                      sa.Column(col_name, sa.String(length=25),
                                nullable=True))
        op.create_index('ix_request_' + col_name, 'request', [col_name],
                        unique=False)
    # Backfill every existing request.
    conn = op.get_bind()
    kill_ids = conn.execute(select([request.c.id]))
    for row in kill_ids:
        kill_id = row[0]
        system = systems.system_names[get_system_id(kill_id)]
        constellation = systems.systems_constellations[system]
        region = systems.constellations_regions[constellation]
        conn.execute(update(request)
                     .where(request.c.id == op.inline_literal(kill_id))
                     .values({'system': system,
                              'constellation': constellation,
                              'region': region}))
    kill_ids.close()
    # Every row now has values; enforce the NOT NULL constraint.
    for col_name in ('constellation', 'region', 'system'):
        op.alter_column('request', col_name, nullable=False,
                        existing_server_default=None,
                        existing_type=sa.String(length=25))
Example #28
Source File: 337978f8c75_restrict_request_base_payout_to_be_0.py From evesrp with BSD 2-Clause "Simplified" License | 5 votes |
def upgrade():
    """Clamp negative request.base_payout values up to 0.

    The original implementation selected each offending id and issued one
    UPDATE per row; a single set-based UPDATE with the same predicate
    affects exactly the same rows in one round trip.
    """
    conn = op.get_bind()
    conn.execute(
        update(request)
        .where(request.c.base_payout < 0.0)
        .values({'base_payout': 0.0}))
Example #29
Source File: 4280bf2417c_.py From evesrp with BSD 2-Clause "Simplified" License | 5 votes |
def downgrade():
    """Restore division.ship_transformer / pilot_transformer from
    transformerref rows, then drop transformerref (lossy: refs for any
    other attribute names are discarded)."""
    # Add ship and pilot transformer columns back to division
    op.add_column('division', sa.Column('ship_transformer', sa.PickleType))
    op.add_column('division', sa.Column('pilot_transformer', sa.PickleType))
    # Convert transformerrefs back to the old columns
    conn = op.get_bind()
    columns = [
        transformerref.c.division_id,
        transformerref.c.attribute_name,
        transformerref.c.transformer,
    ]
    # Only the two attributes that map onto legacy columns are migrated.
    transformer_sel = select(columns)\
        .where(or_(
            transformerref.c.attribute_name == 'ship_type',
            transformerref.c.attribute_name == 'pilot',
        ))
    transformer_rows = conn.execute(transformer_sel)
    for division_id, attribute_name, transformer in transformer_rows:
        # Map the generic attribute name back onto its legacy column name
        # and transformer class.
        if attribute_name == 'ship_type':
            colname = 'ship'
            transformer_class = evesrp.transformers.ShipTransformer
        elif attribute_name == 'pilot':
            colname = 'pilot'
            transformer_class = evesrp.transformers.PilotTransformer
        colname += '_transformer'
        # Re-wrap as the legacy transformer type before it is pickled into
        # the PickleType column.
        transformer = transformer_class(transformer.name, transformer.slug)
        update_stmt = update(division)\
            .where(division.c.id == division_id)\
            .values({
                colname: transformer
            })
        conn.execute(update_stmt)
    transformer_rows.close()
    # Drop the transformerref table. This is going to be lossy.
    op.drop_table('transformerref')
Example #30
Source File: 4198a248c8a_.py From evesrp with BSD 2-Clause "Simplified" License | 5 votes |
def downgrade():
    """Revert request.base_payout from Numeric(15, 2) back to Float.

    Copies payouts (divided back down by 1,000,000, undoing the upgrade's
    scaling) into a staging float column, drops the numeric column, and
    renames the staging column into place.
    """
    op.add_column('request',
        sa.Column('float_base_payout', sa.Float, default=0.0))
    request = table('request',
        column('id', sa.Integer),
        column('base_payout', sa.Numeric(precision=15, scale=2)),
        column('float_base_payout', sa.Float),
    )
    conn = op.get_bind()
    requests_sel = select([request.c.id, request.c.base_payout])
    requests = conn.execute(requests_sel)
    for request_id, decimal_payout in requests:
        # Undo the x1,000,000 scaling applied in the matching upgrade.
        decimal_payout = decimal_payout / 1000000
        float_payout = float(decimal_payout)
        update_stmt = update(request)\
            .where(request.c.id == request_id)\
            .values({
                'float_base_payout': float_payout,
            })
        conn.execute(update_stmt)
    requests.close()
    op.drop_column('request', 'base_payout')
    # FIX: the column that survives the drop above is 'float_base_payout';
    # the original referenced 'numeric_base_payout', which does not exist
    # in the downgrade path (the upgrade already renamed it away), so the
    # migration would fail here.
    op.alter_column('request', column_name='float_base_payout',
        new_column_name='base_payout',
        existing_type=sa.Float,
        existing_server_default=0.0)