Python sqlalchemy.sql.func.avg() Examples
The following are 11 code examples of sqlalchemy.sql.func.avg().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module sqlalchemy.sql.func, or try the search function.
Example #1
Source File: 004.py From openmoves with MIT License | 6 votes |
def calculateAverageTemperatures():
    """Backfill Move.temperature_avg from the average of each move's samples.

    Runs inside an Alembic migration, so the two table classes are declared
    ad hoc against the migration connection instead of imported from the app.
    """
    Base = declarative_base()
    Session = sessionmaker(bind=op.get_bind())

    class Sample(Base):
        __tablename__ = 'sample'
        id = sa.Column(sa.Integer, name="id", primary_key=True)
        moveId = sa.Column(sa.Integer, name="move_id", nullable=False)
        temperature = sa.Column(sa.Float, name='temperature')

    class Move(Base):
        __tablename__ = 'move'
        id = sa.Column(sa.Integer, name="id", primary_key=True)
        temperature_avg = sa.Column(sa.Float, name='temperature_avg')

    session = Session()

    # Average temperature per move; non-positive readings are treated as
    # missing data and excluded from the mean.
    averages = dict(
        session.query(Sample.moveId, func.avg(Sample.temperature))
        .group_by(Sample.moveId)
        .filter(Sample.temperature > 0)
        .all()
    )

    for move in session.query(Move):
        if move.id in averages:
            move.temperature_avg = averages[move.id]
    session.commit()
Example #2
Source File: xlsx_export.py From mma-dexter with Apache License 2.0 | 6 votes |
def keywords_worksheet(self, wb):
    """Add a 'raw_keywords' sheet containing, for each document, only the
    keywords whose relevance is at or above that document's average."""
    from dexter.models.views import DocumentKeywordsView
    from dexter.models import DocumentKeyword

    ws = wb.add_worksheet('raw_keywords')

    # Per-document average keyword relevance, used as the inclusion cutoff.
    avg_relevance = db.session.query(
        DocumentKeyword.doc_id,
        func.avg(DocumentKeyword.relevance).label('avg'),
    ).filter(
        DocumentKeyword.doc_id.in_(self.doc_ids)
    ).group_by(
        DocumentKeyword.doc_id
    ).subquery()

    keyword_rows = db.session.query(DocumentKeywordsView).join(
        avg_relevance,
        DocumentKeywordsView.c.document_id == avg_relevance.columns.doc_id,
    ).filter(
        DocumentKeywordsView.c.relevance >= avg_relevance.columns.avg
    ).all()

    self.write_table(ws, 'Keywords', keyword_rows)
Example #3
Source File: imports.py From openmoves with MIT License | 5 votes |
def move_import(xmlfile, filename, user, request_form):
    """Import a move from an .xml/.sml/.gpx upload (optionally gzipped).

    Returns the created move on success, or None when the file format is
    unknown or the importer produced nothing.
    """
    if filename.endswith('.gz'):
        # Transparently unwrap gzipped uploads and strip the suffix so the
        # real extension can be inspected below.
        xmlfile = gzip.GzipFile(fileobj=xmlfile, mode='rb', filename=filename)
        filename = filename[:-len('.gz')]

    import_functions = {
        '.xml': old_xml_import,
        '.sml': sml_import,
        '.gpx': gpx_import,
    }
    extension = filename[-4:]
    import_function = import_functions.get(extension)
    if import_function is None:
        flash("unknown fileformat: '%s'" % xmlfile.name, 'error')
        return

    move = import_function(xmlfile, user, request_form)
    if move:
        # Mean of the strictly-positive temperature samples for this move.
        move.temperature_avg, = db.session.query(
            func.avg(Sample.temperature)
        ).filter(Sample.move == move, Sample.temperature > 0).one()

        stroke_count = 0
        events_query = db.session.query(Sample.events).filter(
            Sample.move == move, Sample.events != None)  # noqa: E711
        for events, in events_query:
            if 'swimming' in events and events['swimming']['type'] == 'Stroke':
                stroke_count += 1

        # Swimming moves are expected to contain at least one stroke event.
        if 'swimming' in move.activity:
            assert stroke_count > 0

        if stroke_count > 0:
            move.stroke_count = stroke_count

        db.session.commit()
    return move
Example #4
Source File: models.py From RTB-CTF-Framework with MIT License | 5 votes |
def avg_rating(id):
    """Return the mean non-zero rating for machine *id*, rounded to one
    decimal place, or 0 when there are no (truthy) ratings."""
    mean = (
        UserMachine.query.with_entities(func.avg(UserMachine.rating))
        .filter(UserMachine.machine_id == id, UserMachine.rating != 0)
        .scalar()
    )
    if not mean:
        return 0
    return round(mean, 1)
Example #5
Source File: models.py From RTB-CTF-Framework with MIT License | 5 votes |
def avg_rating(id):
    """Return the mean non-zero rating for challenge *id*, rounded to one
    decimal place, or 0 when there are no (truthy) ratings."""
    mean_rating = (
        UserChallenge.query.with_entities(func.avg(UserChallenge.rating))
        .filter(UserChallenge.challenge_id == id, UserChallenge.rating != 0)
        .scalar()
    )
    return round(mean_rating, 1) if mean_rating else 0


# UserChallenge: N to N relationship
Example #6
Source File: deploy.py From freight with Apache License 2.0 | 5 votes |
def get_attrs(self, item_list):
    """Build the serializer attributes (app, task, user, estimatedDuration)
    for each deploy in *item_list*."""
    app_ids = set(i.app_id for i in item_list)
    task_ids = set(i.task_id for i in item_list)

    apps = {a.id: a for a in App.query.filter(App.id.in_(app_ids))}
    tasks = {t.id: t for t in Task.query.filter(Task.id.in_(task_ids))}

    # Mean duration of tasks finished in the last week, keyed by app id.
    durations_by_app = dict(
        db.session.query(
            Task.app_id, func.avg(Task.date_finished - Task.date_started)
        )
        .filter(
            Task.date_finished > datetime.utcnow() - timedelta(days=7),
            Task.status == TaskStatus.finished,
        )
        .group_by(Task.app_id)
    )

    user_ids = set(tasks[d.task_id].user_id for d in item_list)
    if user_ids:
        user_map = {u.id: u for u in User.query.filter(User.id.in_(user_ids))}
    else:
        user_map = {}

    attrs = {}
    for item in item_list:
        task = tasks[item.task_id]
        duration = durations_by_app.get(task.app_id)
        if duration:
            # Convert the DB interval to plain seconds for the API.
            duration = duration.total_seconds()
        attrs[item] = {
            "app": apps[item.app_id],
            "task": task,
            "user": user_map.get(task.user_id),
            "estimatedDuration": duration,
        }
    return attrs
Example #7
Source File: ml_model.py From ml-enabler with BSD 2-Clause "Simplified" License | 5 votes |
def get_tiles_by_quadkey(prediction_id: int, quadkeys: tuple, zoom: int):
    """Aggregate a prediction's tiles by their quadkey prefix of length *zoom*,
    averaging the stored ml_prediction and osm_building_area values.

    NOTE(review): the result label 'qaudkey' looks like a typo for 'quadkey',
    but it is kept verbatim because callers may read rows by that name —
    confirm before renaming.
    """
    quadkey_prefix = func.substr(PredictionTile.quadkey, 1, zoom)

    # JSON values are stored as text, so cast via String before Float.
    mean_ml = func.avg(
        cast(cast(PredictionTile.predictions['ml_prediction'],
                  sqlalchemy.String), sqlalchemy.Float)
    ).label('ml_prediction')
    mean_area = func.avg(
        cast(cast(PredictionTile.predictions['osm_building_area'],
                  sqlalchemy.String), sqlalchemy.Float)
    ).label('osm_building_area')

    return (
        db.session.query(quadkey_prefix.label('qaudkey'), mean_ml, mean_area)
        .filter(PredictionTile.prediction_id == prediction_id)
        .filter(quadkey_prefix.in_(quadkeys))
        .group_by(quadkey_prefix)
        .all()
    )
Example #8
Source File: ml_model.py From ml-enabler with BSD 2-Clause "Simplified" License | 5 votes |
def get_aggregate_for_polygon(prediction_id: int, polygon: str):
    """Average ml_prediction and osm_building_area over all tiles of a
    prediction whose centroid lies within *polygon* (WKT text)."""
    # JSON values are stored as text, so cast via String before Float.
    mean_ml = func.avg(
        cast(cast(PredictionTile.predictions['ml_prediction'],
                  sqlalchemy.String), sqlalchemy.Float)
    ).label('ml_prediction')
    mean_area = func.avg(
        cast(cast(PredictionTile.predictions['osm_building_area'],
                  sqlalchemy.String), sqlalchemy.Float)
    ).label('osm_building_area')

    return (
        db.session.query(mean_ml, mean_area)
        .filter(PredictionTile.prediction_id == prediction_id)
        .filter(ST_Within(PredictionTile.centroid,
                          ST_GeomFromText(polygon)) == 'True')
        .one()
    )
Example #9
Source File: dashboard.py From mma-dexter with Apache License 2.0 | 5 votes |
def markers_chart(self):
    """Return counts of flagged documents, documents with/without a URL, and
    the average number of quoted sources per document."""
    counts = {}

    # flagged
    flagged_q = self.filter(
        db.session.query(func.count(Document.id))
        .filter(Document.flagged == True))  # noqa
    counts['flagged'] = flagged_q.scalar()

    # with URL
    with_url_q = self.filter(
        db.session.query(func.count(Document.id))
        .filter(Document.url != None, Document.url != ''))  # noqa
    counts['with-url'] = with_url_q.scalar()

    # without URL
    without_url_q = self.filter(
        db.session.query(func.count(Document.id))
        .filter(or_(Document.url == None, Document.url == '')))  # noqa
    counts['without-url'] = without_url_q.scalar()

    # average people sources per document: count quoted sources per document,
    # then average those counts.
    per_doc = self.filter(
        db.session
        .query(func.count(DocumentSource.doc_id).label('count'))
        .join(Document, DocumentSource.doc_id == Document.id)
        .filter(DocumentSource.quoted == 1)
        .group_by(DocumentSource.doc_id))\
        .subquery('cnt')
    mean_sources = float(
        db.session
        .query(func.avg(per_doc.c.count))
        .select_from(per_doc)
        .scalar() or 0)
    counts['average-sources-per-document'] = round(mean_sources, 2)

    return {'values': counts}
Example #10
Source File: fdi.py From mma-dexter with Apache License 2.0 | 5 votes |
def markers_chart(self):
    """Return document marker statistics: flagged count, URL presence counts,
    and the mean number of quoted sources per document."""
    counts = {}

    # Flagged documents.
    counts['flagged'] = self.filter(
        db.session.query(func.count(Document.id))
        .filter(Document.flagged == True)).scalar()  # noqa

    # Documents that have a non-empty URL.
    counts['with-url'] = self.filter(
        db.session.query(func.count(Document.id))
        .filter(Document.url != None, Document.url != '')).scalar()  # noqa

    # Documents with a missing or empty URL.
    counts['without-url'] = self.filter(
        db.session.query(func.count(Document.id))
        .filter(or_(Document.url == None, Document.url == ''))).scalar()  # noqa

    # Average quoted sources per document: per-document counts first,
    # then the mean over that subquery (0 when there are no rows).
    source_counts = self.filter(
        db.session
        .query(func.count(DocumentSource.doc_id).label('count'))
        .join(Document, DocumentSource.doc_id == Document.id)
        .filter(DocumentSource.quoted == 1)
        .group_by(DocumentSource.doc_id))\
        .subquery('cnt')
    avg_sources = float(
        db.session
        .query(func.avg(source_counts.c.count))
        .select_from(source_counts)
        .scalar() or 0)
    counts['average-sources-per-document'] = round(avg_sources, 2)

    return {'values': counts}
Example #11
Source File: stats.py From zeus with Apache License 2.0 | 4 votes |
def build_queryset(stat: str, grouper, repo_id: UUID = None):
    """Return a grouped aggregate query for the named build statistic.

    Known "builds.*" stats aggregate directly over the Build table; any other
    stat name is averaged from ItemStat rows of passed builds. When *repo_id*
    is given, results are restricted to that repository.
    """
    # TODO(dcramer): put minimum date bounds
    build_stats = (
        "builds.aborted",
        "builds.failed",
        "builds.passed",
        "builds.errored",
        "builds.total",
        "builds.duration",
    )

    if stat in build_stats:
        # Map each result-specific stat to its filter; the remaining stats
        # ("builds.total", "builds.duration") cover all finished builds.
        result_filters = {
            "builds.failed": [Build.result == Result.failed],
            "builds.passed": [Build.result == Result.passed],
            "builds.aborted": [Build.result == Result.aborted],
            "builds.errored": [Build.result == Result.errored],
        }
        extra_filters = result_filters.get(
            stat, [Build.status == Status.finished]
        )

        if stat == "builds.duration":
            # Mean wall-clock duration in milliseconds; passed builds only.
            value = func.avg(
                (
                    extract("epoch", Build.date_finished)
                    - extract("epoch", Build.date_started)
                )
                * 1000
            )
            extra_filters.append(Build.result == Result.passed)
        else:
            value = func.count(Build.id)

        queryset = (
            db.session.query(grouper.label("grouper"), value.label("value"))
            .filter(*extra_filters)
            .group_by("grouper")
        )
    else:
        queryset = (
            db.session.query(
                grouper.label("grouper"), func.avg(ItemStat.value).label("value")
            )
            .filter(
                ItemStat.item_id == Build.id,
                ItemStat.name == stat,
                Build.result == Result.passed,
            )
            .group_by("grouper")
        )

    if repo_id:
        queryset = queryset.filter(Build.repository_id == repo_id)

    return queryset