Python django.db.models.StdDev() Examples

The following are 6 code examples of django.db.models.StdDev(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the django.db.models module, or try the search function.
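StdDev is used inside aggregate() or annotate() calls like any other aggregate function. A minimal sketch of typical usage, assuming a hypothetical Book model with a numeric price field:

from django.db.models import StdDev

# Population standard deviation of all prices (Book is a hypothetical model).
Book.objects.aggregate(price_stddev=StdDev('price'))

# Pass sample=True to compute the sample standard deviation instead.
Book.objects.aggregate(price_stddev=StdDev('price', sample=True))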
Example #1
Source File: average_following.py    From canvas with BSD 3-Clause "New" or "Revised" License
import datetime

from django.db import DatabaseError
from django.db.models import Avg, Count, StdDev

# User, MONTHS and percentile() are defined in the original module.
def handle(self, *args, **options):
        cutoff_date = datetime.date.today() - datetime.timedelta(days=(30 * MONTHS))
        counts = User.objects.filter(date_joined__gt=cutoff_date)
        counts = counts.annotate(follow_count=Count('following')).order_by('follow_count')

        avg = counts.aggregate(Avg('follow_count'))['follow_count__avg']

        print()
        print('Following counts for users who signed up in the last {} months'.format(MONTHS))
        print('----------------')
        print('Average: {:.3} per user'.format(avg))

        try:
            std_dev = counts.aggregate(StdDev('follow_count'))['follow_count__stddev']
            print('StdDev:  {:.3}'.format(std_dev))
        except DatabaseError:
            print("(can't get standard deviation with SQLite)")
        counts = counts.values_list('follow_count', flat=True)
        print('Median: {}'.format(percentile(counts, 0.5)))
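The try/except matters because SQLite ships without a built-in STDDEV aggregate, so on that backend the query can fail with a DatabaseError, while backends such as PostgreSQL and MySQL support the aggregate natively.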
Example #2
Source File: utils.py    From opensurfaces with MIT License
from django.db.models import Avg, Count, Max, Min, StdDev, Sum

def all_aggregations(queryset, key):
    """ Performs all available aggregations on a queryset """
    return queryset.filter(**{key + '__isnull': False}).aggregate(
        min=Min(key),
        avg=Avg(key),
        max=Max(key),
        std=StdDev(key),
        count=Count(key),
        sum=Sum(key),
    )
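For instance, a hypothetical call against a Photo model with a numeric score field would return all six statistics at once:

stats = all_aggregations(Photo.objects.all(), 'score')
# stats == {'min': ..., 'avg': ..., 'max': ..., 'std': ..., 'count': ..., 'sum': ...}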
Example #3
Source File: distances.py    From protwis with Apache License 2.0
from django.contrib.postgres.aggregates import ArrayAgg
from django.db.models import Avg, Count, StdDev

def fetch_and_calculate(self, with_arr=False):
        # Requires PostgreSQL settings with more memory:
        # sudo nano /etc/postgresql/9.3/main/postgresql.conf
        #   shared_buffers = 2GB
        #   work_mem = 100MB
        #   temp_buffers = 500MB
        # sudo /etc/init.d/postgresql restart
        ds_with_key = {}
        if with_arr:
            ds = list(
                Distance.objects.filter(structure__in=self.structures)
                .exclude(gns_pair__contains='8x')
                .exclude(gns_pair__contains='12x')
                .exclude(gns_pair__contains='23x')
                .exclude(gns_pair__contains='34x')
                .exclude(gns_pair__contains='45x')
                .values('gns_pair')
                .annotate(
                    mean=Avg('distance'),
                    std=StdDev('distance'),
                    c=Count('distance'),
                    dis=Count('distance'),
                    arr=ArrayAgg('distance'),
                    arr2=ArrayAgg('structure__pdb_code__index'),
                    arr3=ArrayAgg('gns_pair'),
                )
                .values_list('gns_pair', 'mean', 'std', 'c', 'dis', 'arr', 'arr2', 'arr3')
                # Keep only pairs present in at least 80% of the structures.
                .filter(c__gte=int(0.8 * len(self.structures)))
            )
            for i, d in enumerate(ds):
                ds[i] = list(ds[i])
                # Overwrite the count at index 3 with the dispersion (std / mean).
                ds[i][3] = d[2] / d[1]
                ds_with_key[d[0]] = ds[i]
        else:
            ds = list(
                Distance.objects.filter(structure__in=self.structures)
                .exclude(gns_pair__contains='8x')
                .exclude(gns_pair__contains='12x')
                .exclude(gns_pair__contains='23x')
                .exclude(gns_pair__contains='34x')
                .exclude(gns_pair__contains='45x')
                .values('gns_pair')
                .annotate(mean=Avg('distance'), std=StdDev('distance'), c=Count('distance'))
                .values_list('gns_pair', 'mean', 'std', 'c')
                # Keep only pairs present in at least 80% of the structures.
                .filter(c__gte=int(len(self.structures) * 0.8))
            )
            for i, d in enumerate(ds):
                # Append the dispersion (std / mean) as a fifth element.
                ds[i] += (d[2] / d[1],)
                ds_with_key[d[0]] = ds[i]

        # Rank by column 3 (the dispersion in the with_arr branch) when several
        # structures are compared, otherwise by the mean distance.
        if len(self.structures) > 1:
            stats_sorted = sorted(ds, key=lambda k: -k[3])
        else:
            stats_sorted = sorted(ds, key=lambda k: -k[1])

        self.stats_key = ds_with_key
        self.stats = stats_sorted
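The std / mean ratio computed in both branches is the coefficient of variation, a scale-free measure of how much each residue-pair distance varies across structures; ranking by it surfaces the most conformationally variable pairs regardless of their absolute distance.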
Example #4
Source File: tests.py    From django-sqlserver with MIT License
def test_aggregation(self):
        """
        #19360: Raise NotImplementedError when aggregating on date/time fields.
        """
        for aggregate in (Sum, Avg, Variance, StdDev):
            with self.assertRaises(NotImplementedError):
                Item.objects.all().aggregate(aggregate('time'))
            with self.assertRaises(NotImplementedError):
                Item.objects.all().aggregate(aggregate('date'))
            with self.assertRaises(NotImplementedError):
                Item.objects.all().aggregate(aggregate('last_modified'))
            with self.assertRaises(NotImplementedError):
                Item.objects.all().aggregate(
                    **{'complex': aggregate('last_modified') + aggregate('last_modified')}
                ) 
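Note the exception type: earlier Django versions raised NotImplementedError for this case, while later versions raise django.db.utils.NotSupportedError instead, which is what the following example asserts.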
Example #5
Source File: tests.py    From djongo with GNU Affero General Public License v3.0
def test_aggregation(self):
        """
        Raise NotSupportedError when aggregating on date/time fields (#19360).
        """
        for aggregate in (Sum, Avg, Variance, StdDev):
            with self.assertRaises(NotSupportedError):
                Item.objects.all().aggregate(aggregate('time'))
            with self.assertRaises(NotSupportedError):
                Item.objects.all().aggregate(aggregate('date'))
            with self.assertRaises(NotSupportedError):
                Item.objects.all().aggregate(aggregate('last_modified'))
            with self.assertRaises(NotSupportedError):
                Item.objects.all().aggregate(
                    **{'complex': aggregate('last_modified') + aggregate('last_modified')}
                ) 
Example #6
Source File: generate.py    From swarfarm with Apache License 2.0
from datetime import timedelta

from django.db.models import Avg, F, Max, Min, Q, StdDev
from django.db.models.functions import Extract

# get_drop_querysets, get_report_summary, histogram, round_timedelta, Dungeon,
# DROP_TYPES and CLEAR_TIME_BIN_WIDTH come from the original module.
def drop_report(qs, **kwargs):
    report_data = {}

    # Get querysets for each possible drop type
    drops = get_drop_querysets(qs)
    report_data['summary'] = get_report_summary(drops, qs.count(), **kwargs)

    # Clear time statistics, if supported by the qs model
    if hasattr(qs.model, 'clear_time'):
        successful_runs = qs.filter(
            Q(success=True) | Q(level__dungeon__category=Dungeon.CATEGORY_RIFT_OF_WORLDS_BEASTS)
        )

        if successful_runs.count():
            clear_time_aggs = successful_runs.aggregate(
                std_dev=StdDev(Extract(F('clear_time'), lookup_name='epoch')),
                avg=Avg('clear_time'),
                min=Min('clear_time'),
                max=Max('clear_time'),
            )

            # Use +/- 3 std deviations of the clear time average as bounds for the
            # time range, in case extreme outliers skew the chart scale.
            min_time = round_timedelta(
                max(clear_time_aggs['min'], clear_time_aggs['avg'] - timedelta(seconds=clear_time_aggs['std_dev'] * 3)),
                CLEAR_TIME_BIN_WIDTH,
                direction='down',
            )
            max_time = round_timedelta(
                min(clear_time_aggs['max'], clear_time_aggs['avg'] + timedelta(seconds=clear_time_aggs['std_dev'] * 3)),
                CLEAR_TIME_BIN_WIDTH,
                direction='up',
            )
            bins = [min_time + CLEAR_TIME_BIN_WIDTH * x for x in range(0, int((max_time - min_time) / CLEAR_TIME_BIN_WIDTH))]

            # Histogram generates on entire qs, not just successful runs.
            report_data['clear_time'] = {
                'min': str(clear_time_aggs['min']),
                'max': str(clear_time_aggs['max']),
                'avg': str(clear_time_aggs['avg']),
                'chart': {
                    'type': 'histogram',
                    'width': 5,
                    'data': histogram(qs, 'clear_time', bins, slice_on='success'),
                }
            }

    # Individual drop details (use a separate name so the qs parameter is not shadowed)
    for key, drop_qs in drops.items():
        if DROP_TYPES[key]:
            report_data[key] = DROP_TYPES[key](drop_qs, drop_qs.count(), **kwargs)

    return report_data
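One detail worth noting in this example: clear_time is a duration, so StdDev(Extract(F('clear_time'), lookup_name='epoch')) first converts the duration to a number of seconds ('epoch') that the database can aggregate numerically. A minimal standalone sketch, assuming a hypothetical Run model with a DurationField named duration and a PostgreSQL backend:

from django.db.models import F, StdDev
from django.db.models.functions import Extract

# Standard deviation of run durations, in seconds; PostgreSQL can extract
# 'epoch' from an interval value. Run is a hypothetical model.
Run.objects.aggregate(duration_std=StdDev(Extract(F('duration'), lookup_name='epoch')))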