Python django.db.models.functions.TruncDay() Examples

The following are 6 code examples of django.db.models.functions.TruncDay(), taken from open source projects. The source file and project license for each example are listed above it. You may also want to check out the other available functions and classes of the django.db.models.functions module.
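TruncDay() truncates a datetime expression to the start of its day, so annotating a queryset with it and then grouping on that annotation yields per-day aggregates. The sketch below shows the pattern that the examples below share; the Post model and its created_time field are hypothetical placeholders, not taken from any of the projects.

from django.db.models import Count
from django.db.models.functions import TruncDay

# Hypothetical model: Post with a DateTimeField named created_time.
per_day = (
    Post.objects
        .annotate(day=TruncDay('created_time'))  # truncate each timestamp to midnight of its day
        .values('day')                           # group by the truncated day
        .annotate(count=Count('id'))             # count rows in each group
        .order_by('day')
)
# Each result is a dict such as {'day': datetime(2023, 5, 1, 0, 0), 'count': 3}.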
Example #1
Source File: views.py    From mooder with GNU Lesser General Public License v3.0
def _get_analysis_chart(self):
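        # Count posts from the last seven days, grouped by day (TruncDay on created_time)
        # and by verify status, and return a nested dict of {day string: {verify: count}}.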
        dweek = now() - timedelta(days=7)  # cutoff: seven days before the current time
        query = Post.posts \
            .filter(created_time__gte=dweek) \
            .annotate(day=TruncDay('created_time')) \
            .values('verify', 'day') \
            .annotate(cnt=Count('id')) \
            .values('day', 'verify', 'cnt') \
            .order_by()
        analysis_count = {}
        for row in query:
            day = row['day'].strftime('%Y-%m-%d')
            analysis_count[day] = analysis_count.get(day, {})
            analysis_count[day][row['verify']] = row['cnt']

        return analysis_count 
Example #2
Source File: tasks.py    From polemarch with GNU Affero General Public License v3.0
def stats(self, last: int) -> OrderedDict:
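        # Keep only records started within the last `last` days, annotate each with its
        # day, month and year, and build per-granularity stats via self._get_history_stats_by.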
        qs = self.filter(start_time__gte=now()-timedelta(days=last))
        qs = qs.annotate(
            day=dbfunc.TruncDay('start_time'),
            month=dbfunc.TruncMonth('start_time'),
            year=dbfunc.TruncYear('start_time'),
        )
        result = OrderedDict()
        result['day'] = self._get_history_stats_by(qs, 'day')
        result['month'] = self._get_history_stats_by(qs, 'month')
        result['year'] = self._get_history_stats_by(qs, 'year')
        return result 
Example #3
Source File: profile.py    From eoj3 with MIT License
def get_heatmap_data(self):
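    # Count the user's visible submissions per day over the past year (TruncDay on
    # create_time), fill in zero-count days, and map each day to week/day coordinates
    # for the heatmap.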
    now = datetime.now()
    one_year_ago = now.replace(year=now.year - 1, hour=0, minute=0, second=0, microsecond=0)
    query_ret = self.user.submission_set.filter(create_time__gte=one_year_ago, visible=True). \
      annotate(date=TruncDay('create_time')).values("date").order_by("date").annotate(count=Count('id'))
    min_date = one_year_ago - timedelta(days=1)
    stat_dict = dict()
    last_week_set, last_day_set = set(), set()
    while min_date < now:
      stat_dict[min_date] = 0
      last_day = calendar.monthrange(min_date.year, min_date.month)[1]
      if min_date.day > last_day - 7:
        last_week_set.add(min_date)
      if min_date.day > last_day - 1:
        last_day_set.add(min_date)
      min_date += timedelta(days=1)
    for r in query_ret:
      stat_dict[r["date"]] += r["count"]

    ret = []
    week_number_dict = dict()
    week_to_month = dict()
    for r in sorted(stat_dict.items(), key=lambda r: r[0]):
      year, weeknumber, weekday = r[0].isocalendar()
      if (year, weeknumber) not in week_number_dict:
        sz = len(week_number_dict)
        week_number_dict[(year, weeknumber)] = sz + 1
      weeknumber = week_number_dict[(year, weeknumber)]
      if r[0].day == 10:
        week_to_month[weeknumber] = r[0].month
      ret.append({"date": r[0].strftime("%Y-%m-%d"),
                  "lastWeek": r[0] in last_week_set, "lastDay": r[0] in last_day_set,
                  "year": year, "week": weeknumber, "day": weekday - 1, "submissions": r[1]})
    return json.dumps(ret), json.dumps(week_to_month) 
Example #4
Source File: views.py    From freesound-datasets with GNU Affero General Public License v3.0
def monitor_user(request, short_name, user_id):
    dataset = get_object_or_404(Dataset, short_name=short_name)
    if not dataset.user_is_maintainer(request.user):
        return HttpResponseRedirect(reverse('dataset', args=[dataset.short_name]))
    user = get_object_or_404(User, id=user_id)
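    # Each query below groups the user's votes per day with TruncDay('created_at') and counts
    # them: regular contributions (from_expert=False), votes with test='FA', and expert
    # curation-task votes (from_expert=True).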
    contribs = list(user.votes.filter(candidate_annotation__sound_dataset__dataset=dataset)
                        .filter(from_expert=False)
                        .annotate(day=TruncDay('created_at'))
                        .order_by("-day")
                        .values('day').annotate(count=Count('id'))
                        .values_list('day', 'count', 'candidate_annotation__taxonomy_node__name'))
    contribs_failed = list(user.votes.filter(candidate_annotation__sound_dataset__dataset=dataset)
                               .filter(test='FA')
                               .annotate(day=TruncDay('created_at'))
                               .order_by("-day")
                               .values('day').annotate(count=Count('id'))
                               .values_list('day', 'count', 'candidate_annotation__taxonomy_node__name'))
    contribs_curation_task = list(user.votes.filter(candidate_annotation__sound_dataset__dataset=dataset)
                                      .filter(from_expert=True)
                                      .annotate(day=TruncDay('created_at'))
                                      .order_by("-day")
                                      .values('day').annotate(count=Count('id'))
                                      .values_list('day', 'count', 'candidate_annotation__taxonomy_node__name', 'vote'))

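    # Append an alternating 'g'/'w' marker to each tuple: it stays the same while the day is
    # unchanged and flips when a new day starts (presumably for row striping in the template).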
    if contribs:
        contribs[0] += ('g',)
        for idx, contrib in enumerate(contribs):
            if idx>0:
                if contrib[0] == contribs[idx-1][0]:
                    contribs[idx] += (contribs[idx-1][3],)
                else:
                    contribs[idx] += ('g',) if contribs[idx-1][3] == 'w' else ('w',)

    if contribs_curation_task:
        contribs_curation_task[0] += ('g',)
        for idx, contrib in enumerate(contribs_curation_task):
            if idx>0:
                if contrib[0] == contribs_curation_task[idx-1][0]:
                    contribs_curation_task[idx] += (contribs_curation_task[idx-1][4],)
                else:
                    contribs_curation_task[idx] += ('g',) if contribs_curation_task[idx-1][4] == 'w' else ('w',)

    if contribs_failed:
        contribs_failed[0] += ('g',)
        for idx, contrib in enumerate(contribs_failed):
            if idx>0:
                if contrib[0] == contribs_failed[idx-1][0]:
                    contribs_failed[idx] += (contribs_failed[idx-1][3],)
                else:
                    contribs_failed[idx] += ('g',) if contribs_failed[idx-1][3] == 'w' else ('w',)

    return render(request, 'monitor/monitor_user.html', {'dataset': dataset,
                                                         'username': user.username,
                                                         'contribs': contribs,
                                                         'contribs_curation': contribs_curation_task,
                                                         'contribs_failed': contribs_failed}) 
Example #5
Source File: tasks.py    From freesound-datasets with GNU Affero General Public License v3.0
def compute_dataset_num_contributions_per_day(store_key, dataset_id):
    logger.info('Start computing data for {0}'.format(store_key))
    try:
        dataset = Dataset.objects.get(id=dataset_id)

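        # Count non-expert and expert votes per day (TruncDay on created_at), fill in
        # zero-count days between the first vote and today, and store both series as JSON.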
        contributions = Vote.objects\
            .filter(candidate_annotation__sound_dataset__dataset=dataset)\
            .filter(from_expert=False)\
            .annotate(day=TruncDay('created_at'))\
            .values('day')\
            .annotate(count=Count('id'))\
            .values('day', 'count')

        contributions_expert = Vote.objects\
            .filter(candidate_annotation__sound_dataset__dataset=dataset)\
            .filter(from_expert=True)\
            .annotate(day=TruncDay('created_at'))\
            .values('day')\
            .annotate(count=Count('id'))\
            .values('day', 'count')

        start_date = Vote.objects\
            .filter(candidate_annotation__sound_dataset__dataset=dataset)\
            .order_by('created_at')[0].created_at.replace(tzinfo=None)
        end_date = datetime.datetime.now()
        dates = [str(start_date + datetime.timedelta(days=x))[:10] for x in range(0, (end_date - start_date).days)]

        contributions_per_day = {d: 0 for d in dates}
        contributions_per_day.update({str(o['day'])[:10]: o['count'] for o in contributions})

        contributions_per_day_expert = {d: 0 for d in dates}
        contributions_per_day_expert.update({str(o['day'])[:10]: o['count'] for o in contributions_expert})

        store.set(store_key, {
            'contribution_per_day': json.dumps([[day, count]
                                                for day, count in contributions_per_day.items()]),
            'contribution_per_day_expert': json.dumps([[day, count]
                                                      for day, count in contributions_per_day_expert.items()])
        })

        logger.info('Finished computing data for {0}'.format(store_key))

    except Dataset.DoesNotExist:
        pass 
Example #6
Source File: tasks.py    From freesound-datasets with GNU Affero General Public License v3.0
def compute_dataset_num_ground_truth_per_day(store_key, dataset_id):
    logger.info('Start computing data for {0}'.format(store_key))
    try:
        dataset = Dataset.objects.get(id=dataset_id)

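        # Same pattern as the previous task: count ground-truth annotations per day with
        # TruncDay, split by whether they came from propagation, fill in empty days, and
        # store both sorted series as JSON.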
        num_ground_truth_not_from_propagation = GroundTruthAnnotation.objects\
            .filter(sound_dataset__dataset=dataset)\
            .filter(from_propagation=False)\
            .annotate(day=TruncDay('created_at'))\
            .values('day')\
            .annotate(count=Count('id'))\
            .values('day', 'count')

        num_ground_truth_from_propagation = GroundTruthAnnotation.objects\
            .filter(sound_dataset__dataset=dataset)\
            .filter(from_propagation=True)\
            .annotate(day=TruncDay('created_at'))\
            .values('day')\
            .annotate(count=Count('id'))\
            .values('day', 'count')

        start_date = GroundTruthAnnotation.objects\
            .filter(sound_dataset__dataset=dataset)\
            .order_by('created_at')[0].created_at.replace(tzinfo=None)
        end_date = datetime.datetime.now()
        dates = [str(start_date + datetime.timedelta(days=x))[:10] for x in range(0, (end_date - start_date).days)]

        num_ground_truth_not_from_propagation_per_day = {d: 0 for d in dates}
        num_ground_truth_not_from_propagation_per_day.update({str(o['day'])[:10]: o['count']
                                                              for o in num_ground_truth_not_from_propagation})
        num_ground_truth_from_propagation_per_day = {d: 0 for d in dates}
        num_ground_truth_from_propagation_per_day.update({str(o['day'])[:10]: o['count']
                                                          for o in num_ground_truth_from_propagation})

        store.set(store_key, {
            'num_ground_truth_not_from_propagation_per_day':
                json.dumps(sorted([[day, count]
                                   for day, count in num_ground_truth_not_from_propagation_per_day.items()],
                                  key=lambda x: x[0])),
            'num_ground_truth_from_propagation_per_day':
                json.dumps(sorted([[day, count]
                                   for day, count in num_ground_truth_from_propagation_per_day.items()],
                                  key=lambda x: x[0]))
        })

        logger.info('Finished computing data for {0}'.format(store_key))

    except Dataset.DoesNotExist:
        pass