Python numpy.timedelta64() Examples
The following are 30 code examples of numpy.timedelta64(), collected from open-source projects. The original project, source file, and license are noted above each example. You may also want to check out the other available functions and classes of the numpy module.
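Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what numpy.timedelta64() represents: a fixed-width duration with an explicit unit that combines naturally with numpy.datetime64 values.

import numpy as np

# A timedelta64 is a duration with an explicit unit ('h' = hours, 'm' = minutes, ...).
one_hour = np.timedelta64(1, 'h')
five_min = np.timedelta64(5, 'm')

# Durations shift datetime64 values and divide/convert between units.
start = np.datetime64('2014-01-01T12:00')
print(start + one_hour)                    # 2014-01-01T13:00
print(one_hour / five_min)                 # 12.0
print(one_hour.astype('timedelta64[s]'))   # 3600 seconds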
Example #1
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def test_split(self):
    sampling = numpy.timedelta64(5, 's')
    points = 100000
    ts = carbonara.TimeSerie.from_data(
        timestamps=list(map(datetime.datetime.utcfromtimestamp,
                            six.moves.range(points))),
        values=list(six.moves.range(points)))
    agg = self._resample(ts, sampling, 'mean')

    grouped_points = list(agg.split())

    self.assertEqual(
        math.ceil((points / sampling.astype(float))
                  / carbonara.SplitKey.POINTS_PER_SPLIT),
        len(grouped_points))
    self.assertEqual("0.0",
                     str(carbonara.SplitKey(grouped_points[0][0], 0)))
    # 3600 × 5s = 5 hours
    self.assertEqual(datetime64(1970, 1, 1, 5),
                     grouped_points[1][0])
    self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT,
                     len(grouped_points[0][1]))
Example #2
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 6 votes |
def test_binary_operator_ts_on_right(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44),
    ])
    self.trigger_processing()

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, "mean")],
        ["*", 2, ["metric", str(self.metric.id), "mean"]],
        granularities=[numpy.timedelta64(1, 'h')])
    self.assertEqual({str(self.metric.id): {
        "mean": [(datetime64(2014, 1, 1, 12, 0, 0),
                  numpy.timedelta64(1, 'h'), 138),
                 (datetime64(2014, 1, 1, 13, 0, 0),
                  numpy.timedelta64(1, 'h'), 84),
                 (datetime64(2014, 1, 1, 14, 0, 0),
                  numpy.timedelta64(1, 'h'), 8),
                 (datetime64(2014, 1, 1, 15, 0, 0),
                  numpy.timedelta64(1, 'h'), 88)]
    }}, values)
Example #3
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def test_aggregation_std_with_unique(self):
    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0)], [3])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), 'std')
    self.assertEqual(0, len(ts), ts.values)

    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0),
         datetime64(2014, 1, 1, 12, 0, 4),
         datetime64(2014, 1, 1, 12, 0, 9),
         datetime64(2014, 1, 1, 12, 1, 6)],
        [3, 6, 5, 9])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), "std")

    self.assertEqual(1, len(ts))
    self.assertEqual(1.5275252316519465,
                     ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
Example #4
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def _do_test_aggregation(self, name, v1, v2, v3):
    # NOTE(gordc): test data must have a group of odd count to properly
    # test 50pct test case.
    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0),
         datetime64(2014, 1, 1, 12, 0, 10),
         datetime64(2014, 1, 1, 12, 0, 20),
         datetime64(2014, 1, 1, 12, 0, 30),
         datetime64(2014, 1, 1, 12, 0, 40),
         datetime64(2014, 1, 1, 12, 1, 0),
         datetime64(2014, 1, 1, 12, 1, 10),
         datetime64(2014, 1, 1, 12, 1, 20),
         datetime64(2014, 1, 1, 12, 1, 30),
         datetime64(2014, 1, 1, 12, 1, 40),
         datetime64(2014, 1, 1, 12, 1, 50),
         datetime64(2014, 1, 1, 12, 2, 0),
         datetime64(2014, 1, 1, 12, 2, 10)],
        [3, 5, 2, 3, 5, 8, 11, 22, 10, 42, 9, 4, 2])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), name)

    self.assertEqual(3, len(ts))
    self.assertEqual(v1, ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
    self.assertEqual(v2, ts[datetime64(2014, 1, 1, 12, 1, 0)][1])
    self.assertEqual(v3, ts[datetime64(2014, 1, 1, 12, 2, 0)][1])
Example #5
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def test_derived_hole(self):
    ts = carbonara.TimeSerie.from_data(
        [datetime.datetime(2014, 1, 1, 12, 0, 0),
         datetime.datetime(2014, 1, 1, 12, 0, 4),
         datetime.datetime(2014, 1, 1, 12, 1, 2),
         datetime.datetime(2014, 1, 1, 12, 1, 14),
         datetime.datetime(2014, 1, 1, 12, 1, 24),
         datetime.datetime(2014, 1, 1, 12, 3, 2),
         datetime.datetime(2014, 1, 1, 12, 3, 22),
         datetime.datetime(2014, 1, 1, 12, 3, 42),
         datetime.datetime(2014, 1, 1, 12, 4, 9)],
        [50, 55, 65, 66, 70, 105, 108, 200, 202])
    ts = self._resample(ts, numpy.timedelta64(60, 's'), 'last',
                        derived=True)

    self.assertEqual(4, len(ts))
    self.assertEqual(
        [(datetime64(2014, 1, 1, 12, 0, 0), 5),
         (datetime64(2014, 1, 1, 12, 1, 0), 4),
         (datetime64(2014, 1, 1, 12, 3, 0), 92),
         (datetime64(2014, 1, 1, 12, 4, 0), 2)],
        list(ts.fetch(
            from_timestamp=datetime64(2014, 1, 1, 12))))
Example #6
Source File: test_indexer.py From gnocchi with Apache License 2.0 | 6 votes |
def test_get_archive_policy(self):
    ap = self.index.get_archive_policy("low")
    self.assertEqual({
        'back_window': 0,
        'aggregation_methods':
        set(self.conf.archive_policy.default_aggregation_methods),
        'definition': [
            {u'granularity': numpy.timedelta64(5, 'm'), u'points': 12,
             u'timespan': numpy.timedelta64(3600, 's')},
            {u'granularity': numpy.timedelta64(3600, 's'), u'points': 24,
             u'timespan': numpy.timedelta64(86400, 's')},
            {u'granularity': numpy.timedelta64(86400, 's'), u'points': 30,
             u'timespan': numpy.timedelta64(2592000, 's')}],
        'name': u'low'}, dict(ap))
Example #7
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def test_serialize(self):
    ts = {'sampling': numpy.timedelta64(500, 'ms'), 'agg': 'mean'}
    tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
    tsb.set_values(numpy.array([
        (datetime64(2014, 1, 1, 12, 0, 0, 1234), 3),
        (datetime64(2014, 1, 1, 12, 0, 0, 321), 6),
        (datetime64(2014, 1, 1, 12, 1, 4, 234), 5),
        (datetime64(2014, 1, 1, 12, 1, 9, 32), 7),
        (datetime64(2014, 1, 1, 12, 2, 12, 532), 1)],
        dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
        before_truncate_callback=functools.partial(
            self._resample_and_merge, agg_dict=ts))
    key = ts['return'].get_split_key()
    o, s = ts['return'].serialize(key)
    self.assertEqual(ts['return'],
                     carbonara.AggregatedTimeSerie.unserialize(
                         s, key, ts['return'].aggregation))
Example #8
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def test_no_truncation(self):
    ts = {'sampling': numpy.timedelta64(60, 's'), 'agg': 'mean'}
    tsb = carbonara.BoundTimeSerie()

    for i in six.moves.range(1, 11):
        tsb.set_values(numpy.array([
            (datetime64(2014, 1, 1, 12, i, i), float(i))],
            dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
            before_truncate_callback=functools.partial(
                self._resample_and_merge, agg_dict=ts))
        tsb.set_values(numpy.array([
            (datetime64(2014, 1, 1, 12, i, i + 1), float(i + 1))],
            dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
            before_truncate_callback=functools.partial(
                self._resample_and_merge, agg_dict=ts))
        self.assertEqual(i, len(list(ts['return'].fetch())))
Example #9
Source File: _dtype.py From recruit with Apache License 2.0 | 6 votes |
def _name_get(dtype):
    # provides dtype.name.__get__
    if dtype.isbuiltin == 2:
        # user dtypes don't promise to do anything special
        return dtype.type.__name__

    # Builtin classes are documented as returning a "bit name"
    name = dtype.type.__name__

    # handle bool_, str_, etc
    if name[-1] == '_':
        name = name[:-1]

    # append bit counts to str, unicode, and void
    if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
        name += "{}".format(dtype.itemsize * 8)

    # append metadata to datetimes
    elif dtype.type in (np.datetime64, np.timedelta64):
        name += _datetime_metadata_str(dtype)

    return name
Example #10
Source File: test_storage.py From gnocchi with Apache License 2.0 | 6 votes |
def test_get_splits_and_unserialize(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(5, 'm'))

    results = self.storage._get_splits_and_unserialize({
        self.metric: {
            aggregation: [
                carbonara.SplitKey(
                    numpy.datetime64(1387800000, 's'),
                    numpy.timedelta64(5, 'm')),
            ],
        },
    })[self.metric][aggregation]
    self.assertEqual(1, len(results))
    self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie)
    # Assert it's not an empty one, since it is not corrupted
    self.assertGreater(len(results[0]), 0)
    self.assertEqual(results[0].aggregation, aggregation)
Example #11
Source File: carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def truncate(self, oldest_point=None):
    """Truncate the time series up to oldest_point excluded.

    :param oldest_point: Oldest point to keep from, this excluded.
                         Default is the aggregation timespan.
    :type oldest_point: numpy.datetime64 or numpy.timedelta64
    :return: The oldest point that could have been kept.
    """
    last = self.last
    if last is None:
        return
    if oldest_point is None:
        oldest_point = self.aggregation.timespan
        if oldest_point is None:
            return
    if isinstance(oldest_point, numpy.timedelta64):
        oldest_point = last - oldest_point
    index = numpy.searchsorted(self.ts['timestamps'], oldest_point,
                               side='right')
    self.ts = self.ts[index:]
    return oldest_point
Example #12
Source File: test_data_loader.py From aospy with Apache License 2.0 | 6 votes |
def test_maybe_apply_time_shift(data_loader, ds_with_time_bounds, ds_inst,
                                var_name, generate_file_set_args):
    ds = xr.decode_cf(ds_with_time_bounds)
    da = ds[var_name]

    result = data_loader._maybe_apply_time_shift(
        da.copy(), **generate_file_set_args)[TIME_STR]
    assert result.identical(da[TIME_STR])

    offset = data_loader._maybe_apply_time_shift(
        da.copy(), {'days': 1}, **generate_file_set_args)
    result = offset[TIME_STR]
    expected = da[TIME_STR] + np.timedelta64(1, 'D')
    expected[TIME_STR] = expected
    assert result.identical(expected)
Example #13
Source File: test_data_loader.py From aospy with Apache License 2.0 | 6 votes |
def test_maybe_apply_time_shift_inst(gfdl_data_loader, ds_inst, var_name,
                                     generate_file_set_args):
    ds_inst = xr.decode_cf(ds_inst)
    generate_file_set_args['dtype_in_time'] = 'inst'
    generate_file_set_args['intvl_in'] = '3hr'
    da = ds_inst[var_name]
    result = gfdl_data_loader._maybe_apply_time_shift(
        da.copy(), **generate_file_set_args)[TIME_STR]

    expected = da[TIME_STR] + np.timedelta64(-3, 'h')
    expected[TIME_STR] = expected
    assert result.identical(expected)

    generate_file_set_args['intvl_in'] = 'daily'
    da = ds_inst[var_name]
    result = gfdl_data_loader._maybe_apply_time_shift(
        da.copy(), **generate_file_set_args)[TIME_STR]

    expected = da[TIME_STR]
    expected[TIME_STR] = expected
    assert result.identical(expected)
Example #14
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 6 votes |
def test_aggregated_different_archive_no_overlap(self):
    tsc1 = {'sampling': numpy.timedelta64(60, 's'), 'size': 50,
            'agg': 'mean', "name": "all"}
    tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
    tsc2 = {'sampling': numpy.timedelta64(60, 's'), 'size': 50,
            'agg': 'mean', "name": "all"}
    tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

    tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 46, 4), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc1))
    tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 9, 1, 4), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc2))

    dtfrom = datetime64(2014, 1, 1, 11, 0, 0)
    self.assertRaises(exceptions.UnAggregableTimeseries,
                      processor.aggregated,
                      [tsc1['return'], tsc2['return']],
                      from_timestamp=dtfrom,
                      operations=["aggregate", "mean", [
                          "metric", ["all", "mean"]]])
Example #15
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def test_split_key(self):
    self.assertEqual(
        numpy.datetime64("2014-10-07"),
        carbonara.SplitKey.from_timestamp_and_sampling(
            numpy.datetime64("2015-01-01T15:03"),
            numpy.timedelta64(3600, 's')))
    self.assertEqual(
        numpy.datetime64("2014-12-31 18:00"),
        carbonara.SplitKey.from_timestamp_and_sampling(
            numpy.datetime64("2015-01-01 15:03:58"),
            numpy.timedelta64(58, 's')))

    key = carbonara.SplitKey.from_timestamp_and_sampling(
        numpy.datetime64("2015-01-01 15:03"),
        numpy.timedelta64(3600, 's'))

    self.assertGreater(key, numpy.datetime64("1970"))
    self.assertGreaterEqual(key, numpy.datetime64("1970"))
Example #16
Source File: carbonara.py From gnocchi with Apache License 2.0 | 6 votes |
def __init__(self, ts, granularity, start=None):
    # NOTE(sileht): The whole class assumes ts is ordered and has no
    # duplicate timestamps; it uses numpy.unique, which sorts the list,
    # but we always assume the order to be the same as the input.
    self.granularity = granularity
    self.can_derive = isinstance(granularity, numpy.timedelta64)
    self.start = start
    if start is None:
        self._ts = ts
        self._ts_for_derive = ts
    else:
        self._ts = ts[numpy.searchsorted(ts['timestamps'], start):]
        if self.can_derive:
            start_derive = start - granularity
            self._ts_for_derive = ts[
                numpy.searchsorted(ts['timestamps'], start_derive):
            ]
    if self.can_derive:
        self.indexes = round_timestamp(self._ts['timestamps'], granularity)
    elif calendar.GROUPINGS.get(granularity):
        self.indexes = calendar.GROUPINGS.get(granularity)(
            self._ts['timestamps'])
    self.tstamps, self.counts = numpy.unique(self.indexes,
                                             return_counts=True)
Example #17
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 6 votes |
def test_aggregated_different_archive_no_overlap2(self):
    tsc1 = {'sampling': numpy.timedelta64(60, 's'), 'size': 50,
            'agg': 'mean'}
    tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
    tsc2 = carbonara.AggregatedTimeSerie(
        carbonara.Aggregation('mean', numpy.timedelta64(60, 's'), None))

    tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc1))
    metric = mock.Mock(id=str(uuid.uuid4()))
    ref = processor.MetricReference(metric, "mean")
    self.assertRaises(exceptions.UnAggregableTimeseries,
                      processor.aggregated,
                      [tsc1['return'], (ref, tsc2)],
                      operations=["aggregate", "mean",
                                  ["metric", tsc1['return'][0].lookup_key,
                                   ref.lookup_key]])
Example #18
Source File: test_storage.py From gnocchi with Apache License 2.0 | 6 votes |
def test_corrupted_split(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(5, 'm'))

    with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
                    side_effect=carbonara.InvalidData()):
        results = self.storage._get_splits_and_unserialize({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1387800000, 's'),
                        numpy.timedelta64(5, 'm'))
                ],
            },
        })[self.metric][aggregation]
        self.assertEqual(1, len(results))
        self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie)
        # Assert it's an empty one since corrupted
        self.assertEqual(0, len(results[0]))
        self.assertEqual(results[0].aggregation, aggregation)
Example #19
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_add_and_get_measures_with_holes(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.incoming.add_measures(metric2.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6),
        incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.trigger_processing([self.metric, metric2])

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, 'mean'),
         processor.MetricReference(metric2, 'mean')],
        operations=["aggregate", "mean", [
            "metric",
            [str(self.metric.id), "mean"],
            [str(metric2.id), "mean"],
        ]])["aggregated"]
    self.assertEqual([
        (datetime64(2014, 1, 1, 0, 0, 0),
         numpy.timedelta64(1, 'D'), 18.875),
        (datetime64(2014, 1, 1, 12, 0, 0),
         numpy.timedelta64(1, 'h'), 18.875),
        (datetime64(2014, 1, 1, 12, 0, 0),
         numpy.timedelta64(5, 'm'), 39.0),
        (datetime64(2014, 1, 1, 12, 5, 0),
         numpy.timedelta64(5, 'm'), 11.0),
        (datetime64(2014, 1, 1, 12, 10, 0),
         numpy.timedelta64(5, 'm'), 22.0)
    ], values)
Example #20
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_resample(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44),
    ])
    self.incoming.add_measures(metric2.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4),
    ])
    self.trigger_processing([self.metric, metric2])

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, "mean"),
         processor.MetricReference(metric2, "mean")],
        ["resample", "mean", numpy.timedelta64(1, 'D'),
         ["metric",
          [str(self.metric.id), "mean"],
          [str(metric2.id), "mean"]]],
        granularities=[numpy.timedelta64(1, 'h')])
    self.assertEqual({
        str(self.metric.id): {
            "mean": [(datetime64(2014, 1, 1, 0, 0, 0),
                      numpy.timedelta64(1, 'D'), 39.75)]
        },
        str(metric2.id): {
            "mean": [(datetime64(2014, 1, 1, 0, 0, 0),
                      numpy.timedelta64(1, 'D'), 4.75)]
        }
    }, values)
Example #21
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_resample_minus_2_on_right(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44),
    ])
    self.incoming.add_measures(metric2.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4),
    ])
    self.trigger_processing([self.metric, metric2])

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, "mean"),
         processor.MetricReference(metric2, "mean")],
        ["-",
         ["resample", "mean", numpy.timedelta64(1, 'D'),
          ["metric",
           [str(self.metric.id), "mean"],
           [str(metric2.id), "mean"]]],
         2],
        granularities=[numpy.timedelta64(1, 'h')])
    self.assertEqual({
        str(self.metric.id): {
            "mean": [(datetime64(2014, 1, 1, 0, 0, 0),
                      numpy.timedelta64(1, 'D'), 37.75)]
        },
        str(metric2.id): {
            "mean": [(datetime64(2014, 1, 1, 0, 0, 0),
                      numpy.timedelta64(1, 'D'), 2.75)]
        }
    }, values)
Example #22
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_resample_minus_2_on_left(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44),
    ])
    self.incoming.add_measures(metric2.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 41), 2),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 10), 4),
    ])
    self.trigger_processing([self.metric, metric2])

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, "mean"),
         processor.MetricReference(metric2, "mean")],
        ["-", 2,
         ["resample", "mean", numpy.timedelta64(1, 'D'),
          ["metric",
           [str(self.metric.id), "mean"],
           [str(metric2.id), "mean"]]]],
        granularities=[numpy.timedelta64(1, 'h')])
    self.assertEqual({
        str(self.metric.id): {
            "mean": [(datetime64(2014, 1, 1, 0, 0, 0),
                      numpy.timedelta64(1, 'D'), -37.75)]
        },
        str(metric2.id): {
            "mean": [(datetime64(2014, 1, 1, 0, 0, 0),
                      numpy.timedelta64(1, 'D'), -2.75)]
        }
    }, values)
Example #23
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_binary_operator_ts_on_left(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44),
    ])
    self.trigger_processing()

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, "mean")],
        ["*", ["metric", str(self.metric.id), "mean"], 2],
        granularities=[numpy.timedelta64(1, 'h')])
    self.assertEqual({str(self.metric.id): {
        "mean": [
            (datetime64(2014, 1, 1, 12, 0, 0),
             numpy.timedelta64(1, 'h'), 138),
            (datetime64(2014, 1, 1, 13, 0, 0),
             numpy.timedelta64(1, 'h'), 84),
            (datetime64(2014, 1, 1, 14, 0, 0),
             numpy.timedelta64(1, 'h'), 8),
            (datetime64(2014, 1, 1, 15, 0, 0),
             numpy.timedelta64(1, 'h'), 88)]
    }}, values)
Example #24
Source File: test_carbonara.py From gnocchi with Apache License 2.0 | 5 votes |
def test_resample(self):
    ts = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0),
         datetime64(2014, 1, 1, 12, 0, 4),
         datetime64(2014, 1, 1, 12, 0, 9),
         datetime64(2014, 1, 1, 12, 0, 11),
         datetime64(2014, 1, 1, 12, 0, 12)],
        [3, 5, 6, 2, 4])
    agg_ts = self._resample(ts, numpy.timedelta64(5, 's'), 'mean')
    self.assertEqual(3, len(agg_ts))

    agg_ts = agg_ts.resample(numpy.timedelta64(10, 's'))
    self.assertEqual(2, len(agg_ts))
    self.assertEqual(5, agg_ts[0][1])
    self.assertEqual(3, agg_ts[1][1])
Example #25
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_ternary_operator_clip_min_max_ts_on_left(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 13, 1, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 14, 2, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 15, 3, 45), 44),
    ])
    self.trigger_processing()

    values = processor.get_measures(
        self.storage,
        [processor.MetricReference(self.metric, "mean")],
        ["clip", ["metric", str(self.metric.id), "mean"], 5, 60],
        granularities=[numpy.timedelta64(1, 'h')])
    self.assertEqual({str(self.metric.id): {
        "mean": [
            (datetime64(2014, 1, 1, 12, 0, 0),
             numpy.timedelta64(1, 'h'), 60),
            (datetime64(2014, 1, 1, 13, 0, 0),
             numpy.timedelta64(1, 'h'), 42),
            (datetime64(2014, 1, 1, 14, 0, 0),
             numpy.timedelta64(1, 'h'), 5),
            (datetime64(2014, 1, 1, 15, 0, 0),
             numpy.timedelta64(1, 'h'), 44)]
    }}, values)
Example #26
Source File: test_aggregates.py From gnocchi with Apache License 2.0 | 5 votes |
def test_aggregated_different_archive_overlap_edge_missing2(self):
    tsc1 = {'sampling': numpy.timedelta64(60, 's'), 'size': 10,
            'agg': 'mean'}
    tsb1 = carbonara.BoundTimeSerie(block_size=tsc1['sampling'])
    tsc2 = {'sampling': numpy.timedelta64(60, 's'), 'size': 10,
            'agg': 'mean'}
    tsb2 = carbonara.BoundTimeSerie(block_size=tsc2['sampling'])

    tsb1.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 3, 0), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc1))
    tsb2.set_values(numpy.array([(datetime64(2014, 1, 1, 11, 0, 0), 4),
                                 (datetime64(2014, 1, 1, 12, 3, 0), 4)],
                                dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
                    before_truncate_callback=functools.partial(
                        self._resample_and_merge, agg_dict=tsc2))

    output = processor.aggregated(
        [tsc1['return'], tsc2['return']],
        operations=["aggregate", "mean", [
            "metric",
            tsc1['return'][0].lookup_key,
            tsc2['return'][0].lookup_key
        ]])["aggregated"]
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 3, 0),
         numpy.timedelta64(60000000000, 'ns'), 4.0),
    ], list(output))
Example #27
Source File: test_archive_policy.py From gnocchi with Apache License 2.0 | 5 votes |
def test_max_block_size(self):
    ap = archive_policy.ArchivePolicy("foobar",
                                      0,
                                      [(20, 60),
                                       (10, 300),
                                       (10, 5)],
                                      ["-mean", "-last"])
    self.assertEqual(ap.max_block_size, numpy.timedelta64(300, 's'))
Example #28
Source File: test_arrayprint.py From recruit with Apache License 2.0 | 5 votes |
def test_0d_arrays(self):
    unicode = type(u'')
    assert_equal(unicode(np.array(u'café', '<U4')), u'café')

    if sys.version_info[0] >= 3:
        assert_equal(repr(np.array('café', '<U4')),
                     "array('café', dtype='<U4')")
    else:
        assert_equal(repr(np.array(u'café', '<U4')),
                     "array(u'caf\\xe9', dtype='<U4')")
    assert_equal(str(np.array('test', np.str_)), 'test')

    a = np.zeros(1, dtype=[('a', '<i4', (3,))])
    assert_equal(str(a[0]), '([0, 0, 0],)')

    assert_equal(repr(np.datetime64('2005-02-25')[...]),
                 "array('2005-02-25', dtype='datetime64[D]')")

    assert_equal(repr(np.timedelta64('10', 'Y')[...]),
                 "array(10, dtype='timedelta64[Y]')")

    # repr of 0d arrays is affected by printoptions
    x = np.array(1)
    np.set_printoptions(formatter={'all': lambda x: "test"})
    assert_equal(repr(x), "array(test)")
    # str is unaffected
    assert_equal(str(x), "1")

    # check `style` arg raises
    assert_warns(DeprecationWarning, np.array2string,
                 np.array(1.), style=repr)
    # but not in legacy mode
    np.array2string(np.array(1.), style=repr, legacy='1.13')
    # gh-10934 style was broken in legacy mode, check it works
    np.array2string(np.array(1.), legacy='1.13')
Example #29
Source File: test_deprecations.py From recruit with Apache License 2.0 | 5 votes |
def test_3_tuple(self):
    for cls in (np.datetime64, np.timedelta64):
        # two valid uses - (unit, num) and (unit, num, den, None)
        self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
        self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))

        # trying to use the event argument, removed in 1.7.0, is deprecated
        # it used to be a uint8
        self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
        self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
        self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
        self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
Example #30
Source File: test_datetime.py From recruit with Apache License 2.0 | 5 votes |
def test_compare_generic_nat(self):
    # regression tests for gh-6452
    assert_(np.datetime64('NaT') !=
            np.datetime64('2000') + np.timedelta64('NaT'))
    assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
    assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))