Python numpy.ediff1d() Examples
The following are 30 code examples of numpy.ediff1d(), collected from open source projects. np.ediff1d(ary, to_end=None, to_begin=None) returns the differences between consecutive elements of the flattened input array, optionally prepending to_begin and appending to_end to the result. The source project and file for each example are listed above its code. You may also want to check out all available functions and classes of the numpy module.
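Before the project examples, here is a minimal standalone sketch of how np.ediff1d behaves; the input values are chosen purely for illustration.

import numpy as np

a = np.array([1, 2, 4, 7, 0])

# Differences between consecutive elements of the flattened input.
print(np.ediff1d(a))                                   # [ 1  2  3 -7]

# to_begin / to_end values are prepended and appended to the result.
print(np.ediff1d(a, to_begin=-99, to_end=np.array([88, 99])))
# [-99   1   2   3  -7  88  99]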
Example #1
Source File: test_arraysetops.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def test_ediff1d(self):
    zero_elem = np.array([])
    one_elem = np.array([1])
    two_elem = np.array([1, 2])

    assert_array_equal([], ediff1d(zero_elem))
    assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
    assert_array_equal([0], ediff1d(zero_elem, to_end=0))
    assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
    assert_array_equal([], ediff1d(one_elem))
    assert_array_equal([1], ediff1d(two_elem))
    assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
    assert_array_equal([5, 6, 1, 7, 8], ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
    assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
    assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
    assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
    assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
Example #2
Source File: test_arraysetops.py From recruit with Apache License 2.0 | 6 votes |
def test_ediff1d(self):
    zero_elem = np.array([])
    one_elem = np.array([1])
    two_elem = np.array([1, 2])

    assert_array_equal([], ediff1d(zero_elem))
    assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
    assert_array_equal([0], ediff1d(zero_elem, to_end=0))
    assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
    assert_array_equal([], ediff1d(one_elem))
    assert_array_equal([1], ediff1d(two_elem))
    assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
    assert_array_equal([5, 6, 1, 7, 8], ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
    assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
    assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
    assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
    assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
Example #3
Source File: ljcluster.py From PyChemia with MIT License | 6 votes |
def get_duplicates(self, ids, tolerance=None, fast=True):
    ret = {}
    selection = self.ids_sorted(ids)
    values = np.array([self.value(i) for i in selection])
    if len(values) == 0:
        return ret
    diffs = np.ediff1d(values)

    for i in range(len(diffs)):
        idiff = diffs[i]
        if idiff < self.value_tol:
            ident1 = selection[i]
            ident2 = selection[i + 1]
            pcm_log.debug('Testing distances between %s and %s' % (str(ident1), str(ident2)))
            distance = self.distance(ident1, ident2)
            if distance < self.distance_tolerance:
                pcm_log.debug('Distance %7.3f < %7.3f' % (distance, self.distance_tolerance))
                ret[ident2] = ident1
    if len(ret) > 0:
        pcm_log.debug('Number of duplicates %d' % len(ret))
    return ret
Example #4
Source File: MatrixFactorization_Cython.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 6 votes |
def _estimate_user_factors(self, ITEM_factors_Y):
    profile_length = np.ediff1d(self.URM_train.indptr)
    profile_length_sqrt = np.sqrt(profile_length)

    # Estimating the USER_factors using ITEM_factors_Y
    if self.verbose:
        print("{}: Estimating user factors... ".format(self.algorithm_name))

    USER_factors = self.URM_train.dot(ITEM_factors_Y)

    # Divide every row for the sqrt of the profile length
    for user_index in range(self.n_users):
        if profile_length_sqrt[user_index] > 0:
            USER_factors[user_index, :] /= profile_length_sqrt[user_index]

    if self.verbose:
        print("{}: Estimating user factors... done!".format(self.algorithm_name))

    return USER_factors
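Several of the recommender-system examples in this list rely on the same sparse-matrix idiom: for a SciPy CSR (or CSC) matrix, np.ediff1d(matrix.indptr) gives the number of stored entries per row (or column). A minimal sketch, with an illustrative toy matrix rather than data from the project above:

import numpy as np
import scipy.sparse as sps

URM = sps.csr_matrix(np.array([[1, 0, 1],
                               [0, 0, 0],
                               [1, 1, 1]], dtype=np.float32))

# indptr marks where each row's data starts, so consecutive differences
# are the per-row interaction counts ("profile lengths").
profile_length = np.ediff1d(URM.indptr)
print(profile_length)          # [2 0 3]

# Users with no interactions, as in the cold-user masks used above.
cold_user_mask = profile_length == 0
print(cold_user_mask)          # [False  True False]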
Example #5
Source File: test_arraysetops.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 6 votes |
def test_ediff1d(self):
    zero_elem = np.array([])
    one_elem = np.array([1])
    two_elem = np.array([1, 2])

    assert_array_equal([], ediff1d(zero_elem))
    assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
    assert_array_equal([0], ediff1d(zero_elem, to_end=0))
    assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
    assert_array_equal([], ediff1d(one_elem))
    assert_array_equal([1], ediff1d(two_elem))
    assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
    assert_array_equal([5, 6, 1, 7, 8], ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
    assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
    assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
    assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
    assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
Example #6
Source File: SpectralCF_RecommenderWrapper.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 6 votes |
def __init__(self, URM_train, batch_size):
    self.batch_size = batch_size

    URM_train = sps.csr_matrix(URM_train)
    self.n_users, self.n_items = URM_train.shape

    self.R = np.zeros((self.n_users, self.n_items), dtype=np.float32)

    self._users_with_interactions = np.ediff1d(URM_train.indptr) >= 1
    self._users_with_interactions = np.arange(self.n_users, dtype=np.int64)[self._users_with_interactions]
    self._users_with_interactions = list(self._users_with_interactions)

    self.train_items, self.test_set = {}, {}

    for user_index in range(self.n_users):
        start_pos = URM_train.indptr[user_index]
        end_pos = URM_train.indptr[user_index + 1]

        train_items = URM_train.indices[start_pos:end_pos]

        self.R[user_index][train_items] = 1
        self.train_items[user_index] = list(train_items)
Example #7
Source File: test_base_execute.py From mars with Apache License 2.0 | 6 votes |
def testEdiff1d(self):
    data = np.array([1, 2, 4, 7, 0])

    x = tensor(data, chunk_size=2)
    t = ediff1d(x)

    res = self.executor.execute_tensor(t, concat=True)[0]
    expected = np.ediff1d(data)
    np.testing.assert_equal(res, expected)

    to_begin = tensor(-99, chunk_size=2)
    to_end = tensor([88, 99], chunk_size=2)
    t = ediff1d(x, to_begin=to_begin, to_end=to_end)

    res = self.executor.execute_tensor(t, concat=True)[0]
    expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
    np.testing.assert_equal(res, expected)

    data = [[1, 2, 4], [1, 6, 24]]

    t = ediff1d(tensor(data, chunk_size=2))
    res = self.executor.execute_tensor(t, concat=True)[0]
    expected = np.ediff1d(data)
    np.testing.assert_equal(res, expected)
Example #8
Source File: dynamics.py From pyMeteo with BSD 3-Clause "New" or "Revised" License | 6 votes |
def integral_dt(i, t):
    n = len(t)
    iavg = np.empty((n - 1), np.float32)
    dt = np.empty((n - 1), np.float32)
    iavg[0:n-1] = 0.5 * (i[0:n-1] + i[1:n])
    dt = np.ediff1d(t)
    integral = np.sum(iavg * dt)
    return integral

# helper functions
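The function above is the trapezoidal rule written with np.ediff1d: each pair of adjacent samples is averaged and weighted by its time step. A short standalone sketch with illustrative sample values:

import numpy as np

t = np.array([0.0, 1.0, 2.5, 4.0])
y = np.array([0.0, 2.0, 3.0, 1.0])

iavg = 0.5 * (y[:-1] + y[1:])   # average of adjacent samples
dt = np.ediff1d(t)              # time step between samples
print(np.sum(iavg * dt))        # 7.75, the trapezoidal-rule integral of y over t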
Example #9
Source File: recom_most_pop.py From cornac with Apache License 2.0 | 6 votes |
def fit(self, train_set, val_set=None):
    """Fit the model to observations.

    Parameters
    ----------
    train_set: :obj:`cornac.data.Dataset`, required
        User-Item preference data as well as additional modalities.

    val_set: :obj:`cornac.data.Dataset`, optional, default: None
        User-Item preference data for model selection purposes (e.g., early stopping).

    Returns
    -------
    self : object
    """
    Recommender.fit(self, train_set, val_set)

    self.item_pop = np.ediff1d(train_set.csc_matrix.indptr)

    return self
Example #10
Source File: run_IJCAI_17_DMF.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 6 votes |
def cold_items_statistics(URM_train, URM_validation, URM_test, URM_test_negative):
    # Cold items experiment
    import scipy.sparse as sps

    URM_train_validation = URM_train + URM_validation
    n_users, n_items = URM_train_validation.shape

    item_in_train_flag = np.ediff1d(sps.csc_matrix(URM_train_validation).indptr) > 0
    item_in_test_flag = np.ediff1d(sps.csc_matrix(URM_test).indptr) > 0

    test_item_not_in_train_flag = np.logical_and(item_in_test_flag, np.logical_not(item_in_train_flag))
    test_item_in_train_flag = np.logical_and(item_in_test_flag, item_in_train_flag)

    print("The test data contains {} unique items, {} ({:.2f} %) of them never appear in train data".format(
        item_in_test_flag.sum(),
        test_item_not_in_train_flag.sum(),
        test_item_not_in_train_flag.sum() / item_in_test_flag.sum() * 100,
    ))
Example #11
Source File: test_arraysetops.py From coffeegrindsize with MIT License | 6 votes |
def test_ediff1d(self):
    zero_elem = np.array([])
    one_elem = np.array([1])
    two_elem = np.array([1, 2])

    assert_array_equal([], ediff1d(zero_elem))
    assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
    assert_array_equal([0], ediff1d(zero_elem, to_end=0))
    assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
    assert_array_equal([], ediff1d(one_elem))
    assert_array_equal([1], ediff1d(two_elem))
    assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
    assert_array_equal([5, 6, 1, 7, 8], ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))
    assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
    assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
    assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
    assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
Example #12
Source File: quantity.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def ediff1d(self, to_end=None, to_begin=None):
    return self._wrap_function(np.ediff1d, to_end, to_begin)
Example #13
Source File: test_interaction.py From twitter-stock-recommendation with MIT License | 5 votes |
def test_ediff1d_matrix():
    # 2018-04-29: moved here from core.tests.test_arraysetops.
    assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
    assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
Example #14
Source File: test_interaction.py From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License | 5 votes |
def test_ediff1d_matrix():
    # 2018-04-29: moved here from core.tests.test_arraysetops.
    assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
    assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
Example #15
Source File: __init__.py From CO2MPAS-TA with European Union Public License 1.1 | 5 votes |
def calculate_gear_shifts(gears):
    """
    Returns when there is a gear shifting [-].

    :param gears:
        Gear vector [-].
    :type gears: numpy.array

    :return:
        When there is a gear shifting [-].
    :rtype: numpy.array
    """
    return np.ediff1d(gears, to_begin=[0]) != 0
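A quick usage sketch for the gear-shift detection above; the gear sequence is illustrative. Passing to_begin=[0] keeps the output the same length as the input, so the boolean mask aligns with the original samples.

import numpy as np

gears = np.array([1, 1, 2, 2, 2, 3, 3, 2])

# True wherever the gear differs from the previous sample.
shifts = np.ediff1d(gears, to_begin=[0]) != 0
print(shifts)   # [False False  True False False  True False  True]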
Example #16
Source File: _thermal.py From CO2MPAS-TA with European Union Public License 1.1 | 5 votes |
def __call__(self, times, on_engine, velocities, engine_speeds_out,
             accelerations, initial_temperature=23, max_temp=100.0):
    t, temp = initial_temperature, np.zeros_like(times, dtype=float)
    it = enumerate(zip(
        np.ediff1d(times, to_begin=0), on_engine, velocities, accelerations,
        engine_speeds_out,
    ))
    x, t0, hot = np.array([[.0] * 5]), self.thermostat + self.ntemp, False
    for i, (dt, b, v, a, s) in it:
        hot |= t > self.thermostat
        x[:] = v, t0 - t, hot, s, a
        t += (self.on(x[:, 1:]) if b else self.off(x[:, :2])) * dt
        temp[i] = t = min(t, max_temp)
    return temp
Example #17
Source File: test_quantity_non_ufuncs.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_ediff1d(self):
    # ediff1d works always as it calls the Quantity method.
    self.check1(np.ediff1d)
    x = np.arange(10.) * u.m
    out = np.ediff1d(x, to_begin=-12.5 * u.cm, to_end=1 * u.km)
    expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.) * x.unit
    assert_array_equal(out, expected)
Example #18
Source File: test_interaction.py From coffeegrindsize with MIT License | 5 votes |
def test_ediff1d_matrix():
    # 2018-04-29: moved here from core.tests.test_arraysetops.
    assert(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
    assert(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
Example #19
Source File: logarithmic.py From Carnets with BSD 3-Clause "New" or "Revised" License | 5 votes |
def ediff1d(self, to_end=None, to_begin=None):
    return self._wrap_function(np.ediff1d, to_end, to_begin,
                               unit=self.unit._copy(dimensionless_unscaled))
Example #20
Source File: premade_lib.py From lattice with Apache License 2.0 | 5 votes |
def build_output_calibration_layer(output_calibration_input, model_config, dtype):
    """Creates a monotonic output calibration layer with inputs range [0, 1].

    Args:
        output_calibration_input: Input to the output calibration layer.
        model_config: Model configuration object describing model architecture.
            Should be one of the model configs in `tfl.configs`.
        dtype: dtype

    Returns:
        A `tfl.layers.PWLCalibration` instance.
    """
    # kernel format: bias followed by diffs between consecutive keypoint outputs.
    kernel_init_values = np.ediff1d(
        model_config.output_initialization,
        to_begin=model_config.output_initialization[0])
    input_keypoints = np.linspace(0.0, 1.0, num=len(kernel_init_values))
    kernel_initializer = tf.keras.initializers.Constant(kernel_init_values)
    kernel_regularizer = _output_calibration_regularizers(model_config)
    return pwl_calibration_layer.PWLCalibration(
        input_keypoints=input_keypoints,
        output_min=model_config.output_min,
        output_max=model_config.output_max,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        monotonicity=1,
        dtype=dtype,
        name=OUTPUT_CALIB_LAYER_NAME)(
            output_calibration_input)
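The kernel initialization above uses the fact that np.ediff1d(x, to_begin=x[0]) produces the first value followed by the increments between consecutive values, so its cumulative sum reconstructs x. A standalone sketch with illustrative keypoint outputs:

import numpy as np

output_keypoints = np.array([0.0, 0.2, 0.5, 1.0])

# First keypoint ("bias") followed by diffs between consecutive keypoints.
kernel_init = np.ediff1d(output_keypoints, to_begin=output_keypoints[0])
print(kernel_init)              # [0.  0.2 0.3 0.5]

# The cumulative sum recovers the original keypoint outputs.
print(np.cumsum(kernel_init))   # [0.  0.2 0.5 1. ]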
Example #21
Source File: BaseRecommender.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def set_URM_train(self, URM_train_new, **kwargs):

    assert self.URM_train.shape == URM_train_new.shape, \
        "{}: set_URM_train old and new URM train have different shapes".format(self.RECOMMENDER_NAME)

    if len(kwargs) > 0:
        self._print("set_URM_train keyword arguments not supported for this recommender class. Received: {}".format(kwargs))

    self.URM_train = check_matrix(URM_train_new.copy(), 'csr', dtype=np.float32)
    self.URM_train.eliminate_zeros()

    self._cold_user_mask = np.ediff1d(self.URM_train.indptr) == 0

    if self._cold_user_mask.any():
        self._print("Detected {} ({:.2f} %) cold users.".format(
            self._cold_user_mask.sum(),
            self._cold_user_mask.sum() / len(self._cold_user_mask) * 100))
Example #22
Source File: rebin.py From PCWG with MIT License | 5 votes |
def midpoints(xx):
    """Return midpoints of edges in xx."""
    return xx[:-1] + 0.5 * np.ediff1d(xx)
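A short usage sketch for the midpoints helper above; the bin edges are illustrative.

import numpy as np

edges = np.array([0.0, 1.0, 2.5, 5.0])

# Left edge of each bin plus half the bin width.
mids = edges[:-1] + 0.5 * np.ediff1d(edges)
print(mids)   # [0.5  1.75 3.75]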
Example #23
Source File: metrics.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def __init__(self, URM_train):
    super(Novelty, self).__init__()

    URM_train = sps.csc_matrix(URM_train)
    URM_train.eliminate_zeros()
    self.item_popularity = np.ediff1d(URM_train.indptr)

    self.novelty = 0.0
    self.n_evaluated_users = 0
    self.n_items = len(self.item_popularity)
    self.n_interactions = self.item_popularity.sum()
Example #24
Source File: BaseCBFRecommender.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def __init__(self, URM_train, UCM_train, verbose=True):
    super(BaseUserCBFRecommender, self).__init__(URM_train, verbose=verbose)

    assert self.n_users == UCM_train.shape[0], \
        "{}: URM_train has {} users but UCM_train has {}".format(self.RECOMMENDER_NAME, self.n_items, UCM_train.shape[0])

    self.UCM_train = check_matrix(UCM_train.copy(), 'csr', dtype=np.float32)
    self.UCM_train.eliminate_zeros()

    _, self.n_features = self.UCM_train.shape

    self._cold_user_CBF_mask = np.ediff1d(self.UCM_train.indptr) == 0

    if self._cold_user_CBF_mask.any():
        print("{}: UCM Detected {} ({:.2f} %) cold users.".format(
            self.RECOMMENDER_NAME, self._cold_user_CBF_mask.sum(),
            self._cold_user_CBF_mask.sum() / self.n_users * 100))
Example #25
Source File: DataReader_utils.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def remove_features(ICM, min_occurrence=5, max_percentage_occurrence=0.30, reconcile_mapper=None):
    """
    The function eliminates the values associated to feature occurring in less than the
    minimal percentage of items or more then the max. Shape of ICM is reduced deleting features.
    :param ICM:
    :param minPercOccurrence:
    :param max_percentage_occurrence:
    :param reconcile_mapper: DICT mapper [token] -> index
    :return: ICM
    :return: deletedFeatures
    :return: DICT mapper [token] -> index
    """

    ICM = check_matrix(ICM, 'csc')

    n_items = ICM.shape[0]

    cols = ICM.indptr
    numOccurrences = np.ediff1d(cols)

    feature_mask = np.logical_and(numOccurrences >= min_occurrence,
                                  numOccurrences <= n_items * max_percentage_occurrence)

    ICM = ICM[:, feature_mask]

    deletedFeatures = np.arange(0, len(feature_mask))[np.logical_not(feature_mask)]

    print("RemoveFeatures: removed {} features with less then {} occurrences, removed {} features with more than {} occurrencies".format(
        sum(numOccurrences < min_occurrence), min_occurrence,
        sum(numOccurrences > n_items * max_percentage_occurrence), int(n_items * max_percentage_occurrence)
    ))

    if reconcile_mapper is not None:
        reconcile_mapper = reconcile_mapper_with_removed_tokens(reconcile_mapper, deletedFeatures)

        return ICM, deletedFeatures, reconcile_mapper

    return ICM, deletedFeatures
Example #26
Source File: DataReader_utils.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def remove_empty_rows_and_cols(URM, ICM=None):

    URM = check_matrix(URM, "csr")
    rows = URM.indptr
    numRatings = np.ediff1d(rows)
    user_mask = numRatings >= 1

    URM = URM[user_mask, :]

    cols = URM.tocsc().indptr
    numRatings = np.ediff1d(cols)
    item_mask = numRatings >= 1

    URM = URM[:, item_mask]

    removedUsers = np.arange(0, len(user_mask))[np.logical_not(user_mask)]
    removedItems = np.arange(0, len(item_mask))[np.logical_not(item_mask)]

    if ICM is not None:
        ICM = ICM[item_mask, :]
        return URM.tocsr(), ICM.tocsr(), removedUsers, removedItems

    return URM.tocsr(), removedUsers, removedItems
Example #27
Source File: Utility.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def filter_urm(urm, user_min_number_ratings=1, item_min_number_ratings=1):
    # keep only users with at least n ratings, same for the items
    # NOTE: this operation re index both users and items (we get a more compact URM)

    urm = sps.csr_matrix(urm)
    urm.eliminate_zeros()

    users_to_select_mask = np.ediff1d(urm.indptr) >= user_min_number_ratings
    urm = urm[users_to_select_mask, :]

    urm = sps.csc_matrix(urm)
    items_to_select_mask = np.ediff1d(urm.indptr) >= item_min_number_ratings
    urm = urm[:, items_to_select_mask]

    return urm.tocsr()
Example #28
Source File: MatrixFactorization_Cython.py From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0 | 5 votes |
def set_URM_train(self, URM_train_new, estimate_item_similarity_for_cold_users=False, **kwargs):
    """
    :param URM_train_new:
    :param estimate_item_similarity_for_cold_users: Set to TRUE if you want to estimate the USER_factors for cold users
    :param kwargs:
    :return:
    """

    assert self.URM_train.shape == URM_train_new.shape, \
        "{}: set_URM_train old and new URM train have different shapes".format(self.RECOMMENDER_NAME)

    if len(kwargs) > 0:
        self._print("set_URM_train keyword arguments not supported for this recommender class. Received: {}".format(kwargs))

    self.URM_train = check_matrix(URM_train_new.copy(), 'csr', dtype=np.float32)
    self.URM_train.eliminate_zeros()

    # No need to ever use a knn model
    self._cold_user_KNN_model_available = False
    self._cold_user_mask = np.ediff1d(self.URM_train.indptr) == 0

    if estimate_item_similarity_for_cold_users:
        self._print("Estimating USER_factors for cold users...")
        self.USER_factors = self._estimate_user_factors(self.ITEM_factors_Y_best)
        self._print("Estimating USER_factors for cold users... done!")
Example #29
Source File: relaxstructures.py From PyChemia with MIT License | 5 votes |
def check_duplicates(self, ids):
    """
    Computes duplicate structures measuring its distance when their value is larger than value_tol.
    If the distance is lower than 'distance_tol' the structures will be cosidered as duplicates.

    :param ids:
    :return: (dict) Dictionary of duplicates, the keys are the ids of the duplicates and the value
        is the structure from which the structure is duplicated. In general the energy of the
        'value' is lower than the 'key'
    """
    ret = {}
    selection = self.ids_sorted(ids)
    values = np.array([self.value(i) for i in selection])
    if len(values) == 0:
        return ret
    diffs = np.ediff1d(values)

    for i in range(len(diffs)):
        idiff = diffs[i]
        if idiff < self.value_tol:
            ident1 = selection[i]
            ident2 = selection[i + 1]
            pcm_log.debug('Testing distances between %s and %s' % (str(ident1), str(ident2)))
            distance = self.distance(ident1, ident2)
            # print 'Distance = ', distance
            if distance < self.distance_tolerance:
                pcm_log.debug('Distance %7.3f < %7.3f' % (distance, self.distance_tolerance))
                ret[ident2] = ident1

    if len(ret) > 0:
        pcm_log.debug('Number of duplicates %d' % len(ret))

    return ret
Example #30
Source File: confound_prep.py From fmridenoise with Apache License 2.0 | 5 votes |
def calc_temp_deriv(signal):
    """Calculates discrete version of temporal derivative of a timecourse.

    Args:
        signal (pd.Series): Timecourse of interest.

    Returns:
        np.array: Vector of differences between subsequent values in signal.
    """
    return np.ediff1d(signal, to_begin=0)
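A quick usage sketch for the temporal-derivative helper above; the timecourse values are illustrative integers so the output is exact.

import numpy as np
import pandas as pd

signal = pd.Series([0, 2, 1, 5])

# Difference with the previous sample; to_begin=0 keeps the length unchanged.
deriv = np.ediff1d(signal, to_begin=0)
print(deriv)   # [ 0  2 -1  4]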