Python numpy.isin() Examples
The following are 30 code examples of numpy.isin(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
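Before the project examples, a quick refresher on what numpy.isin(element, test_elements) computes: an element-wise membership test that returns a boolean array with the same shape as element, True wherever a value of element occurs in test_elements. The toy arrays below are illustrative only and are not taken from any of the projects listed here.

import numpy as np

element = np.array([[0, 2], [4, 6]])
test_elements = [1, 2, 4, 8]

# True wherever an entry of `element` appears in `test_elements`;
# the result keeps the shape of `element`.
mask = np.isin(element, test_elements)
# array([[False,  True],
#        [ True, False]])

# invert=True flips the test: True wherever an entry is NOT in `test_elements`.
np.isin(element, test_elements, invert=True)
# array([[ True, False],
#        [False,  True]])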
Example #1
Source File: test_extras.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 6 votes |
def test_isin(self):
    # the tests for in1d cover most of isin's behavior
    # if in1d is removed, would need to change those tests to test
    # isin instead.
    a = np.arange(24).reshape([2, 3, 4])
    mask = np.zeros([2, 3, 4])
    mask[1, 2, 0] = 1
    a = array(a, mask=mask)
    b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
              mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
    ec = zeros((2, 3, 4), dtype=bool)
    ec[0, 0, 0] = True
    ec[0, 0, 1] = True
    ec[0, 2, 3] = True
    c = isin(a, b)
    assert_(isinstance(c, MaskedArray))
    assert_array_equal(c, ec)
    # compare results of np.isin to ma.isin
    d = np.isin(a, b[~b.mask]) & ~a.mask
    assert_array_equal(c, d)
Example #2
Source File: emg_activation.py From NeuroKit with MIT License | 6 votes |
def _emg_activation_activations(activity, duration_min=0.05):

    activations = events_find(activity, threshold=0.5, threshold_keep="above", duration_min=duration_min)
    activations["offset"] = activations["onset"] + activations["duration"]

    baseline = events_find(activity == 0, threshold=0.5, threshold_keep="above", duration_min=duration_min)
    baseline["offset"] = baseline["onset"] + baseline["duration"]

    # Cross-comparison
    valid = np.isin(activations["onset"], baseline["offset"])
    onsets = activations["onset"][valid]
    offsets = activations["offset"][valid]

    new_activity = np.array([])
    for x, y in zip(onsets, offsets):
        activated = np.arange(x, y)
        new_activity = np.append(new_activity, activated)

    # Prepare Output.
    info = {"EMG_Onsets": onsets, "EMG_Offsets": offsets, "EMG_Activity": new_activity}

    return info
Example #3
Source File: mesh_io.py From simnibs with GNU General Public License v3.0 | 6 votes |
def find_all_elements_with_node(self, node_nr):
    """ Finds all elements that have a given node

    Parameters
    -----------------
    node_nr: int
        number of node

    Returns
    ---------------
    elm_nr: np.ndarray
        array with indices of element numbers
    """
    elm_with_node = np.any(
        np.isin(self.node_number_list, node_nr), axis=1)
    return self.elm_number[elm_with_node]
Example #4
Source File: mesh_io.py From simnibs with GNU General Public License v3.0 | 6 votes |
def nodes_with_tag(self, tags):
    ''' Gets all nodes indexes that are part of at least one element with the given tags

    Parameters
    -----------
    tags: list
        Integer tags to search

    Returns
    -------------
    nodes: ndarray of integer
        Indexes of nodes with given tag
    '''
    nodes = np.unique(self[np.isin(self.tag1, tags)].reshape(-1))
    nodes = nodes[nodes > 0]
    return nodes
Example #5
Source File: mesh_io.py From simnibs with GNU General Public License v3.0 | 6 votes |
def mean_field_norm(self):
    ''' Calculates V*w/sum(w)
    Where V is the norm of the field, and w is the volume or area of the
    mesh where the field is defined. This can be used as a focality metric.
    It should give out small values when the field is focal and

    Returns
    ----------
    eff_area: float
        Area or volume of mesh, weighted by the field
    '''
    self._test_msh()
    if np.all(np.isin([2, 4], self.mesh.elm.elm_type)):
        warnings.warn('Calculating effective volume/area of fields in meshes with'
                      ' triangles and tetrahedra can give misleading results')
    norm = self._norm()
    weights = self._weights()
    return np.sum(norm * weights) / np.sum(weights)
Example #6
Source File: test_extras.py From recruit with Apache License 2.0 | 6 votes |
def test_isin(self):
    # the tests for in1d cover most of isin's behavior
    # if in1d is removed, would need to change those tests to test
    # isin instead.
    a = np.arange(24).reshape([2, 3, 4])
    mask = np.zeros([2, 3, 4])
    mask[1, 2, 0] = 1
    a = array(a, mask=mask)
    b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
              mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
    ec = zeros((2, 3, 4), dtype=bool)
    ec[0, 0, 0] = True
    ec[0, 0, 1] = True
    ec[0, 2, 3] = True
    c = isin(a, b)
    assert_(isinstance(c, MaskedArray))
    assert_array_equal(c, ec)
    # compare results of np.isin to ma.isin
    d = np.isin(a, b[~b.mask]) & ~a.mask
    assert_array_equal(c, d)
Example #7
Source File: correlator.py From pytim with GNU General Public License v3.0 | 6 votes |
def _sample_intermittent(self, group):
    # we need to collect also the residence
    # function
    # the residence function (1 if in the reference group, 0 otherwise)
    mask = np.isin(self.reference, group)
    # append the residence function to its timeseries
    self.maskseries.append(list(mask))
    if self.observable is not None:
        # this copies a vector of zeros with the correct shape
        sampled = self.reference_obs.copy()
        obs = self.observable.compute(group)
        sampled[np.where(mask)] = obs
        self.timeseries.append(list(sampled.flatten()))
    else:
        self.timeseries = self.maskseries
        if self.shape is None:
            self.shape = (1, )
        sampled = mask
    return sampled
Example #8
Source File: test_sampler.py From pytorch_geometric with MIT License | 6 votes |
def test_sampler():
    torch.manual_seed(12345)

    edge_index = erdos_renyi_graph(num_nodes=10, edge_prob=0.5)
    E = edge_index.size(1)

    loader = NeighborSampler(edge_index, sizes=[2, 4], batch_size=2)
    assert loader.__repr__() == 'NeighborSampler(sizes=[2, 4])'
    assert len(loader) == 5

    for batch_size, n_id, adjs in loader:
        assert batch_size == 2
        assert all(np.isin(n_id, torch.arange(10)).tolist())
        assert n_id.unique().size(0) == n_id.size(0)
        for (edge_index, e_id, size) in adjs:
            assert int(edge_index[0].max() + 1) <= size[0]
            assert int(edge_index[1].max() + 1) <= size[1]
            assert all(np.isin(e_id, torch.arange(E)).tolist())
            assert e_id.unique().size(0) == e_id.size(0)
            assert size[0] >= size[1]

    out = loader.sample([1, 2])
    assert len(out) == 3
Example #9
Source File: raster_manipulation.py From pyeo with GNU General Public License v3.0 | 6 votes |
def create_mask_from_class_map(class_map_path, out_path, classes_of_interest, buffer_size=0, out_resolution=None):
    """Creates a mask from a classification mask: 1 for each pixel containing one of
    classes_of_interest, otherwise 0"""
    # TODO: pull this out of the above function
    class_image = gdal.Open(class_map_path)
    class_array = class_image.GetVirtualMemArray()
    mask_array = np.isin(class_array, classes_of_interest)
    out_mask = create_matching_dataset(class_image, out_path, datatype=gdal.GDT_Byte)
    out_array = out_mask.GetVirtualMemArray(eAccess=gdal.GA_Update)
    np.copyto(out_array, mask_array)
    class_array = None
    class_image = None
    out_array = None
    out_mask = None
    if out_resolution:
        resample_image_in_place(out_path, out_resolution)
    if buffer_size:
        buffer_mask_in_place(out_path, buffer_size)
    return out_path
Example #10
Source File: raster_manipulation.py From pyeo with GNU General Public License v3.0 | 6 votes |
def create_mask_from_fmask(in_l1_dir, out_path):
    log = logging.getLogger(__name__)
    log.info("Creating fmask for {}".format(in_l1_dir))
    with TemporaryDirectory() as td:
        temp_fmask_path = os.path.join(td, "fmask.tif")
        apply_fmask(in_l1_dir, temp_fmask_path)
        fmask_image = gdal.Open(temp_fmask_path)
        fmask_array = fmask_image.GetVirtualMemArray()
        out_image = create_matching_dataset(fmask_image, out_path, datatype=gdal.GDT_Byte)
        out_array = out_image.GetVirtualMemArray(eAccess=gdal.GA_Update)
        log.info("fmask created, converting to binary cloud/shadow mask")
        out_array[:, :] = np.isin(fmask_array, (2, 3, 4), invert=True)
        out_array = None
        out_image = None
        fmask_array = None
        fmask_image = None
    resample_image_in_place(out_path, 10)
Example #11
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 6 votes |
def dataframe_select(df, *cols, **filters):
    '''
    dataframe_select(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the
      given keys (k1, k2, etc.) have been selected such that the associated columns in the
      dataframe contain only the rows whose cells match the given values.
    dataframe_select(df, col1, col2...) selects the given columns.
    dataframe_select(df, col1, col2..., k1=v1, k2=v2...) selects both.

    If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
    between the values. If value is a tuple/list of more than 2 elements or is a set of any length
    then it is a list of values, any one of which can match the cell.
    '''
    ii = np.ones(len(df), dtype='bool')
    for (k, v) in six.iteritems(filters):
        vals = df[k].values
        if pimms.is_set(v):
            jj = np.isin(vals, list(v))
        elif pimms.is_vector(v) and len(v) == 2:
            jj = (v[0] <= vals) & (vals < v[1])
        elif pimms.is_vector(v):
            jj = np.isin(vals, list(v))
        else:
            jj = (vals == v)
        ii = np.logical_and(ii, jj)
    if len(ii) != np.sum(ii):
        df = df.loc[ii]
    if len(cols) > 0:
        df = df[list(cols)]
    return df
Example #12
Source File: rules.py From seagull with MIT License | 6 votes |
def life_rule(X: np.ndarray, rulestring: str) -> np.ndarray:
    """A generalized life rule that accepts a rulestring in B/S notation

    Rulestrings are commonly expressed in the B/S notation where B (birth) is a
    list of all numbers of live neighbors that cause a dead cell to come alive,
    and S (survival) is a list of all the numbers of live neighbors that cause
    a live cell to remain alive.

    Parameters
    ----------
    X : np.ndarray
        The input board matrix
    rulestring : str
        The rulestring in B/S notation

    Returns
    -------
    np.ndarray
        Updated board after applying the rule
    """
    birth_req, survival_req = _parse_rulestring(rulestring)
    neighbors = _count_neighbors(X)
    birth_rule = (X == 0) & (np.isin(neighbors, birth_req))
    survival_rule = (X == 1) & (np.isin(neighbors, survival_req))
    return birth_rule | survival_rule
Example #13
Source File: rfsm.py From pysheds with GNU General Public License v3.0 | 6 votes |
def set_node_transfers(self):
    for index, mapping in enumerate(self.tmap):
        for pair, node in mapping.items():
            i, j = pair
            comm = int(node.comm)
            comm_elev = node.elev
            neighbors = Grid._select_surround_ravel(self, comm, self.dem.shape)
            ser = pd.DataFrame(np.column_stack([neighbors,
                                                self.dem.flat[neighbors],
                                                self.ws[index].flat[neighbors]]))
            ser = ser[ser[2].isin(list(pair))]
            g = ser.groupby(2).idxmin()[1].apply(lambda x: ser.loc[x, 0])
            fullix = self.drop.flat[g.values.astype(int)]
            lv = self.dropmap.loc[fullix][0].values
            nm = self.dropmap.loc[fullix][1].values
            g = pd.DataFrame(np.column_stack([lv, nm]),
                             index=g.index.values.astype(int),
                             columns=['level', 'name']).to_dict(orient='index')
            # Children will always be in numeric order from left to right
            lt, rt = g[j], g[i]
            node.l.t = self.nodes[lt['level']][lt['name']]
            node.r.t = self.nodes[rt['level']][rt['name']]
    self.set_singleton_transfer(self.root)
Example #14
Source File: test_extras.py From vnpy_crypto with MIT License | 6 votes |
def test_isin(self):
    # the tests for in1d cover most of isin's behavior
    # if in1d is removed, would need to change those tests to test
    # isin instead.
    a = np.arange(24).reshape([2, 3, 4])
    mask = np.zeros([2, 3, 4])
    mask[1, 2, 0] = 1
    a = array(a, mask=mask)
    b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
              mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
    ec = zeros((2, 3, 4), dtype=bool)
    ec[0, 0, 0] = True
    ec[0, 0, 1] = True
    ec[0, 2, 3] = True
    c = isin(a, b)
    assert_(isinstance(c, MaskedArray))
    assert_array_equal(c, ec)
    # compare results of np.isin to ma.isin
    d = np.isin(a, b[~b.mask]) & ~a.mask
    assert_array_equal(c, d)
Example #15
Source File: iem.py From brainiak with Apache License 2.0 | 6 votes |
def _check_params(self):
    if self.range_start >= self.range_stop:
        raise ValueError("range_start {} must be less than "
                         "{} range_stop.".format(self.range_start, self.range_stop))
    if self.stimulus_mode == 'halfcircular':
        if (self.range_stop - self.range_start) != 180.:
            raise ValueError("For half-circular feature spaces,"
                             "the range must be 180 degrees, "
                             "not {}".format(self.range_stop - self.range_start))
    elif self.stimulus_mode == 'circular':
        if (self.range_stop - self.range_start) != 360.:
            raise ValueError("For circular feature spaces, the"
                             " range must be 360 degrees"
                             "not {}".format(self.range_stop - self.range_start))
    if self.n_channels < 2:
        raise ValueError("Insufficient number of channels.")
    if not np.isin(self.stimulus_mode, ['circular', 'halfcircular']):
        raise ValueError("Stimulus mode must be one of these: "
                         "'circular', 'halfcircular'")
Example #16
Source File: test_time.py From georinex with MIT License | 6 votes |
def test_tlim(fn, tlim, tref, tlen):
    """
    Important test, be sure it's runnable on all systems
    """
    dat = gr.load(fn, tlim=tlim)

    times = gr.to_datetime(dat.time)

    assert (times == tref).all()

    if dat.rinextype == 'obs' and 2 <= dat.version < 3:
        assert dat.fast_processing

    alltimes = gr.gettime(fn)

    assert isinstance(alltimes, np.ndarray)
    assert alltimes.size == tlen

    assert np.isin(times, alltimes).size == times.size


# %% currently, interval is only for OBS2 and OBS3
Example #17
Source File: test_extras.py From GraphicDesignPatternByPython with MIT License | 6 votes |
def test_isin(self):
    # the tests for in1d cover most of isin's behavior
    # if in1d is removed, would need to change those tests to test
    # isin instead.
    a = np.arange(24).reshape([2, 3, 4])
    mask = np.zeros([2, 3, 4])
    mask[1, 2, 0] = 1
    a = array(a, mask=mask)
    b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
              mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
    ec = zeros((2, 3, 4), dtype=bool)
    ec[0, 0, 0] = True
    ec[0, 0, 1] = True
    ec[0, 2, 3] = True
    c = isin(a, b)
    assert_(isinstance(c, MaskedArray))
    assert_array_equal(c, ec)
    # compare results of np.isin to ma.isin
    d = np.isin(a, b[~b.mask]) & ~a.mask
    assert_array_equal(c, d)
Example #18
Source File: RandomSequenceMask.py From tape-neurips2019 with MIT License | 6 votes |
def _generate_bert_mask(self, inputs):
    def _numpy_generate_contiguous_mask(array):
        mask = np.random.random(array.shape) < (1 / self.avg_seq_len)
        mask = np.cumsum(mask, 1)
        seqvals = np.max(mask)
        # increase probability because fewer sequences
        mask_prob = self.percentage * array.shape[1] / seqvals
        vals_to_mask = np.arange(seqvals)[np.random.random((seqvals,)) < mask_prob]
        indices_to_mask = np.isin(mask, vals_to_mask)
        mask[indices_to_mask] = 1
        mask[~indices_to_mask] = 0
        return np.asarray(mask, np.bool)

    bert_mask = tf.py_func(_numpy_generate_contiguous_mask, [inputs], tf.bool)
    bert_mask.set_shape(inputs.shape)
    return bert_mask
Example #19
Source File: dataset.py From batchflow with Apache License 2.0 | 6 votes |
def create_subset(self, index):
    """ Create a dataset based on the given subset of indices

    Parameters
    ----------
    index : DatasetIndex or np.array

    Returns
    -------
    Dataset

    Raises
    ------
    IndexError
        When a user wants to create a subset from source dataset it is necessary
        to be confident that the index of new subset lies in the range of source
        dataset's index. If the index lies out of the source dataset index's range,
        the IndexError is raised.
    """
    indices = index.indices if isinstance(index, DatasetIndex) else index
    if not np.isin(indices, self.indices).all():
        raise IndexError
    return type(self).from_dataset(self, self.index.create_subset(index))
Example #20
Source File: test_core.py From gym-electric-motor with MIT License | 6 votes |
def test_reward(self, monkeypatch, physical_system, reference_generator, observed_state_idx, violated_state_idx):
    observed_states = list(np.array(physical_system.state_names)[observed_state_idx])
    rf = RewardFunction(observed_states)
    rf.set_modules(physical_system, reference_generator)
    monkeypatch.setattr(rf, "_reward", self.mock_standard_reward)
    monkeypatch.setattr(rf, "_limit_violation_reward", self.mock_limit_violation_reward)
    state = np.ones_like(physical_system.state_names, dtype=float) * 0.5
    state[violated_state_idx] = 1.5
    reward, done = rf.reward(state, None)
    if np.any(np.isin(observed_state_idx, violated_state_idx)):
        assert reward == -1
        assert done
    else:
        assert reward == 1
        assert not done

    # Test negative limit violations
    state[violated_state_idx] = -1.5
    reward, done = rf.reward(state, None)
    if np.any(np.isin(observed_state_idx, violated_state_idx)):
        assert reward == -1
        assert done
    else:
        assert reward == 1
        assert not done
Example #21
Source File: test_extras.py From Mastering-Elasticsearch-7.0 with MIT License | 6 votes |
def test_isin(self):
    # the tests for in1d cover most of isin's behavior
    # if in1d is removed, would need to change those tests to test
    # isin instead.
    a = np.arange(24).reshape([2, 3, 4])
    mask = np.zeros([2, 3, 4])
    mask[1, 2, 0] = 1
    a = array(a, mask=mask)
    b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33],
              mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
    ec = zeros((2, 3, 4), dtype=bool)
    ec[0, 0, 0] = True
    ec[0, 0, 1] = True
    ec[0, 2, 3] = True
    c = isin(a, b)
    assert_(isinstance(c, MaskedArray))
    assert_array_equal(c, ec)
    # compare results of np.isin to ma.isin
    d = np.isin(a, b[~b.mask]) & ~a.mask
    assert_array_equal(c, d)
Example #22
Source File: rfsm.py From pysheds with GNU General Public License v3.0 | 6 votes |
def set_cumulative_capacities(self, node):
    if node.l:
        self.set_cumulative_capacities(node.l)
    if node.r:
        self.set_cumulative_capacities(node.r)
    if node.parent:
        if node.name:
            elevdiff = node.parent.elev - self.dem[self.ws[node.level] == node.name]
            vol = abs(np.asscalar(elevdiff[elevdiff > 0].sum()) * self.x * self.y)
            node.vol = vol
        else:
            leaves = []
            self.enumerate_leaves(node, level=node.level, stack=leaves)
            mask = np.isin(self.ws[node.level], leaves)
            boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                                 for pair in combinations(leaves, 2)]))
            mask.flat[boundary] = True
            elevdiff = node.parent.elev - self.dem[mask]
            vol = abs(np.asscalar(elevdiff[elevdiff > 0].sum()) * self.x * self.y)
            node.vol = vol
Example #23
Source File: data_object.py From BrainSpace with BSD 3-Clause "New" or "Revised" License | 6 votes |
def GetPolys2D(self):
    """Returns the polys as a 2D VTKArray instance.

    Returns
    -------
    polys : 2D ndarray, shape = (n_points, n)
        PolyData polys.

    Raises
    ------
    ValueError
        If PolyData has different poly types.
    """
    v = self.GetPolys()
    if v is None:
        return v
    ct = self.cell_types
    if np.isin([VTK_QUAD, VTK_TRIANGLE], ct).all() or VTK_POLYGON in ct:
        raise ValueError('PolyData contains different poly types')
    return v.reshape(-1, v[0] + 1)[:, 1:]
Example #24
Source File: rfsm.py From pysheds with GNU General Public License v3.0 | 5 votes |
def compute_vol(self, z, node, target_vol):
    under_vol = node.cumulative_vol - node.vol
    if node.name:
        mask = (self.ws[node.level] == node.name)
        full = z - self.dem[mask]
    else:
        leaves = []
        self.enumerate_leaves(node, level=node.level, stack=leaves)
        mask = np.isin(self.ws[node.level], leaves)
        boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                             for pair in combinations(leaves, 2)]))
        mask.flat[boundary] = True
        full = z - self.dem[mask]
    vol = abs(np.asscalar(full[full > 0].sum()) * self.x * self.y)
    return vol - target_vol - under_vol
Example #25
Source File: test_core.py From gym-electric-motor with MIT License | 5 votes |
def test_set_modules_combined_observed_states(self, monkeypatch, reward_function, physical_system, reference_generator, expected_observed_states):
    physical_states = ['i_a', 'i_e', 'u_a', 'omega', 'torque']
    physical_system._state_names = physical_states
    reward_function.set_modules(physical_system, reference_generator)
    assert np.all(
        reward_function._observed_states == np.isin(physical_system.state_names, expected_observed_states))
Example #26
Source File: test_core.py From gym-electric-motor with MIT License | 5 votes |
def test_set_modules(self, monkeypatch, reward_function, physical_system, reference_generator, expected_observed_states):
    reward_function.set_modules(physical_system, reference_generator)
    assert np.all(
        reward_function._observed_states == np.isin(physical_system.state_names, expected_observed_states))
Example #27
Source File: utils.py From pyuvdata with BSD 2-Clause "Simplified" License | 5 votes |
def get_lst_for_time(jd_array, latitude, longitude, altitude):
    """
    Get the lsts for a set of jd times at an earth location.

    Parameters
    ----------
    jd_array : ndarray of float
        JD times to get lsts for.
    latitude : float
        Latitude of location to get lst for in degrees.
    longitude : float
        Longitude of location to get lst for in degrees.
    altitude : float
        Altitude of location to get lst for in meters.

    Returns
    -------
    ndarray of float
        LSTs in radians corresponding to the jd_array.
    """
    lst_array = np.zeros_like(jd_array)
    jd, reverse_inds = np.unique(jd_array, return_inverse=True)
    times = Time(
        jd,
        format="jd",
        location=(Angle(longitude, unit="deg"), Angle(latitude, unit="deg")),
    )
    if iers.conf.auto_max_age is None:  # pragma: no cover
        delta, status = times.get_delta_ut1_utc(return_status=True)
        if np.any(
            np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
        ):
            warnings.warn(
                "time is out of IERS range, setting delta ut1 utc to "
                "extrapolated value"
            )
            times.delta_ut1_utc = delta
    lst_array = times.sidereal_time("apparent").radian[reverse_inds]

    return lst_array
Example #28
Source File: manipulation.py From multi-agent-emergence-environments with MIT License | 5 votes |
def unlock_objs(self):
    sim = self.unwrapped.sim
    joints_to_unlock = np.isin(sim.model.jnt_bodyid, self.obj_body_idxs[np.arange(self.n_obj)])
    objs_to_darken = np.isin(sim.model.geom_bodyid, self.obj_body_idxs)
    sim.model.mat_emission[sim.model.geom_matid[objs_to_darken]] = 0
    sim.model.jnt_limited[joints_to_unlock] = 0
Example #29
Source File: rfsm.py From pysheds with GNU General Public License v3.0 | 5 votes |
def volume_to_level(self, node, waterlevel):
    if node.current_vol > 0:
        maxelev = node.parent.elev
        if node.elev:
            minelev = node.elev
        else:
            # TODO: This bound could be a lot better
            minelev = np.nanmin(self.dem)
        target_vol = node.current_vol
        elev = optimize.bisect(self.compute_vol, minelev, maxelev,
                               args=(node, target_vol))
        if node.name:
            mask = self.ws[node.level] == node.name
        else:
            leaves = []
            self.enumerate_leaves(node, level=node.level, stack=leaves)
            mask = np.isin(self.ws[node.level], leaves)
            boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                                 for pair in combinations(leaves, 2)]))
            mask.flat[boundary] = True
        mask = np.flatnonzero(mask & (self.dem < elev))
        waterlevel.flat[mask] = elev
    else:
        if node.l:
            self.volume_to_level(node.l, waterlevel)
        if node.r:
            self.volume_to_level(node.r, waterlevel)
Example #30
Source File: core.py From neuropythy with GNU Affero General Public License v3.0 | 5 votes |
def dataframe_except(df, *cols, **filters):
    '''
    dataframe_except(df, k1=v1, k2=v2...) yields df after selecting all the columns in which the
      given keys (k1, k2, etc.) have been selected such that the associated columns in the
      dataframe contain only the rows whose cells match the given values.
    dataframe_except(df, col1, col2...) selects all columns except for the given columns.
    dataframe_except(df, col1, col2..., k1=v1, k2=v2...) selects on both conditions.

    The dataframe_except() function is identical to the dataframe_select() function with the
    single difference being that the column names provided to dataframe_except() are dropped from
    the result while column names passed to dataframe_select() are kept.

    If a value is a tuple/list of 2 elements, then it is considered a range where cells must fall
    between the values. If value is a tuple/list of more than 2 elements or is a set of any length
    then it is a list of values, any one of which can match the cell.
    '''
    ii = np.ones(len(df), dtype='bool')
    for (k, v) in six.iteritems(filters):
        vals = df[k].values
        if pimms.is_set(v):
            jj = np.isin(vals, list(v))
        elif pimms.is_vector(v) and len(v) == 2:
            jj = (v[0] <= vals) & (vals < v[1])
        elif pimms.is_vector(v):
            jj = np.isin(vals, list(v))
        else:
            jj = (vals == v)
        ii = np.logical_and(ii, jj)
    if len(ii) != np.sum(ii):
        df = df.loc[ii]
    if len(cols) > 0:
        df = df.drop(list(cols), axis=1, inplace=False)
    return df