Python more_itertools.unique_everseen() Examples
The following are 24 code examples of more_itertools.unique_everseen().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module more_itertools, or try the search function.
Example #1
Source File: plots.py From nxviz with MIT License | 5 votes |
def compute_node_colors(self):
    """Compute the node colors. Also computes the colorbar.

    Reads ``self.node_color`` (a node-attribute key) off every node in
    ``self.graph``, reduces the values to a set of distinct groups, maps
    each node's value to a matplotlib colormap, and appends one RGBA
    color per node to ``self.node_colors``.  For continuous data with
    more than one distinct value, also builds ``self.sm`` (a
    ScalarMappable) for later colorbar drawing.
    """
    # One data value per node, in node order.
    data = [self.graph.nodes[n][self.node_color] for n in self.nodes]
    # NOTE(review): if group_order is neither "alphabetically" nor
    # "default", data_reduced is never assigned and the loop below
    # raises NameError — confirm allowed values upstream.
    if self.group_order == "alphabetically":
        data_reduced = sorted(list(set(data)))
    elif self.group_order == "default":
        # unique_everseen keeps first-seen order, unlike sorted(set(...)).
        data_reduced = list(unique_everseen(data))
    dtype = infer_data_type(data)
    n_grps = num_discrete_groups(data)
    # Pick a colormap based on the inferred data type.
    # NOTE(review): a continuous dtype is matched by the elif branches
    # below; any other unmatched dtype would leave cmap unbound.
    if dtype == "categorical" or dtype == "ordinal":
        if n_grps <= 8:
            # Palettable's Accent palettes only go up to 8 colors.
            cmap = get_cmap(
                cmaps["Accent_{0}".format(n_grps)].mpl_colormap
            )
        else:
            cmap = n_group_colorpallet(n_grps)
    elif dtype == "continuous" and not is_data_diverging(data):
        cmap = get_cmap(cmaps["continuous"].mpl_colormap)
    elif dtype == "continuous" and is_data_diverging(data):
        cmap = get_cmap(cmaps["diverging"].mpl_colormap)
    # Map each node's value to a fraction of the colormap range.
    # NOTE(review): division is by n_grps, not len(data_reduced) — so the
    # top of the colormap may never be reached; confirm intentional.
    for d in data:
        idx = data_reduced.index(d) / n_grps
        self.node_colors.append(cmap(idx))
    # Add colorbar if required.ListedColormap
    logging.debug("length of data_reduced: {0}".format(len(data_reduced)))
    logging.debug("dtype: {0}".format(dtype))
    if len(data_reduced) > 1 and dtype == "continuous":
        self.sm = plt.cm.ScalarMappable(
            cmap=cmap,
            norm=plt.Normalize(
                vmin=min(data_reduced),
                vmax=max(data_reduced),  # noqa  # noqa
            ),
        )
        # Matplotlib requires _A to be set before a colorbar can be drawn
        # from a ScalarMappable that has no attached array.
        self.sm._A = []
Example #2
Source File: cluster_specs.py From Distributed-TensorFlow-Using-MPI with MIT License | 5 votes |
def main():
    """Build and print a TensorFlow cluster spec from a hosts file.

    Reads one hostname per line from ``FLAGS.hosts_file``, de-duplicates
    them while preserving order, takes the first ``FLAGS.num_ps_hosts``
    entries as parameter servers and the remainder as workers, then
    prints the two comma-separated host:port lists.
    """
    # `with` guarantees the file is closed even if reading raises,
    # unlike the original open()/close() pair.
    with open(FLAGS.hosts_file, 'r') as f:
        hosts_list = [line.strip() for line in f]
    hosts_list = list(unique_everseen(hosts_list))
    # All hosts other than ps are treated as workers. ".ten.osc.edu" is
    # specific to the Owens cluster; adapt the suffix for other clusters.
    ps_hosts = [hosts_list[i] + ".ten.osc.edu:2222" for i in range(FLAGS.num_ps_hosts)]
    worker_hosts = [hosts_list[i] + ".ten.osc.edu:2222" for i in range(len(ps_hosts), len(hosts_list))]
    print(','.join(ps_hosts), ','.join(worker_hosts))
Example #3
Source File: test_recipes.py From Tautulli with GNU General Public License v3.0 | 5 votes |
def test_unhashable_key(self):
    """Unhashable items are deduplicated when a key function is given."""
    items = ['a', [1, 2, 3], [1, 2, 3], 'a']
    deduped = mi.unique_everseen(items, key=lambda item: item)
    self.assertEqual(['a', [1, 2, 3]], list(deduped))
Example #4
Source File: test_recipes.py From Tautulli with GNU General Public License v3.0 | 5 votes |
def test_unhashable(self):
    """Deduplication falls back to equality for unhashable items."""
    source = ['a', [1, 2, 3], [1, 2, 3], 'a']
    result = list(mi.unique_everseen(source))
    self.assertEqual(result, ['a', [1, 2, 3]])
Example #5
Source File: test_recipes.py From Tautulli with GNU General Public License v3.0 | 5 votes |
def test_custom_key(self):
    """A key function controls which elements count as duplicates."""
    deduped = mi.unique_everseen('aAbACCc', key=str.lower)
    self.assertEqual(list(deduped), list('abC'))
Example #6
Source File: test_recipes.py From Tautulli with GNU General Public License v3.0 | 5 votes |
def test_everseen(self):
    """Repeated elements appear only once, in first-seen order."""
    observed = list(mi.unique_everseen('AAAABBBBCCDAABBB'))
    self.assertEqual(observed, ['A', 'B', 'C', 'D'])
Example #7
Source File: record_merger.py From recordexpungPDX with MIT License | 5 votes |
def merge_dispositions(same_charges: List[Charge]) -> Disposition:
    """Collapse the dispositions of ambiguous versions of one charge.

    If every version agrees on a single disposition, return it;
    otherwise the disposition is ambiguous and an empty one is returned.

    Fixes the original ``len(...) == 2`` check, which only treated
    *exactly two* distinct dispositions as ambiguous: three or more
    distinct dispositions silently returned the first one, and an empty
    charge list raised IndexError.
    """
    distinct = list(unique_everseen(charge.disposition for charge in same_charges))
    if len(distinct) == 1:
        # All versions agree — keep the shared disposition.
        return distinct[0]
    # Zero or multiple distinct dispositions: ambiguous, so report empty.
    return DispositionCreator.empty()
Example #8
Source File: record_merger.py From recordexpungPDX with MIT License | 5 votes |
def merge_time_eligibilities(time_eligibilities: Optional[List[TimeEligibility]]) -> Optional[TimeEligibility]:
    """Collapse a list of alternative TimeEligibility results into one.

    Returns ``None`` when there is nothing to merge. Otherwise the
    merged status is computed across all alternatives, distinct reasons
    are joined (first-seen order preserved), the earliest alternative's
    date is used, and ``unique_date`` records whether every alternative
    agrees on the eligibility date.
    """
    # Guard clause replaces the original trailing `else: return None`.
    if not time_eligibilities:
        return None
    status = RecordMerger.compute_time_eligibility_status(time_eligibilities)
    reasons = [time_eligibility.reason for time_eligibility in time_eligibilities]
    # join() consumes the iterator directly; no intermediate list needed.
    reason = " ⬥ ".join(unique_everseen(reasons))
    # NOTE: callers pass this list pre-sorted by date, so [0] is earliest.
    date_will_be_eligible = time_eligibilities[0].date_will_be_eligible
    # Direct boolean assignment instead of an if/else that sets True/False.
    unique_date = (
        len({time_eligibility.date_will_be_eligible for time_eligibility in time_eligibilities}) == 1
    )
    return TimeEligibility(
        status=status,
        reason=reason,
        date_will_be_eligible=date_will_be_eligible,
        unique_date=unique_date,
    )
Example #9
Source File: record_merger.py From recordexpungPDX with MIT License | 5 votes |
def merge_type_eligibilities(same_charges: List[Charge]) -> TypeEligibility:
    """Combine the type eligibilities of ambiguous versions of a charge.

    The merged status is computed across all versions, and the distinct
    reasons (first-seen order) are joined into a single string.
    """
    merged_status = RecordMerger.compute_type_eligibility_status(same_charges)
    distinct_reasons = unique_everseen(charge.type_eligibility.reason for charge in same_charges)
    merged_reason = " ⬥ ".join(distinct_reasons)
    return TypeEligibility(status=merged_status, reason=merged_reason)
Example #10
Source File: typesystem.py From dkpro-cassis with Apache License 2.0 | 5 votes |
def all_features(self) -> Iterator[Feature]:
    """Iterate over every feature of this type, immediate and inherited.

    To retrieve only the immediate features, use `features` instead.

    Returns:
        An iterator over all features of this type.  De-duplicated with
        `unique_everseen` because children may redefine parent features
        (Issue #56).
    """
    own_then_inherited = chain(self._features.values(), self._inherited_features.values())
    return unique_everseen(own_then_inherited)
Example #11
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_everseen(self):
    """Only the first occurrence of each element survives."""
    deduped = mi.unique_everseen('AAAABBBBCCDAABBB')
    self.assertEqual(list(deduped), ['A', 'B', 'C', 'D'])
Example #12
Source File: test_recipes.py From pipenv with MIT License | 5 votes |
def test_unhashable_key(self):
    """An identity key still deduplicates unhashable values."""
    data = ['a', [1, 2, 3], [1, 2, 3], 'a']
    seen_once = mi.unique_everseen(data, key=lambda x: x)
    self.assertEqual(list(seen_once), ['a', [1, 2, 3]])
Example #13
Source File: test_recipes.py From pipenv with MIT License | 5 votes |
def test_unhashable(self):
    """Lists (unhashable) are deduplicated alongside hashable strings."""
    data = ['a', [1, 2, 3], [1, 2, 3], 'a']
    self.assertEqual(list(mi.unique_everseen(data)), ['a', [1, 2, 3]])
Example #14
Source File: test_recipes.py From pipenv with MIT License | 5 votes |
def test_custom_key(self):
    """Case-insensitive deduplication via ``str.lower`` as the key."""
    result = list(mi.unique_everseen('aAbACCc', key=str.lower))
    self.assertEqual(result, list('abC'))
Example #15
Source File: test_recipes.py From pipenv with MIT License | 5 votes |
def test_everseen(self):
    """Every duplicate after the first sighting is dropped."""
    observed = list(mi.unique_everseen('AAAABBBBCCDAABBB'))
    expected = ['A', 'B', 'C', 'D']
    self.assertEqual(observed, expected)
Example #16
Source File: zipp.py From pipenv with MIT License | 5 votes |
def _implied_dirs(names):
    """Yield each directory implied by *names* but not explicitly listed.

    For every name, every ancestor directory (with a trailing slash)
    that is not itself present in *names* is produced, de-duplicated in
    first-seen order.
    """
    def candidates():
        for name in names:
            for parent in _parents(name):
                as_dir = parent + "/"
                if as_dir not in names:
                    yield as_dir

    return more_itertools.unique_everseen(candidates())
Example #17
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_unhashable_key(self):
    """Custom key deduplication works even for unhashable elements."""
    mixed = ['a', [1, 2, 3], [1, 2, 3], 'a']
    unique = list(mi.unique_everseen(mixed, key=lambda element: element))
    self.assertEqual(unique, ['a', [1, 2, 3]])
Example #18
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_unhashable(self):
    """Unhashable elements are compared by equality, not by hash."""
    mixed = ['a', [1, 2, 3], [1, 2, 3], 'a']
    unique = mi.unique_everseen(mixed)
    self.assertEqual(['a', [1, 2, 3]], list(unique))
Example #19
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_everseen(self):
    """Duplicates are suppressed while first-seen order is preserved."""
    letters = 'AAAABBBBCCDAABBB'
    self.assertEqual(list(mi.unique_everseen(letters)), ['A', 'B', 'C', 'D'])
Example #20
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_unhashable_key(self):
    """Identity key: unhashable duplicates are still filtered out."""
    values = ['a', [1, 2, 3], [1, 2, 3], 'a']
    result = mi.unique_everseen(values, key=lambda v: v)
    self.assertEqual(list(result), ['a', [1, 2, 3]])
Example #21
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_unhashable(self):
    """Mixed hashable/unhashable input is deduplicated correctly."""
    values = ['a', [1, 2, 3], [1, 2, 3], 'a']
    result = list(mi.unique_everseen(values))
    self.assertEqual(result, ['a', [1, 2, 3]])
Example #22
Source File: test_recipes.py From python-netsurv with MIT License | 5 votes |
def test_custom_key(self):
    """The key callable decides equivalence; output keeps original case."""
    folded = mi.unique_everseen('aAbACCc', key=str.lower)
    self.assertEqual(list('abC'), list(folded))
Example #23
Source File: DVSIterator.py From snn_toolbox with MIT License | 4 votes |
def get_frames_from_sequence(event_list, num_events_per_frame, data_format,
                             frame_gen_method, is_x_first, is_x_flipped,
                             is_y_flipped, maxpool_subsampling,
                             do_clip_three_sigma, chip_size,
                             target_shape=None):
    """
    Extract ``num_events_per_frame`` events from a one-dimensional
    sequence of AER-events. The events are spatially subsampled to
    ``target_shape``, and standardized to [0, 1] using 3-sigma
    normalization. The resulting events are binned into a frame. The
    function operates on the events in ``xaddr`` etc sequentially until
    all are processed into frames.

    Parameters are passed straight through to ``add_event_to_frame`` /
    ``clip_three_sigma`` / ``scale_event_frames``; see those helpers.
    ``event_list`` items are unpacked as ``(x, y, t, p)`` tuples —
    presumably (x-address, y-address, timestamp, polarity); confirm
    against the caller.  Returns a float32 array of shape
    ``[num_frames] + target_shape`` with a channel axis inserted
    according to ``data_format``.
    """
    from more_itertools import unique_everseen

    if target_shape is None:
        # No resampling requested: frames keep the native chip resolution.
        target_shape = chip_size
        scale = None
    else:
        # Per-axis scale factor mapping chip coordinates onto the target
        # grid; (t-1)/(c-1) maps the max chip index onto the max target
        # index exactly.
        scale = [np.true_divide((t - 1), (c - 1))
                 for t, c in zip(target_shape, chip_size)]

    # Trailing events that do not fill a complete frame are discarded.
    num_frames = int(len(event_list) / num_events_per_frame)
    frames = np.zeros([num_frames] + list(target_shape), 'float32')

    print("Extracting {} frames from DVS event sequence.".format(num_frames))

    # Iterate for as long as there are events in the sequence.
    for sample_idx in range(num_frames):
        sample = frames[sample_idx]

        # Contiguous, non-overlapping slice of events for this frame.
        event_idxs = slice(num_events_per_frame * sample_idx,
                           num_events_per_frame * (sample_idx + 1))

        # Loop over ``num_events_per_frame`` events
        frame_event_list = []
        for x, y, t, p in event_list[event_idxs]:
            if scale is not None:
                # Subsample from 240x180 to e.g. 64x64
                x = int(x * scale[0])
                y = int(y * scale[1])
            # Only 'signed_sum' keeps the event polarity; every other
            # frame-generation method counts events with unit weight.
            pp = p if frame_gen_method == 'signed_sum' else 1
            frame_event_list.append((x, y, t, pp))

        if maxpool_subsampling:
            # Drop duplicate (x, y, t, p) tuples created by downscaling,
            # keeping first-seen order.
            frame_event_list = list(unique_everseen(frame_event_list))

        for x, y, t, p in frame_event_list:
            add_event_to_frame(sample, x, y, p, frame_gen_method,
                               is_x_first, is_x_flipped, is_y_flipped)

        # sample = scale_event_frames(sample, frame_gen_method)
        if do_clip_three_sigma:
            frames[sample_idx] = clip_three_sigma(sample, frame_gen_method)
        else:
            frames[sample_idx] = sample

    frames = scale_event_frames(frames)

    # Insert the channel axis where the backend expects it.
    channel_axis = 1 if data_format == 'channels_first' else -1
    return np.expand_dims(frames, channel_axis)
Example #24
Source File: record_merger.py From recordexpungPDX with MIT License | 4 votes |
def merge(
    ambiguous_record: AmbiguousRecord,
    ambiguous_charge_id_to_time_eligibility_list: List[Dict[str, TimeEligibility]],
    charge_ids_with_question: List[str],
) -> Record:
    """Collapse an ambiguous record into a single merged Record.

    Each possible interpretation of the record contributes its own
    charges and TimeEligibility results; this folds them together so
    every charge in the first interpretation carries merged type/time
    eligibility, a merged charge-type name, and a merged disposition.
    Charges listed in ``charge_ids_with_question`` are marked as
    needing more analysis instead of being computed.
    """
    # Collect, per ambiguous charge id, the distinct TimeEligibility
    # values across all interpretations (order of first appearance kept).
    ambiguous_charge_id_to_time_eligibilities: Dict[str, List[TimeEligibility]] = collections.defaultdict(list)
    for charge_id_to_time_eligibility in ambiguous_charge_id_to_time_eligibility_list:
        for k, v in charge_id_to_time_eligibility.items():
            if v not in ambiguous_charge_id_to_time_eligibilities[k]:
                ambiguous_charge_id_to_time_eligibilities[k].append(v)
    # All charges from every interpretation, flattened into one list.
    charges = list(flatten([record.charges for record in ambiguous_record]))
    # The first interpretation supplies the case/charge structure that
    # the merged results are written back onto.
    record = ambiguous_record[0]
    new_case_list: List[Case] = []
    for case in record.cases:
        new_charges = []
        for charge in case.charges:
            time_eligibilities = ambiguous_charge_id_to_time_eligibilities.get(
                charge.ambiguous_charge_id
            )  # TODO: Review whether this can return None
            # Earliest eligibility date first; None propagates when there
            # are no time-eligibility alternatives for this charge.
            sorted_time_eligibility = (
                sorted(time_eligibilities, key=lambda e: e.date_will_be_eligible) if time_eligibilities else None
            )
            # Every version of this charge across all interpretations.
            same_charges = list(filter(lambda c: c.ambiguous_charge_id == charge.ambiguous_charge_id, charges))
            romeo_and_juliet_exception = RecordMerger._is_romeo_and_juliet_exception(same_charges)
            merged_type_eligibility = RecordMerger.merge_type_eligibilities(same_charges)
            merged_time_eligibility = RecordMerger.merge_time_eligibilities(sorted_time_eligibility)
            if charge.ambiguous_charge_id in charge_ids_with_question:
                # Open questions override computed eligibility entirely.
                charge_eligibility = ChargeEligibility(
                    ChargeEligibilityStatus.NEEDS_MORE_ANALYSIS, "Needs More Analysis"
                )
            else:
                charge_eligibility = RecordMerger.compute_charge_eligibility(
                    merged_type_eligibility, sorted_time_eligibility, romeo_and_juliet_exception
                )
            # Flag labels that depend on an open charge so users see the
            # caveat in the displayed eligibility text.
            if "open" in charge_eligibility.label.lower():
                charge_eligibility = replace(
                    charge_eligibility,
                    label=f"Eligibility Timeframe Dependent On Open Charge: {charge_eligibility.label}",
                )
            expungement_result = ExpungementResult(
                type_eligibility=merged_type_eligibility,
                time_eligibility=merged_time_eligibility,
                charge_eligibility=charge_eligibility,
            )
            # Distinct type names across interpretations, joined for display.
            merged_type_name = " ⬥ ".join(
                list(unique_everseen([charge.charge_type.type_name for charge in same_charges]))
            )
            merged_charge_type = replace(charge.charge_type, type_name=merged_type_name)
            merged_disposition = RecordMerger.merge_dispositions(same_charges)
            # dataclasses.replace keeps the original charge immutable.
            new_charge: Charge = replace(
                charge,
                charge_type=merged_charge_type,
                expungement_result=expungement_result,
                disposition=merged_disposition,
            )
            new_charges.append(new_charge)
        new_case = replace(case, charges=tuple(new_charges))
        new_case_list.append(new_case)
    return replace(record, cases=tuple(new_case_list))