Python itertools.chain.from_iterable() Examples
The following are 29 code examples of itertools.chain.from_iterable(), each taken from an open-source project. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the itertools module.
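Before the project examples, here is a minimal, self-contained sketch of what the function does: chain.from_iterable() takes an iterable of iterables and lazily flattens exactly one level of nesting.

from itertools import chain

nested = [[1, 2], [3], (4, 5)]
# Flattens exactly one level of nesting, lazily.
print(list(chain.from_iterable(nested)))   # [1, 2, 3, 4, 5]

# Unlike chain(*nested), the outer iterable is not unpacked up front,
# so chain.from_iterable also works when it is a generator.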
Example #1
Source File: engine.py From hiku with BSD 3-Clause "New" or "Revised" License
def link_result_to_ids(from_list, link_type, result):
    if from_list:
        if link_type is Maybe:
            return [i for i in result if i is not Nothing]
        elif link_type is One:
            if any(i is Nothing for i in result):
                raise TypeError('Non-optional link should not return Nothing: '
                                '{!r}'.format(result))
            return result
        elif link_type is Many:
            return list(chain.from_iterable(result))
    else:
        if link_type is Maybe:
            return [] if result is Nothing else [result]
        elif link_type is One:
            if result is Nothing:
                raise TypeError('Non-optional link should not return Nothing')
            return [result]
        elif link_type is Many:
            return result
    raise TypeError(repr([from_list, link_type]))
Example #2
Source File: sumbt.py From ConvLab with MIT License
def predict(self, query):
    cache_query_key = ''.join(str(list(chain.from_iterable(query[0]))))
    if cache_query_key in self.cached_res.keys():
        return self.cached_res[cache_query_key]

    input_ids, input_len = query
    input_ids = torch.tensor(input_ids).to(self.device).unsqueeze(0)
    input_len = torch.tensor(input_len).to(self.device).unsqueeze(0)
    labels = None
    _, pred_slot = self.sumbt_model(input_ids, input_len, labels)
    pred_slot_t = pred_slot[0][-1].tolist()
    predict_belief = []
    for idx, i in enumerate(pred_slot_t):
        predict_belief.append(f'{self.target_slot[idx]}-{self.label_map_inv[idx][i]}')
    self.cached_res[cache_query_key] = predict_belief
    return predict_belief
Example #3
Source File: http_test.py From threat_intel with MIT License
def test_multi_get_max_retry(self):
    """Tests the case when the number of the maximum retries is reached,
    due to the unsuccessful responses.

    Request is repeated 3 times (based on `max_retry`), each time there is
    only one successful response. Eventually the call to `multi_get` returns
    the responses among which one is unsuccessful (`None`).
    """
    number_of_requests = 4
    query_params = [{'John Fitzgerald': 'Tom Hardy'}] * number_of_requests
    responses_to_calls = [
        self.mock_ok_responses(number_of_requests),
        self.mock_ok_responses(number_of_requests - 1),
        self.mock_ok_responses(number_of_requests - 2),
    ]
    # mock unsuccessful responses to the first call
    self.mock_unsuccessful_responses(responses_to_calls[0][0:3])
    # mock unsuccessful responses to the second call
    self.mock_unsuccessful_responses(responses_to_calls[1][1:3])
    # mock unsuccessful response to the third call
    self.mock_unsuccessful_response(responses_to_calls[2][1])
    get_mock = self.mock_request_futures(chain.from_iterable(responses_to_calls))

    actual_responses = MultiRequest(max_retry=3).multi_get('example.com', query_params)

    T.assert_equal(get_mock.call_count, 9)
    T.assert_is(actual_responses[2], None)
Example #4
Source File: misc.py From verge3d-blender-addon with GNU General Public License v3.0
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.

    '''
    # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
    return _chain.from_iterable(_starmap(_repeat, self.items()))

# Override dict methods where necessary
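The return statement above composes three itertools building blocks. A minimal standalone sketch of the same pattern, using a made-up count mapping:

from itertools import chain, repeat, starmap

counts = {'a': 2, 'b': 1}   # hypothetical element -> count mapping
# starmap(repeat, counts.items()) yields repeat('a', 2) and repeat('b', 1);
# chain.from_iterable concatenates them into a single iterator.
print(list(chain.from_iterable(starmap(repeat, counts.items()))))
# ['a', 'a', 'b']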
Example #5
Source File: relate.py From StormOnline with Apache License 2.0
def _get_all_related_objects(self, local_only=False, include_hidden=False,
                             include_proxy_eq=False):
    """
    Returns a list of related fields (also many to many)
    :param local_only:
    :param include_hidden:
    :return: list
    """
    include_parents = True if local_only is False else PROXY_PARENTS
    fields = self.opts._get_fields(
        forward=False, reverse=True,
        include_parents=include_parents,
        include_hidden=include_hidden
    )
    if include_proxy_eq:
        children = chain.from_iterable(
            c._relation_tree
            for c in self.opts.concrete_model._meta.proxied_children
            if c is not self.opts)
        relations = (
            f.remote_field for f in children
            if include_hidden or not f.remote_field.field.remote_field.is_hidden())
        fields = chain(fields, relations)
    return list(fields)
Example #6
Source File: base_mab.py From mabwiser with Apache License 2.0
def _parallel_predict(self, contexts: np.ndarray, is_predict: bool):

    # Total number of contexts to predict
    n_contexts = len(contexts)

    # Partition contexts by job
    n_jobs, n_contexts, starts = self._partition_contexts(n_contexts)
    total_contexts = sum(n_contexts)

    # Get seed value for each context
    seeds = self.rng.randint(np.iinfo(np.int32).max, size=total_contexts)

    # Perform parallel predictions
    predictions = Parallel(n_jobs=n_jobs, backend=self.backend)(
        delayed(self._predict_contexts)(
            contexts[starts[i]:starts[i + 1]],
            is_predict,
            seeds[starts[i]:starts[i + 1]],
            starts[i])
        for i in range(n_jobs))

    # Reduce
    predictions = list(chain.from_iterable(t for t in predictions))

    return predictions if len(predictions) > 1 else predictions[0]
Example #7
Source File: graph.py From hiku with BSD 3-Clause "New" or "Revised" License
def visit_graph_items(self, items):
    invalid = [f for f in items
               if not isinstance(f, self._graph_accept_types)]
    if invalid:
        self.errors.report('Graph can not contain these types: {}'
                           .format(self._format_types(invalid)))
        return

    root = Root(list(chain.from_iterable(e.fields for e in items
                                         if e.name is None)))
    self.visit(root)

    for item in items:
        if item.name is not None:
            self.visit(item)

    duplicates = self._get_duplicates(e.name for e in items
                                      if e.name is not None)
    if duplicates:
        self.errors.report('Duplicated nodes found in the graph: {}'
                           .format(self._format_names(duplicates)))
Example #8
Source File: stacks.py From pydfs-lineup-optimizer with MIT License
def build_stacks(self, players: List[Player], optimizer: 'LineupOptimizer') -> List[OptimizerStack]:
    players_by_teams = get_players_grouped_by_teams(players, for_teams=self.for_teams)
    all_positions = tuple(set(chain.from_iterable(self.positions)))
    positions_for_optimizer = Counter(self.positions)
    positions_for_optimizer[all_positions] = len(self.positions)
    all_groups = []  # type: List[BaseGroup]
    for team_name, team_players in players_by_teams.items():
        groups = []
        for positions, total in positions_for_optimizer.items():
            groups.append(PlayersGroup(
                players=[player for player in team_players
                         if list_intersection(player.positions, positions)],
                min_from_group=total,
            ))
        nested_group = NestedPlayersGroup(
            groups=groups,
            max_exposure=self.max_exposure_per_team.get(team_name, self.max_exposure),
        )
        all_groups.append(nested_group)
    return [OptimizerStack(groups=all_groups)]
Example #9
Source File: rules.py From pydfs-lineup-optimizer with MIT License
def apply(self, solver, players_dict):
    optimizer = self.optimizer
    positions, spacing = optimizer.spacing_positions, optimizer.spacing
    if not spacing or not positions:
        return
    players_by_roster_positions = defaultdict(list)  # type: Dict[int, List[Tuple[Player, Any]]]
    for player, variable in players_dict.items():
        if player.roster_order is None or not list_intersection(player.positions, positions):
            continue
        players_by_roster_positions[player.roster_order].append((player, variable))
    for roster_position, players in players_by_roster_positions.items():
        next_restricted_roster_position = roster_position + spacing
        restricted_players = chain.from_iterable(
            players for players_spacing, players in players_by_roster_positions.items()
            if players_spacing >= next_restricted_roster_position
        )
        for first_player, first_variable in restricted_players:
            for second_player, second_variable in players:
                if first_player.team != second_player.team:
                    continue
                solver.add_constraint([first_variable, second_variable], None, SolverSign.LTE, 1)
Example #10
Source File: test_migration_reports_cancer_400_to_600_to_400.py From GelReportModels with Apache License 2.0
def test_migrate_cancer_interpreted_genome(self, fill_nullables=True):
    assembly = Assembly.GRCh38
    original_ig = self.get_valid_object(
        object_type=reports_4_0_0.CancerInterpretedGenome,
        version=self.version_4_0_0, fill_nullables=fill_nullables,
        genomeAssemblyVersion=assembly,
        interpretGenome=True, reportedStructuralVariants=[],
        versionControl=reports_4_0_0.ReportVersionControl(gitVersionControl='4.0.0')
    )
    valid_cancer_origins = ['germline_variant', 'somatic_variant']
    for reported_variant in original_ig.reportedVariants:
        if reported_variant.alleleOrigins[0] not in valid_cancer_origins:
            reported_variant.alleleOrigins[0] = random.choice(valid_cancer_origins)

    # migration requires there is exactly one tumour sample
    migrated, round_tripped = MigrationRunner().roundtrip_cancer_ig(original_ig, assembly)
    self.assertFalse(self.diff_round_tripped(original_ig, round_tripped, ignore_fields=[
        "analysisId", "actions", "additionalTextualVariantAnnotations", "commonAf"]))

    # NOTE: not all fields in actions are kept and the order is not maintained, thus we
    # ignore it in the dictionary comparison and then here manually check them
    expected_report_events = chain.from_iterable(
        map(lambda v: [re for re in v.reportedVariantCancer.reportEvents],
            original_ig.reportedVariants))
    observed_report_events = chain.from_iterable(
        map(lambda v: [re for re in v.reportedVariantCancer.reportEvents],
            round_tripped.reportedVariants))
    self.assertFalse(self.diff_actions(chain(expected_report_events, observed_report_events)))
Example #11
Source File: test_migration_reports_cancer_400_to_600_to_400.py From GelReportModels with Apache License 2.0
def test_migrate_cancer_interpretation_request(self, fill_nullables=True):
    # get original IR in version 4.0.0
    assembly = Assembly.GRCh38
    original_ir = self.get_valid_object(
        object_type=reports_4_0_0.CancerInterpretationRequest,
        version=self.version_4_0_0,
        fill_nullables=fill_nullables, genomeAssemblyVersion=assembly,
        structuralTieredVariants=[],
        versionControl=reports_4_0_0.ReportVersionControl(gitVersionControl='4.0.0')
    )
    valid_cancer_origins = ['germline_variant', 'somatic_variant']
    for tiered_variant in original_ir.tieredVariants:
        if tiered_variant.alleleOrigins[0] not in valid_cancer_origins:
            tiered_variant.alleleOrigins[0] = random.choice(valid_cancer_origins)

    # migration requires there is exactly one tumour sample
    original_ir.cancerParticipant.tumourSamples = [original_ir.cancerParticipant.tumourSamples[0]]
    migrated, round_tripped = MigrationRunner().roundtrip_cancer_ir(original_ir, assembly)
    self.assertFalse(self.diff_round_tripped(original_ir, round_tripped, ignore_fields=[
        "TNMStageVersion", "TNMStageGrouping", "actions", "additionalTextualVariantAnnotations",
        "matchedSamples", "commonAf", "additionalInfo"]))

    # NOTE: not all fields in actions are kept and the order is not maintained, thus we
    # ignore it in the dictionary comparison and then here manually check them
    expected_report_events = chain.from_iterable(
        map(lambda v: [re for re in v.reportedVariantCancer.reportEvents],
            original_ir.tieredVariants))
    observed_report_events = chain.from_iterable(
        map(lambda v: [re for re in v.reportedVariantCancer.reportEvents],
            round_tripped.tieredVariants))
    self.assertFalse(self.diff_actions(chain(expected_report_events, observed_report_events)))
Example #12
Source File: misc.py From misp42splunk with GNU Lesser General Public License v3.0
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    # Knuth's example for prime factors of 1836:  2**2 * 3**3 * 17**1
    >>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
    >>> product = 1
    >>> for factor in prime_factors.elements():     # loop over factors
    ...     product *= factor                       # and multiply them
    >>> product
    1836

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.

    '''
    # Emulate Bag.do from Smalltalk and Multiset.begin from C++.
    return _chain.from_iterable(_starmap(_repeat, self.items()))

# Override dict methods where necessary
Example #13
Source File: prepro.py From neural_japanese_transliterator with Apache License 2.0
def build_vocab():
    # Make romaji and surface (hiragana/katakana/kanji) sentences that are valid.
    romaji_sents, surface_sents = [], []
    for line in codecs.open('preprocessed/ja.tsv', 'r', 'utf-8'):
        try:
            idx, romaji_sent, surface_sent = line.strip().split("\t")
        except ValueError:
            continue
        if len(romaji_sent) < hp.max_len:
            romaji_sents.append(romaji_sent)
            surface_sents.append(surface_sent)

    # Make Romaji vocabulary
    with codecs.open('preprocessed/vocab.romaji.txt', 'w', 'utf-8') as fout:
        fout.write("E\t\nU\t\nS\t\n")  # E: Empty, U: Unknown
        roma2cnt = Counter(chain.from_iterable(romaji_sents))
        for roma, cnt in roma2cnt.most_common(len(roma2cnt)):
            fout.write(u"{}\t{}\n".format(roma, cnt))

    # Make surface vocabulary
    with codecs.open('preprocessed/vocab.surface.txt', 'w', 'utf-8') as fout:
        fout.write("E\t\nU\t\nS\t\n")  # E: Empty, U: Unknown
        surf2cnt = Counter(chain.from_iterable(surface_sents))
        for surf, cnt in surf2cnt.most_common(len(surface_sents)):
            fout.write(u"{}\t{}\n".format(surf, cnt))
Example #14
Source File: migration_test_real_data_cancer.py From GelReportModels with Apache License 2.0
def _check_actions(self, original_reported_variants, round_tripped_reported_variants):
    expected_report_events = chain.from_iterable(
        map(lambda v: [re for re in v.reportedVariantCancer.reportEvents],
            original_reported_variants))
    observed_report_events = chain.from_iterable(
        map(lambda v: [re for re in v.reportedVariantCancer.reportEvents],
            round_tripped_reported_variants))
    return self.migration_runner.diff_actions(chain(expected_report_events, observed_report_events))
Example #15
Source File: header.py From segpy with GNU Affero General Public License v3.0
def __new__(mcs, name, bases, namespace):
    # TODO: This is a good point to validate that the fields are in order and that the
    # TODO: format specification is valid. We shouldn't even build the class otherwise.
    # TODO: Also validate existence of LENGTH_IN_BYTES
    namespace['_ordered_field_names'] = tuple(name for name, attr in namespace.items()
                                              if isinstance(attr, HeaderFieldDescriptor))

    transitive_bases = set(chain.from_iterable(type(base).mro(base) for base in bases))

    if BaseHeader not in transitive_bases:
        bases = (BaseHeader,) + bases

    for attr_name, attr in namespace.items():
        # This shenanigans is necessary so we can have all the following work in a useful way:
        # help(class), help(instance), help(class.property) and help(instance.property)

        # Set the _name attribute of the field instance if it hasn't already been set
        if isinstance(attr, HeaderFieldDescriptor):
            if attr._name is None:
                attr._name = attr_name

    return super().__new__(mcs, name, bases, namespace)
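The transitive_bases line is the chain.from_iterable() idiom of interest here: it flattens the per-base MRO lists into one set of ancestor classes. A minimal sketch with hypothetical classes:

from itertools import chain

class A: pass
class B(A): pass

bases = (B,)
# type(base).mro(base) is the metaclass-safe spelling of base.mro();
# flattening the per-base MROs gives the transitive set of ancestors.
transitive_bases = set(chain.from_iterable(type(base).mro(base) for base in bases))
print(transitive_bases)   # contains B, A and object (set order varies)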
Example #16
Source File: responses.py From pycomm3 with MIT License
def parse_read_reply(data, data_type, elements):
    if data[:2] == STRUCTURE_READ_REPLY:
        data = data[4:]
        size = data_type['data_type']['template']['structure_size']
        dt_name = data_type['data_type']['name']
        if elements > 1:
            value = [parse_read_reply_struct(data[i: i + size], data_type['data_type'])
                     for i in range(0, len(data), size)]
        else:
            value = parse_read_reply_struct(data, data_type['data_type'])
    else:
        datatype = DATA_TYPE[unpack_uint(data[:2])]
        dt_name = datatype
        if elements > 1:
            func = UNPACK_DATA_FUNCTION[datatype]
            size = DATA_TYPE_SIZE[datatype]
            data = data[2:]
            value = [func(data[i:i + size])
                     for i in range(0, len(data), size)]
            if datatype == 'DWORD':
                value = list(chain.from_iterable(dword_to_bool_array(val) for val in value))
        else:
            value = UNPACK_DATA_FUNCTION[datatype](data[2:])
            if datatype == 'DWORD':
                value = dword_to_bool_array(value)

    if dt_name == 'DWORD':
        dt_name = f'BOOL[{elements * 32}]'
    elif elements > 1:
        dt_name = f'{dt_name}[{elements}]'

    return value, dt_name
Example #17
Source File: make_wordvectors.py From wordvectors with MIT License
def get_min_count(sents):
    '''
    Args:
      sents: A list of lists. E.g., [["I", "am", "a", "boy", "."], ["You", "are", "a", "girl", "."]]

    Returns:
      min_count: A uint. Should be set as the parameter value of word2vec `min_count`.
    '''
    global vocab_size
    from itertools import chain

    fdist = nltk.FreqDist(chain.from_iterable(sents))
    min_count = fdist.most_common(vocab_size)[-1][1]  # the count of the top-kth word
    return min_count
Example #18
Source File: simulator.py From mabwiser with Apache License 2.0
def calculate_distances(self, contexts: np.ndarray):

    # Partition contexts by job
    n_jobs, n_contexts, starts = self._partition_contexts(len(contexts))

    # Calculate distances in parallel
    distances = Parallel(n_jobs=n_jobs, backend=self.backend)(
        delayed(self._calculate_distances_of_batch)(
            contexts[starts[i]:starts[i + 1]])
        for i in range(n_jobs))

    # Reduce
    self.distances = list(chain.from_iterable(t for t in distances))

    return self.distances
Example #19
Source File: venv.py From tox with MIT License
def run_install_command(self, packages, action, options=()):
    def expand(val):
        # expand an install command
        if val == "{packages}":
            for package in packages:
                yield package
        elif val == "{opts}":
            for opt in options:
                yield opt
        else:
            yield val

    cmd = list(chain.from_iterable(expand(val) for val in self.envconfig.install_command))

    env = self._get_os_environ()
    self.ensure_pip_os_environ_ok(env)

    old_stdout = sys.stdout
    sys.stdout = codecs.getwriter("utf8")(sys.stdout)
    try:
        self._pcall(
            cmd,
            cwd=self.envconfig.config.toxinidir,
            action=action,
            redirect=reporter.verbosity() < reporter.Verbosity.DEBUG,
            env=env,
        )
    finally:
        sys.stdout = old_stdout
Example #20
Source File: loader.py From zdict with GNU General Public License v3.0
def get_dictionary_map():
    '''
    Auto discover dictionaries in package ``dictionaries``.
    Each dictionary class MUST be the subclass of ``DictBase``

    :return: a dict with {provider_name: cls}
             SomeDict.provider as key, the class as value
    '''
    package = 'zdict.dictionaries'
    exclude_files = ('template.py',)
    return {
        cls(None).provider: cls
        for _, cls in (
            chain.from_iterable(
                getmembers(mod, predicate=_is_dict)
                for mod in (
                    import_module('{}.{}'.format(package, f.partition('.py')[0]))
                    for f in os.listdir(dictionaries.__path__[0])
                    if (not f.startswith('_') and
                        f.endswith('.py') and
                        f not in exclude_files)
                )
            )
        )
    }
Example #21
Source File: test_client.py From pylxd with Apache License 2.0
def test_events_type_filter(self):
    """The websocket client can filter events by type."""
    an_client = client.Client()

    # from the itertools recipes documentation
    def powerset(types):
        from itertools import chain, combinations
        pwset = [combinations(types, r) for r in range(len(types) + 1)]
        return chain.from_iterable(pwset)

    event_path = '/1.0/events'

    for types in powerset(client.EventType):
        ws_client = an_client.events(event_types=set(types))

        actual_resource = parse.urlparse(ws_client.resource)
        expect_resource = parse.urlparse(event_path)

        if types and client.EventType.All not in types:
            type_csl = ','.join([t.value for t in types])
            query = parse.parse_qs(expect_resource.query)
            query.update({'type': type_csl})
            qs = parse.urlencode(query)
            expect_resource = expect_resource._replace(query=qs)

        self.assertEqual(expect_resource.path, actual_resource.path)

        if types and client.EventType.All not in types:
            qdict = parse.parse_qs(expect_resource.query)
            expect_types = set(qdict['type'][0].split(','))
            qdict = parse.parse_qs(actual_resource.query)
            actual_types = set(qdict['type'][0].split(','))

            self.assertEqual(expect_types, actual_types)
        else:
            self.assertEqual(expect_resource.query, actual_resource.query)
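The powerset helper above is the classic recipe from the itertools documentation; in isolation it behaves like this:

from itertools import chain, combinations

def powerset(iterable):
    s = list(iterable)
    # combinations(s, r) for every r flattens into all subsets, by size.
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

print(list(powerset([1, 2])))   # [(), (1,), (2,), (1, 2)]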
Example #22
Source File: string.py From aioredis with MIT License
def mset(self, *args):
    """Set multiple keys to multiple values or unpack dict to keys & values.

    :raises TypeError: if len of args is not an even number
    :raises TypeError: if len of args equals 1 and it is not a dict
    """
    data = args
    if len(args) == 1:
        if not isinstance(args[0], dict):
            raise TypeError("if one arg it should be a dict")
        data = chain.from_iterable(args[0].items())
    elif len(args) % 2 != 0:
        raise TypeError("length of pairs must be even number")
    fut = self.execute(b'MSET', *data)
    return wait_ok(fut)
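The dict branch uses chain.from_iterable() to flatten (key, value) pairs into the alternating key/value argument sequence that the Redis MSET command expects. In isolation, with made-up data:

from itertools import chain

data = {'key1': 'value1', 'key2': 'value2'}
# .items() yields (key, value) pairs; flattening one level produces the
# alternating key/value argument list.
print(list(chain.from_iterable(data.items())))
# ['key1', 'value1', 'key2', 'value2']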
Example #23
Source File: hash.py From aioredis with MIT License
def hmset_dict(self, key, *args, **kwargs):
    """Set multiple hash fields to multiple values.

    dict can be passed as first positional argument:

    >>> await redis.hmset_dict(
    ...     'key', {'field1': 'value1', 'field2': 'value2'})

    or keyword arguments can be used:

    >>> await redis.hmset_dict(
    ...     'key', field1='value1', field2='value2')

    or dict argument can be mixed with kwargs:

    >>> await redis.hmset_dict(
    ...     'key', {'field1': 'value1'}, field2='value2')

    .. note:: ``dict`` and ``kwargs`` do not get mixed into a single
       dictionary; if both are specified and both have the same key(s),
       ``kwargs`` will win:

       >>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')
       >>> await redis.hget('key', 'foo', encoding='utf-8')
       'baz'
    """
    if not args and not kwargs:
        raise TypeError("args or kwargs must be specified")
    pairs = ()
    if len(args) > 1:
        raise TypeError("single positional argument allowed")
    elif len(args) == 1:
        if not isinstance(args[0], dict):
            raise TypeError("args[0] must be dict")
        elif not args[0] and not kwargs:
            raise ValueError("args[0] is empty dict")
        pairs = chain.from_iterable(args[0].items())
    kwargs_pairs = chain.from_iterable(kwargs.items())
    return wait_ok(self.execute(
        b'HMSET', key, *chain(pairs, kwargs_pairs)))
Example #24
Source File: object.py From beavy with Mozilla Public License 2.0
def by_capability(self, aborting=True, abort_code=404, *caps):
    caps = set(chain.from_iterable(
        map(lambda c: getattr(Object.TypesForCapability,
                              getattr(c, 'value', c), []),
            caps)))
    if not caps:
        # No types found, break right here.
        if aborting:
            raise abort(abort_code)
        return self.filter("1=0")
    return self.filter(Object.discriminator.in_(caps))
Example #25
Source File: events.py From olympe with BSD 3-Clause "New" or "Revised" License
def __init__(self, contexts, combine_method, policy=None, marker=None):
    self._contexts = list(contexts)
    self._combine_method = " {} ".format(combine_method)
    super(MultipleEventContext, self).__init__(
        list(chain.from_iterable(map(lambda c: c.events(), self._contexts))),
        policy=policy,
        marker=marker,
    )
Example #26
Source File: noniterators.py From misp42splunk with GNU Lesser General Public License v3.0
def flatmap(f, items):
    return chain.from_iterable(map(f, items))
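This one-liner is the idiomatic Python spelling of flatmap. A small usage sketch with made-up input:

from itertools import chain

def flatmap(f, items):
    return chain.from_iterable(map(f, items))

# Map each string to a list of words, then flatten the lists.
print(list(flatmap(str.split, ["a b", "c d e"])))   # ['a', 'b', 'c', 'd', 'e']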
Example #27
Source File: sortedlist.py From misp42splunk with GNU Lesser General Public License v3.0
def __reversed__(self):
    """Return a reverse iterator over the sorted list.

    ``sl.__reversed__()`` <==> ``reversed(sl)``

    Iterating the sorted list while adding or deleting values may raise a
    :exc:`RuntimeError` or fail to iterate over all values.

    """
    return chain.from_iterable(map(reversed, reversed(self._lists)))
Example #28
Source File: sortedlist.py From misp42splunk with GNU Lesser General Public License v3.0
def __iter__(self):
    """Return an iterator over the sorted list.

    ``sl.__iter__()`` <==> ``iter(sl)``

    Iterating the sorted list while adding or deleting values may raise a
    :exc:`RuntimeError` or fail to iterate over all values.

    """
    return chain.from_iterable(self._lists)
Example #29
Source File: sortedlist.py From misp42splunk with GNU Lesser General Public License v3.0
def update(self, iterable):
    """Update sorted list by adding all values from `iterable`.

    Runtime complexity: `O(k*log(n))` -- approximate.

    >>> sl = SortedList()
    >>> sl.update([3, 1, 2])
    >>> sl
    SortedList([1, 2, 3])

    :param iterable: iterable of values to add

    """
    _lists = self._lists
    _maxes = self._maxes
    values = sorted(iterable)

    if _maxes:
        if len(values) * 4 >= self._len:
            values.extend(chain.from_iterable(_lists))
            values.sort()
            self._clear()
        else:
            _add = self.add
            for val in values:
                _add(val)
            return

    _load = self._load
    _lists.extend(values[pos:(pos + _load)]
                  for pos in range(0, len(values), _load))
    _maxes.extend(sublist[-1] for sublist in _lists)
    self._len = len(values)
    del self._index[:]