Python future.builtins.range() Examples
The following are 27 code examples of future.builtins.range(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module future.builtins, or try the search function.
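Before the examples, a quick refresher on what future.builtins.range provides: a backport of Python 3's lazy range to Python 2, supporting length, indexing, slicing, and membership tests without materializing a list. A minimal sketch (printed values assume Python 3 semantics, which the backport mirrors):

from future.builtins import range

r = range(0, 1000000, 2)   # lazy: no million-element list is built
print(len(r))              # 500000
print(r[-1])               # 999998
print(999998 in r)         # True -- membership is computed arithmetically
print(list(r[10:15]))      # [20, 22, 24, 26, 28] -- slicing yields a range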
Example #1
Source File: test_http_cookiejar.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def test_escape_path(self):
    cases = [
        # quoted safe
        ("/foo%2f/bar", "/foo%2F/bar"),
        ("/foo%2F/bar", "/foo%2F/bar"),
        # quoted %
        ("/foo%%/bar", "/foo%%/bar"),
        # quoted unsafe
        ("/fo%19o/bar", "/fo%19o/bar"),
        ("/fo%7do/bar", "/fo%7Do/bar"),
        # unquoted safe
        ("/foo/bar&", "/foo/bar&"),
        ("/foo//bar", "/foo//bar"),
        ("\176/foo/bar", "\176/foo/bar"),
        # unquoted unsafe
        ("/foo\031/bar", "/foo%19/bar"),
        ("/\175foo/bar", "/%7Dfoo/bar"),
        # unicode, latin-1 range
        ("/foo/bar\u00fc", "/foo/bar%C3%BC"),  # UTF-8 encoded
        # unicode
        ("/foo/bar\uabcd", "/foo/bar%EA%AF%8D"),  # UTF-8 encoded
    ]
    for arg, result in cases:
        self.assertEqual(escape_path(arg), result)
Example #2
Source File: base64mime.py From verge3d-blender-addon with GNU General Public License v3.0 | 6 votes |
def body_encode(s, maxlinelen=76, eol=NL):
    r"""Encode a string with base64.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).

    Each line of encoded text will end with eol, which defaults to "\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    """
    if not s:
        return s

    encvec = []
    max_unencoded = maxlinelen * 3 // 4
    for i in range(0, len(s), max_unencoded):
        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
        # adding a newline to the encoded string?
        enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
        if enc.endswith(NL) and eol != NL:
            enc = enc[:-1] + eol
        encvec.append(enc)
    return EMPTYSTRING.join(encvec)
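A side note on the chunk size above, in case the arithmetic is opaque: base64 expands every 3 input bytes into 4 output characters, so maxlinelen * 3 // 4 is the largest number of raw bytes whose encoding still fits on one line. A quick self-contained check:

from binascii import b2a_base64

maxlinelen = 76
chunk = b'x' * (maxlinelen * 3 // 4)            # 57 bytes
encoded = b2a_base64(chunk).decode('ascii')
assert len(encoded.rstrip('\n')) == maxlinelen  # exactly one full line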
Example #3
Source File: parse.py From verge3d-blender-addon with GNU General Public License v3.0 | 6 votes |
def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.  By default, percent-encoded sequences are decoded with UTF-8,
    and invalid sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Is it a string-like object? (bare attribute access raises
        # AttributeError for non-strings such as None)
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)
Example #4
Source File: feedparser.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def push(self, data):
    """Push some new data into this object."""
    # Handle any previous leftovers
    data, self._partial = self._partial + data, ''
    # Crack into lines, but preserve the newlines on the end of each
    parts = NLCRE_crack.split(data)
    # The *ahem* interesting behaviour of re.split when supplied grouping
    # parentheses is that the last element of the resulting list is the
    # data after the final RE.  In the case of a NL/CR terminated string,
    # this is the empty string.
    self._partial = parts.pop()
    # GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending
    # with \r: is there a \n to follow later?
    if not self._partial and parts and parts[-1].endswith('\r'):
        self._partial = parts.pop(-2) + parts.pop()
    # parts is a list of strings, alternating between the line contents
    # and the eol character(s).  Gather up a list of lines after
    # re-attaching the newlines.
    lines = []
    for i in range(len(parts) // 2):
        lines.append(parts[i * 2] + parts[i * 2 + 1])
    self.pushlines(lines)
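The pairing loop at the end relies on the re.split behaviour the comment alludes to: with a capturing group in the pattern, the separators are kept in the result, alternating with the content. A standalone illustration (the pattern is re-created here on the assumption that NLCRE_crack matches line endings with a capturing group, as in the email package):

import re

NLCRE_crack = re.compile(r'(\r\n|\r|\n)')  # capturing group keeps separators

parts = NLCRE_crack.split('first\nsecond\r\nthird')
print(parts)            # ['first', '\n', 'second', '\r\n', 'third']

partial = parts.pop()   # text after the last newline ('' if data ended in one)
lines = [parts[i * 2] + parts[i * 2 + 1] for i in range(len(parts) // 2)]
print(lines)            # ['first\n', 'second\r\n']
print(partial)          # 'third'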
Example #5
Source File: base64mime.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def body_encode(s, maxlinelen=76, eol=NL): r"""Encode a string with base64. Each line will be wrapped at, at most, maxlinelen characters (defaults to 76 characters). Each line of encoded text will end with eol, which defaults to "\n". Set this to "\r\n" if you will be using the result of this function directly in an email. """ if not s: return s encvec = [] max_unencoded = maxlinelen * 3 // 4 for i in range(0, len(s), max_unencoded): # BAW: should encode() inherit b2a_base64()'s dubious behavior in # adding a newline to the encoded string? enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii") if enc.endswith(NL) and eol != NL: enc = enc[:-1] + eol encvec.append(enc) return EMPTYSTRING.join(encvec)
Example #6
Source File: parse.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.  By default, percent-encoded sequences are decoded with UTF-8,
    and invalid sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Is it a string-like object? (bare attribute access raises
        # AttributeError for non-strings such as None)
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)
Example #7
Source File: message.py From verge3d-blender-addon with GNU General Public License v3.0 | 6 votes |
def replace_header(self, _name, _value):
    """Replace a header.

    Replace the first matching header found in the message, retaining header
    order and case.  If no matching header was found, a KeyError is raised.
    """
    _name = _name.lower()
    for i, (k, v) in zip(range(len(self._headers)), self._headers):
        if k.lower() == _name:
            self._headers[i] = self.policy.header_store_parse(k, _value)
            break
    else:
        raise KeyError(_name)

#
# Use these three methods instead of the three above.
#
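The zip(range(len(...)), ...) construction above is enumerate() spelled out longhand; with future's builtins both zip and range stay lazy on Python 2, so the spelled-out form costs nothing extra. The two are interchangeable here:

headers = [('Subject', 'old'), ('To', 'a@example.com')]  # illustrative data

for i, (k, v) in zip(range(len(headers)), headers):
    print(i, k, v)

for i, (k, v) in enumerate(headers):                     # equivalent
    print(i, k, v)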
Example #8
Source File: message.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def replace_header(self, _name, _value):
    """Replace a header.

    Replace the first matching header found in the message, retaining header
    order and case.  If no matching header was found, a KeyError is raised.
    """
    _name = _name.lower()
    for i, (k, v) in zip(range(len(self._headers)), self._headers):
        if k.lower() == _name:
            self._headers[i] = self.policy.header_store_parse(k, _value)
            break
    else:
        raise KeyError(_name)

#
# Use these three methods instead of the three above.
#
Example #9
Source File: preprocessor.py From imitation-learning with MIT License | 6 votes |
def run_pipeline():
    for label, subdir in [(VAL_SPLIT, 'SeqVal'), (TRAIN_SPLIT, 'SeqTrain')]:
        # get list of h5 files to preprocess
        h5_files = get_files(subdir, data_dir=FLAGS.data_dir,
                             num_files=FLAGS.h5_files_per_split)
        random.shuffle(h5_files)
        for index in range(0, len(h5_files), BATCHSIZE):
            # integer division: the '{:05d}' format spec requires an int
            output_filename = '{}-{:05d}.tfrecord.gz'.format(label, index // BATCHSIZE)
            output_filepath = os.path.join(FLAGS.preproc_output_dir, output_filename)
            upper_index = min(index + BATCHSIZE, len(h5_files))
            some_h5_files = h5_files[index:upper_index]
            write_tfrecord_file(output_filepath, some_h5_files)
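The range(0, len(h5_files), BATCHSIZE) loop is the usual fixed-size batching idiom; pulled out of context it looks like this (chunks and size are illustrative names, not part of the project above):

def chunks(seq, size):
    """Yield consecutive slices of seq, each at most size items long."""
    for start in range(0, len(seq), size):
        yield seq[start:start + size]

print(list(chunks(list('abcdefg'), 3)))  # [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]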
Example #10
Source File: __init__.py From py-mysql-elasticsearch-sync with MIT License | 6 votes |
def _bulker(self, bulk_size):
    """
    Example:
        u = bulker()
        u.send(None)              # initialize the generator
        u.send(json_str)          # feed in a json item
        u.send(another_json_str)  # feed in a json item
        ...
        u.send(None)              # force-finish the bulk and post it
    """
    while True:
        data = ""
        for i in range(bulk_size):
            item = yield
            if item:
                data = data + item + "\n"
            else:
                break
        # print(data)
        print('-' * 10)
        if data:
            self._post_to_es(data)
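The docstring's usage notes deserve a runnable form. The sketch below is a simplified stand-in (bulker and post are illustrative names; the real method posts to Elasticsearch) showing the coroutine protocol: prime with send(None), feed items with send(item), and send a falsy value to flush early:

def bulker(bulk_size, post):
    while True:
        data = ""
        for _ in range(bulk_size):
            item = yield
            if item:
                data += item + "\n"
            else:
                break                 # falsy item: stop collecting early
        if data:
            post(data)                # ship whatever was collected

u = bulker(2, post=print)
u.send(None)            # prime the generator
u.send('{"a": 1}')
u.send('{"b": 2}')      # bulk_size reached -> both lines are posted
u.send('{"c": 3}')
u.send(None)            # flush the partial bulk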
Example #11
Source File: base64mime.py From deepWordBug with Apache License 2.0 | 6 votes |
def body_encode(s, maxlinelen=76, eol=NL):
    r"""Encode a string with base64.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).

    Each line of encoded text will end with eol, which defaults to "\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    """
    if not s:
        return s

    encvec = []
    max_unencoded = maxlinelen * 3 // 4
    for i in range(0, len(s), max_unencoded):
        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
        # adding a newline to the encoded string?
        enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
        if enc.endswith(NL) and eol != NL:
            enc = enc[:-1] + eol
        encvec.append(enc)
    return EMPTYSTRING.join(encvec)
Example #12
Source File: feedparser.py From deepWordBug with Apache License 2.0 | 6 votes |
def push(self, data):
    """Push some new data into this object."""
    # Handle any previous leftovers
    data, self._partial = self._partial + data, ''
    # Crack into lines, but preserve the newlines on the end of each
    parts = NLCRE_crack.split(data)
    # The *ahem* interesting behaviour of re.split when supplied grouping
    # parentheses is that the last element of the resulting list is the
    # data after the final RE.  In the case of a NL/CR terminated string,
    # this is the empty string.
    self._partial = parts.pop()
    # GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending
    # with \r: is there a \n to follow later?
    if not self._partial and parts and parts[-1].endswith('\r'):
        self._partial = parts.pop(-2) + parts.pop()
    # parts is a list of strings, alternating between the line contents
    # and the eol character(s).  Gather up a list of lines after
    # re-attaching the newlines.
    lines = []
    for i in range(len(parts) // 2):
        lines.append(parts[i * 2] + parts[i * 2 + 1])
    self.pushlines(lines)
Example #13
Source File: message.py From deepWordBug with Apache License 2.0 | 6 votes |
def replace_header(self, _name, _value):
    """Replace a header.

    Replace the first matching header found in the message, retaining header
    order and case.  If no matching header was found, a KeyError is raised.
    """
    _name = _name.lower()
    for i, (k, v) in zip(range(len(self._headers)), self._headers):
        if k.lower() == _name:
            self._headers[i] = self.policy.header_store_parse(k, _value)
            break
    else:
        raise KeyError(_name)

#
# Use these three methods instead of the three above.
#
Example #14
Source File: parse.py From deepWordBug with Apache License 2.0 | 6 votes |
def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.  By default, percent-encoded sequences are decoded with UTF-8,
    and invalid sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Is it a string-like object? (bare attribute access raises
        # AttributeError for non-strings such as None)
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)
Example #15
Source File: sphere.py From s2sphere with MIT License | 6 votes |
def contains(self, other):
    if isinstance(other, self.__class__):
        if self.is_full() or other.is_empty():
            return True
        return (self.angle().radians >=
                self.axis().angle(other.axis()) + other.angle().radians)
    elif isinstance(other, Point):
        assert is_unit_length(other)
        return (self.axis() - other).norm2() <= 2 * self.height()
    elif isinstance(other, Cell):
        vertices = []
        for k in range(4):
            vertices.append(other.get_vertex(k))
            if not self.contains(vertices[k]):
                return False
        return not self.complement().intersects(other, vertices)
    else:
        raise NotImplementedError()
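The Point branch is a squared-chord-length test. For unit vectors a and p separated by angle theta, |a - p|**2 = 2 - 2*cos(theta), and an S2 cap's height is 1 - cos(angle), so comparing against 2 * height is exactly the test theta <= angle. A numeric sanity check (the function name is illustrative):

import math

def point_in_cap(theta, cap_angle):
    chord2 = 2 - 2 * math.cos(theta)       # |axis - point|**2 for unit vectors
    height = 1 - math.cos(cap_angle)       # the cap's "height"
    return chord2 <= 2 * height            # equivalent to theta <= cap_angle

assert point_in_cap(0.2, 0.3)
assert not point_in_cap(0.4, 0.3)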
Example #16
Source File: sphere.py From s2sphere with MIT License | 6 votes |
def _init_lookup_cell(level, i, j, orig_orientation, pos, orientation):
    if level == LOOKUP_BITS:
        ij = (i << LOOKUP_BITS) + j
        LOOKUP_POS[(ij << 2) + orig_orientation] = (pos << 2) + orientation
        LOOKUP_IJ[(pos << 2) + orig_orientation] = (ij << 2) + orientation
    else:
        level = level + 1
        i <<= 1
        j <<= 1
        pos <<= 2
        r = POS_TO_IJ[orientation]
        for index in range(4):
            _init_lookup_cell(
                level,
                i + (r[index] >> 1),
                j + (r[index] & 1),
                orig_orientation,
                pos + index,
                orientation ^ POS_TO_ORIENTATION[index],
            )
Example #17
Source File: sphere.py From s2sphere with MIT License | 6 votes |
def contains(self, *args):
    if len(args) == 1 and isinstance(args[0], Cell):
        return self.contains(args[0].id())
    elif len(args) == 1 and isinstance(args[0], CellId):
        cell_id = args[0]
        index = bisect.bisect_left(self.__cell_ids, cell_id)
        if index < len(self.__cell_ids) \
                and self.__cell_ids[index].range_min() <= cell_id:
            return True
        return (index != 0 and
                self.__cell_ids[index - 1].range_max() >= cell_id)
    elif len(args) == 1 and isinstance(args[0], Point):
        return self.contains(CellId.from_point(args[0]))
    elif len(args) == 1 and isinstance(args[0], self.__class__):
        cell_union = args[0]
        for i in range(cell_union.num_cells()):
            if not self.contains(cell_union.cell_id(i)):
                return False
        return True
    else:
        raise NotImplementedError()
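The CellId branch is a sorted-interval membership test built on bisect: find the insertion point, then check the interval at that position and the one just before it. The same shape, with cell ranges replaced by plain (lo, hi) pairs (covers and intervals are illustrative names):

import bisect

def covers(intervals, value):
    starts = [lo for lo, hi in intervals]
    index = bisect.bisect_left(starts, value)
    if index < len(intervals) and intervals[index][0] <= value <= intervals[index][1]:
        return True
    return index != 0 and intervals[index - 1][1] >= value

intervals = [(0, 4), (10, 14), (20, 24)]   # sorted, non-overlapping
assert covers(intervals, 12)
assert not covers(intervals, 7)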
Example #18
Source File: sphere.py From s2sphere with MIT License | 6 votes |
def __get_initial_candidates(self):
    if self.__max_cells >= 4:
        cap = self.__region.get_cap_bound()
        level = min(
            CellId.min_width().get_max_level(2 * cap.angle().radians),
            min(self.__max_level, CellId.MAX_LEVEL - 1),
        )
        if self.__level_mod > 1 and level > self.__min_level:
            level -= (level - self.__min_level) % self.__level_mod
        if level > 0:
            cell_id = CellId.from_point(cap.axis())
            vertex_neighbors = cell_id.get_vertex_neighbors(level)
            for neighbor in vertex_neighbors:
                self.__add_candidate(self.__new_candidate(Cell(neighbor)))
            return
    for face in range(6):
        self.__add_candidate(self.__new_candidate(FACE_CELLS[face]))
Example #19
Source File: doujinshi.py From nhentai with MIT License | 6 votes |
def download(self):
    logger.info('Starting to download doujinshi: %s' % self.name)
    if self.downloader:
        download_queue = []
        if len(self.ext) != self.pages:
            logger.warning('Page count and extension count do not match')

        for i in range(1, min(self.pages, len(self.ext)) + 1):
            download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i - 1]))

        self.downloader.download(download_queue, self.filename)

        '''
        for i in range(len(self.ext)):
            download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i + 1, EXT_MAP[self.ext[i]]))
        '''
    else:
        logger.critical('Downloader has not been loaded')
Example #20
Source File: base64mime.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def body_encode(s, maxlinelen=76, eol=NL):
    r"""Encode a string with base64.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).

    Each line of encoded text will end with eol, which defaults to "\n".  Set
    this to "\r\n" if you will be using the result of this function directly
    in an email.
    """
    if not s:
        return s

    encvec = []
    max_unencoded = maxlinelen * 3 // 4
    for i in range(0, len(s), max_unencoded):
        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
        # adding a newline to the encoded string?
        enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
        if enc.endswith(NL) and eol != NL:
            enc = enc[:-1] + eol
        encvec.append(enc)
    return EMPTYSTRING.join(encvec)
Example #21
Source File: feedparser.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def push(self, data):
    """Push some new data into this object."""
    # Handle any previous leftovers
    data, self._partial = self._partial + data, ''
    # Crack into lines, but preserve the newlines on the end of each
    parts = NLCRE_crack.split(data)
    # The *ahem* interesting behaviour of re.split when supplied grouping
    # parentheses is that the last element of the resulting list is the
    # data after the final RE.  In the case of a NL/CR terminated string,
    # this is the empty string.
    self._partial = parts.pop()
    # GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending
    # with \r: is there a \n to follow later?
    if not self._partial and parts and parts[-1].endswith('\r'):
        self._partial = parts.pop(-2) + parts.pop()
    # parts is a list of strings, alternating between the line contents
    # and the eol character(s).  Gather up a list of lines after
    # re-attaching the newlines.
    lines = []
    for i in range(len(parts) // 2):
        lines.append(parts[i * 2] + parts[i * 2 + 1])
    self.pushlines(lines)
Example #22
Source File: message.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def replace_header(self, _name, _value):
    """Replace a header.

    Replace the first matching header found in the message, retaining header
    order and case.  If no matching header was found, a KeyError is raised.
    """
    _name = _name.lower()
    for i, (k, v) in zip(range(len(self._headers)), self._headers):
        if k.lower() == _name:
            self._headers[i] = self.policy.header_store_parse(k, _value)
            break
    else:
        raise KeyError(_name)

#
# Use these three methods instead of the three above.
#
Example #23
Source File: parse.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.  By default, percent-encoded sequences are decoded with UTF-8,
    and invalid sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if '%' not in string:
        # Is it a string-like object? (bare attribute access raises
        # AttributeError for non-strings such as None)
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)
Example #24
Source File: test_range.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def test_slice_range(self):
    r = range(8)
    self.assertEqual(r[:], range(8))
    self.assertEqual(r[:2], range(2))
    self.assertEqual(r[:-2], range(6))
    self.assertEqual(r[2:], range(2, 8))
    self.assertEqual(r[-2:], range(6, 8))
    self.assertEqual(r[2:-2], range(2, 6))
    self.assertEqual(r[-2:2:-1], range(6, 2, -1))
    r = r[::-1]
    self.assertEqual(r, range(7, -1, -1))
    self.assertEqual(r[:], range(7, -1, -1))
    self.assertEqual(r[:2], range(7, 5, -1))
    self.assertEqual(r[:-2], range(7, 1, -1))
    self.assertEqual(r[2:], range(5, -1, -1))
    self.assertEqual(r[-2:], range(1, -1, -1))
    self.assertEqual(r[2:-2], range(5, 1, -1))
    self.assertEqual(r[-2:2:-1], range(1, 5))
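One of the assertions above is worth unpacking: reversing range(8) keeps the same eight values but walks them backwards, so the new start is the old last element (7), the stop is one past the old first element (-1), and the step is negated. Under Python 3 semantics, which the backport mirrors:

r = range(8)
assert list(r[::-1]) == [7, 6, 5, 4, 3, 2, 1, 0]
assert r[::-1] == range(7, -1, -1)   # ranges compare by the sequence they produce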
Example #25
Source File: test_urllib.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 6 votes |
def test_unquoting(self):
    # Make sure unquoting of all ASCII values works
    escape_list = []
    for num in range(128):
        given = hexescape(chr(num))
        expect = chr(num)
        result = urllib_parse.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %r != %r" % (expect, result))
        result = urllib_parse.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %r != %r" % (expect, result))
        escape_list.append(given)
    escape_string = ''.join(escape_list)
    del escape_list
    result = urllib_parse.unquote(escape_string)
    self.assertEqual(result.count('%'), 1,
                     "using unquote(): not all characters escaped: "
                     "%s" % result)
    self.assertRaises((TypeError, AttributeError), urllib_parse.unquote, None)
    self.assertRaises((TypeError, AttributeError), urllib_parse.unquote, ())
    with support.check_warnings(('', BytesWarning), quiet=True):
        self.assertRaises((TypeError, AttributeError),
                          urllib_parse.unquote, bytes(b''))
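hexescape() is a helper defined elsewhere in the test module; a plausible minimal stand-in for the ASCII range exercised here (illustrative only, not the test suite's exact definition):

def hexescape(char):
    """Percent-encode a single character, e.g. ' ' -> '%20'."""
    return ''.join('%{:02X}'.format(b) for b in char.encode('utf-8'))

print(hexescape(' '))   # %20
print(hexescape('A'))   # %41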
Example #26
Source File: feedparser.py From verge3d-blender-addon with GNU General Public License v3.0 | 6 votes |
def push(self, data):
    """Push some new data into this object."""
    # Handle any previous leftovers
    data, self._partial = self._partial + data, ''
    # Crack into lines, but preserve the newlines on the end of each
    parts = NLCRE_crack.split(data)
    # The *ahem* interesting behaviour of re.split when supplied grouping
    # parentheses is that the last element of the resulting list is the
    # data after the final RE.  In the case of a NL/CR terminated string,
    # this is the empty string.
    self._partial = parts.pop()
    # GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending
    # with \r: is there a \n to follow later?
    if not self._partial and parts and parts[-1].endswith('\r'):
        self._partial = parts.pop(-2) + parts.pop()
    # parts is a list of strings, alternating between the line contents
    # and the eol character(s).  Gather up a list of lines after
    # re-attaching the newlines.
    lines = []
    for i in range(len(parts) // 2):
        lines.append(parts[i * 2] + parts[i * 2 + 1])
    self.pushlines(lines)
Example #27
Source File: support.py From kgsgo-dataset-preprocessor with Mozilla Public License 2.0 | 5 votes |
def modules_cleanup(oldmodules):
    # Encoders/decoders are registered permanently within the internal
    # codec cache. If we destroy the corresponding modules their
    # globals will be set to None which will trip up the cached functions.
    encodings = [(k, v) for k, v in sys.modules.items()
                 if k.startswith('encodings.')]
    # Was:
    #     sys.modules.clear()
    # Py2-compatible:
    for i in range(len(sys.modules)):
        sys.modules.popitem()   # dict.pop() requires a key; popitem() does not
    sys.modules.update(encodings)
    # XXX: This kind of problem can affect more than just encodings.  In
    # particular extension modules (such as _ssl) don't cope with reloading
    # properly.  Really, test modules should be cleaning out the test
    # specific modules they know they added (ala test_runpy) rather than
    # relying on this function (as test_importhooks and test_pkg do
    # currently).  Implicitly imported *real* modules should be left alone
    # (see issue 10556).
    sys.modules.update(oldmodules)

#=======================================================================
# Backported versions of threading_setup() and threading_cleanup() which
# don't refer to threading._dangling (not available on Py2.7).

# Threading support to prevent reporting refleaks when running regrtest.py -R

# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.