Python tornado.iostream.IOStream() Examples
The following are 30 code examples of tornado.iostream.IOStream().
You may also want to check out all available functions and classes of the module tornado.iostream.
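
Before diving into the examples, here is a minimal sketch of the pattern most of them share: wrap a plain TCP socket in an IOStream, connect, write raw bytes, and read a delimited reply. The host, port, and request line below are illustrative placeholders (they assume some server is already listening); they are not taken from any example on this page.

import socket

from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, StreamClosedError


async def fetch_headers(host="127.0.0.1", port=8888):
    # Wrap a plain TCP socket in an IOStream; all I/O on it is non-blocking.
    stream = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
    try:
        await stream.connect((host, port))
        # Write a raw request and read until the blank line that ends the headers.
        await stream.write(b"GET / HTTP/1.0\r\n\r\n")
        return await stream.read_until(b"\r\n\r\n")
    except StreamClosedError:
        # The peer closed the connection (or the connect failed).
        return b""
    finally:
        stream.close()


if __name__ == "__main__":
    print(IOLoop.current().run_sync(fetch_headers))

The examples below exercise the same calls (connect, write, read_until, read_until_close, read_bytes, close), in both the older callback style and the coroutine style.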
Example #1
Source File: tcpclient.py From tornado-zh with MIT License | 7 votes |
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
            max_buffer_size=None):
    """Connect to the given host and port.

    Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
    ``ssl_options`` is not None).
    """
    addrinfo = yield self.resolver.resolve(host, port, af)
    connector = _Connector(
        addrinfo, self.io_loop,
        functools.partial(self._create_stream, max_buffer_size))
    af, addr, stream = yield connector.start()
    # TODO: For better performance we could cache the (af, addr)
    # information here and re-use it on subsequent connections to
    # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
    if ssl_options is not None:
        stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                        server_hostname=host)
    raise gen.Return(stream)
Example #2
Source File: tcpclient.py From opendevops with GNU General Public License v3.0 | 6 votes |
def __init__(
    self,
    addrinfo: List[Tuple],
    connect: Callable[
        [socket.AddressFamily, Tuple], Tuple[IOStream, "Future[IOStream]"]
    ],
) -> None:
    self.io_loop = IOLoop.current()
    self.connect = connect

    self.future = (
        Future()
    )  # type: Future[Tuple[socket.AddressFamily, Any, IOStream]]
    self.timeout = None  # type: Optional[object]
    self.connect_timeout = None  # type: Optional[object]
    self.last_error = None  # type: Optional[Exception]
    self.remaining = len(addrinfo)
    self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
    self.streams = set()  # type: Set[IOStream]
Example #3
Source File: TTornado.py From galaxy-sdk-python with Apache License 2.0 | 6 votes |
def open(self, timeout=None):
    logger.debug('socket connecting')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    self.stream = iostream.IOStream(sock)

    try:
        connect = self.stream.connect((self.host, self.port))
        if timeout is not None:
            yield self.with_timeout(timeout, connect)
        else:
            yield connect
    except (socket.error, IOError, ioloop.TimeoutError) as e:
        message = 'could not connect to {}:{} ({})'.format(self.host, self.port, e)
        raise TTransportException(
            type=TTransportException.NOT_OPEN,
            message=message)

    raise gen.Return(self)
Example #4
Source File: tcpserver_test.py From opendevops with GNU General Public License v3.0 | 6 votes |
def test_handle_stream_native_coroutine(self):
    # handle_stream may be a native coroutine.

    class TestServer(TCPServer):
        async def handle_stream(self, stream, address):
            stream.write(b"data")
            stream.close()

    sock, port = bind_unused_port()
    server = TestServer()
    server.add_socket(sock)
    client = IOStream(socket.socket())
    yield client.connect(("localhost", port))
    result = yield client.read_until_close()
    self.assertEqual(result, b"data")
    server.stop()
    client.close()
Example #5
Source File: httpserver_test.py From tornado-zh with MIT License | 6 votes |
def test_body_size_override_reset(self):
    # The max_body_size override is reset between requests.
    stream = IOStream(socket.socket())
    try:
        yield stream.connect(('127.0.0.1', self.get_http_port()))
        # Use a raw stream so we can make sure it's all on one connection.
        stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
                     b'Content-Length: 10240\r\n\r\n')
        stream.write(b'a' * 10240)
        headers, response = yield gen.Task(read_stream_body, stream)
        self.assertEqual(response, b'10240')
        # Without the ?expected_size parameter, we get the old default value
        stream.write(b'PUT /streaming HTTP/1.1\r\n'
                     b'Content-Length: 10240\r\n\r\n')
        with ExpectLog(gen_log, '.*Content-Length too long'):
            data = yield stream.read_until_close()
        self.assertEqual(data, b'')
    finally:
        stream.close()
Example #6
Source File: httpserver_test.py From opendevops with GNU General Public License v3.0 | 6 votes |
def test_body_size_override_reset(self):
    # The max_body_size override is reset between requests.
    stream = IOStream(socket.socket())
    try:
        yield stream.connect(("127.0.0.1", self.get_http_port()))
        # Use a raw stream so we can make sure it's all on one connection.
        stream.write(
            b"PUT /streaming?expected_size=10240 HTTP/1.1\r\n"
            b"Content-Length: 10240\r\n\r\n"
        )
        stream.write(b"a" * 10240)
        start_line, headers, response = yield read_stream_body(stream)
        self.assertEqual(response, b"10240")
        # Without the ?expected_size parameter, we get the old default value
        stream.write(
            b"PUT /streaming HTTP/1.1\r\n" b"Content-Length: 10240\r\n\r\n"
        )
        with ExpectLog(gen_log, ".*Content-Length too long"):
            data = yield stream.read_until_close()
        self.assertEqual(data, b"HTTP/1.1 400 Bad Request\r\n\r\n")
    finally:
        stream.close()
Example #7
Source File: tcpclient.py From tornado-zh with MIT License | 6 votes |
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
            max_buffer_size=None):
    """Connect to the given host and port.

    Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
    ``ssl_options`` is not None).
    """
    addrinfo = yield self.resolver.resolve(host, port, af)
    connector = _Connector(
        addrinfo, self.io_loop,
        functools.partial(self._create_stream, max_buffer_size))
    af, addr, stream = yield connector.start()
    # TODO: For better performance we could cache the (af, addr)
    # information here and re-use it on subsequent connections to
    # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
    if ssl_options is not None:
        stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                        server_hostname=host)
    raise gen.Return(stream)
Example #8
Source File: http1connection.py From opendevops with GNU General Public License v3.0 | 6 votes |
def __init__(
    self,
    stream: iostream.IOStream,
    params: HTTP1ConnectionParameters = None,
    context: object = None,
) -> None:
    """
    :arg stream: an `.IOStream`
    :arg params: a `.HTTP1ConnectionParameters` or None
    :arg context: an opaque application-defined object that is accessible
        as ``connection.context``
    """
    self.stream = stream
    if params is None:
        params = HTTP1ConnectionParameters()
    self.params = params
    self.context = context
    self._serving_future = None  # type: Optional[Future[None]]
Example #9
Source File: httpclient_test.py From tornado-zh with MIT License | 6 votes |
def test_multi_line_headers(self):
    # Multi-line http headers are rare but rfc-allowed
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
    sock, port = bind_unused_port()
    with closing(sock):
        def write_response(stream, request_data):
            if b"HTTP/1." not in request_data:
                self.skipTest("requires HTTP/1.x")
            stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(b"\n", b"\r\n"), callback=stream.close)

        def accept_callback(conn, address):
            stream = IOStream(conn, io_loop=self.io_loop)
            stream.read_until(b"\r\n\r\n",
                              functools.partial(write_response, stream))
        netutil.add_accept_handler(sock, accept_callback, self.io_loop)
        self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
        resp = self.wait()
        resp.rethrow()
        self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
        self.io_loop.remove_handler(sock.fileno())
Example #10
Source File: httpserver_test.py From viewfinder with Apache License 2.0 | 6 votes |
def test_unix_socket(self):
    sockfile = os.path.join(self.tmpdir, "test.sock")
    sock = netutil.bind_unix_socket(sockfile)
    app = Application([("/hello", HelloWorldRequestHandler)])
    server = HTTPServer(app, io_loop=self.io_loop)
    server.add_socket(sock)
    stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
    stream.connect(sockfile, self.stop)
    self.wait()
    stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
    stream.read_until(b"\r\n", self.stop)
    response = self.wait()
    self.assertEqual(response, b"HTTP/1.0 200 OK\r\n")
    stream.read_until(b"\r\n\r\n", self.stop)
    headers = HTTPHeaders.parse(self.wait().decode('latin1'))
    stream.read_bytes(int(headers["Content-Length"]), self.stop)
    body = self.wait()
    self.assertEqual(body, b"Hello world")
    stream.close()
    server.stop()
Example #11
Source File: httpclient_test.py From tornado-zh with MIT License | 6 votes |
def test_multi_line_headers(self):
    # Multi-line http headers are rare but rfc-allowed
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
    sock, port = bind_unused_port()
    with closing(sock):
        def write_response(stream, request_data):
            if b"HTTP/1." not in request_data:
                self.skipTest("requires HTTP/1.x")
            stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(b"\n", b"\r\n"), callback=stream.close)

        def accept_callback(conn, address):
            stream = IOStream(conn, io_loop=self.io_loop)
            stream.read_until(b"\r\n\r\n",
                              functools.partial(write_response, stream))
        netutil.add_accept_handler(sock, accept_callback, self.io_loop)
        self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
        resp = self.wait()
        resp.rethrow()
        self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
        self.io_loop.remove_handler(sock.fileno())
Example #12
Source File: httpclient_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def test_chunked_close(self):
    # test case in which chunks spread read-callback processing
    # over several ioloop iterations, but the connection is already closed.
    sock, port = bind_unused_port()
    with closing(sock):

        @gen.coroutine
        def accept_callback(conn, address):
            # fake an HTTP server using chunked encoding where the final chunks
            # and connection close all happen at once
            stream = IOStream(conn)
            request_data = yield stream.read_until(b"\r\n\r\n")
            if b"HTTP/1." not in request_data:
                self.skipTest("requires HTTP/1.x")
            yield stream.write(
                b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(
                    b"\n", b"\r\n"
                )
            )
            stream.close()

        netutil.add_accept_handler(sock, accept_callback)  # type: ignore
        resp = self.fetch("http://127.0.0.1:%d/" % port)
        resp.rethrow()
        self.assertEqual(resp.body, b"12")
        self.io_loop.remove_handler(sock.fileno())
Example #13
Source File: http1connection.py From opendevops with GNU General Public License v3.0 | 5 votes |
def _on_connection_close(self) -> None:
    # Note that this callback is only registered on the IOStream
    # when we have finished reading the request and are waiting for
    # the application to produce its response.
    if self._close_callback is not None:
        callback = self._close_callback
        self._close_callback = None
        callback()
    if not self._finish_future.done():
        future_set_result_unless_cancelled(self._finish_future, None)
    self._clear_callbacks()
Example #14
Source File: httpclient_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def test_multi_line_headers(self):
    # Multi-line http headers are rare but rfc-allowed
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
    sock, port = bind_unused_port()
    with closing(sock):

        @gen.coroutine
        def accept_callback(conn, address):
            stream = IOStream(conn)
            request_data = yield stream.read_until(b"\r\n\r\n")
            if b"HTTP/1." not in request_data:
                self.skipTest("requires HTTP/1.x")
            yield stream.write(
                b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(
                    b"\n", b"\r\n"
                )
            )
            stream.close()

        netutil.add_accept_handler(sock, accept_callback)  # type: ignore
        resp = self.fetch("http://127.0.0.1:%d/" % port)
        resp.rethrow()
        self.assertEqual(resp.headers["X-XSS-Protection"], "1; mode=block")
        self.io_loop.remove_handler(sock.fileno())
Example #15
Source File: web_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def connect(self, url, connection_close):
    # Use a raw connection so we can control the sending of data.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    s.connect(("127.0.0.1", self.get_http_port()))
    stream = IOStream(s)
    stream.write(b"GET " + url + b" HTTP/1.1\r\n")
    if connection_close:
        stream.write(b"Connection: close\r\n")
    stream.write(b"Transfer-Encoding: chunked\r\n\r\n")
    return stream
Example #16
Source File: concurrent_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def capitalize(self, request_data):
    logging.debug("capitalize")
    stream = IOStream(socket.socket())
    logging.debug("connecting")
    yield stream.connect(("127.0.0.1", self.port))
    stream.write(utf8(request_data + "\n"))
    logging.debug("reading")
    data = yield stream.read_until(b"\n")
    logging.debug("returning")
    stream.close()
    raise gen.Return(self.process_response(data))
Example #17
Source File: httpserver_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def raw_fetch(self, headers, body, newline=b"\r\n"):
    with closing(IOStream(socket.socket())) as stream:
        self.io_loop.run_sync(
            lambda: stream.connect(("127.0.0.1", self.get_http_port()))
        )
        stream.write(
            newline.join(headers + [utf8("Content-Length: %d" % len(body))])
            + newline
            + newline
            + body
        )
        start_line, headers, body = self.io_loop.run_sync(
            lambda: read_stream_body(stream)
        )
        return body
Example #18
Source File: http1connection.py From opendevops with GNU General Public License v3.0 | 5 votes |
def detach(self) -> iostream.IOStream:
    """Take control of the underlying stream.

    Returns the underlying `.IOStream` object and stops all further
    HTTP processing.  May only be called during
    `.HTTPMessageDelegate.headers_received`.  Intended for implementing
    protocols like websockets that tunnel over an HTTP handshake.
    """
    self._clear_callbacks()
    stream = self.stream
    self.stream = None  # type: ignore
    if not self._finish_future.done():
        future_set_result_unless_cancelled(self._finish_future, None)
    return stream
Example #19
Source File: httpserver_test.py From viewfinder with Apache License 2.0 | 5 votes |
def test_100_continue(self):
    # Run through a 100-continue interaction by hand:
    # When given Expect: 100-continue, we get a 100 response after the
    # headers, and then the real response after the body.
    stream = IOStream(socket.socket(), io_loop=self.io_loop)
    stream.connect(("localhost", self.get_http_port()), callback=self.stop)
    self.wait()
    stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
                               b"Content-Length: 1024",
                               b"Expect: 100-continue",
                               b"Connection: close",
                               b"\r\n"]), callback=self.stop)
    self.wait()
    stream.read_until(b"\r\n\r\n", self.stop)
    data = self.wait()
    self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
    stream.write(b"a" * 1024)
    stream.read_until(b"\r\n", self.stop)
    first_line = self.wait()
    self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
    stream.read_until(b"\r\n\r\n", self.stop)
    header_data = self.wait()
    headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
    stream.read_bytes(int(headers["Content-Length"]), self.stop)
    body = self.wait()
    self.assertEqual(body, b"Got 1024 bytes in POST")
    stream.close()
Example #20
Source File: simple_httpclient.py From viewfinder with Apache License 2.0 | 5 votes |
def initialize(self, io_loop, max_clients=10,
               hostname_mapping=None, max_buffer_size=104857600,
               resolver=None, defaults=None):
    """Creates a AsyncHTTPClient.

    Only a single AsyncHTTPClient instance exists per IOLoop
    in order to provide limitations on the number of pending connections.
    force_instance=True may be used to suppress this behavior.

    max_clients is the number of concurrent requests that can be
    in progress.  Note that this arguments are only used when the
    client is first created, and will be ignored when an existing
    client is reused.

    hostname_mapping is a dictionary mapping hostnames to IP addresses.
    It can be used to make local DNS changes when modifying system-wide
    settings like /etc/hosts is not possible or desirable (e.g. in
    unittests).

    max_buffer_size is the number of bytes that can be read by IOStream.
    It defaults to 100mb.
    """
    super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                  defaults=defaults)
    self.max_clients = max_clients
    self.queue = collections.deque()
    self.active = {}
    self.max_buffer_size = max_buffer_size
    if resolver:
        self.resolver = resolver
        self.own_resolver = False
    else:
        self.resolver = Resolver(io_loop=io_loop)
        self.own_resolver = True
    if hostname_mapping is not None:
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
Example #21
Source File: httpserver_test.py From viewfinder with Apache License 2.0 | 5 votes |
def connect(self):
    self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
    self.stream.connect(('localhost', self.get_http_port()), self.stop)
    self.wait()
Example #22
Source File: http1connection_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def asyncSetUp(self):
    listener, port = bind_unused_port()
    event = Event()

    def accept_callback(conn, addr):
        self.server_stream = IOStream(conn)
        self.addCleanup(self.server_stream.close)
        event.set()

    add_accept_handler(listener, accept_callback)
    self.client_stream = IOStream(socket.socket())
    self.addCleanup(self.client_stream.close)
    yield [self.client_stream.connect(("127.0.0.1", port)), event.wait()]
    self.io_loop.remove_handler(listener)
    listener.close()
Example #23
Source File: tcpserver.py From viewfinder with Apache License 2.0 | 5 votes |
def _handle_connection(self, connection, address):
    if self.ssl_options is not None:
        assert ssl, "Python 2.6+ and OpenSSL required for SSL"
        try:
            connection = ssl_wrap_socket(connection,
                                         self.ssl_options,
                                         server_side=True,
                                         do_handshake_on_connect=False)
        except ssl.SSLError as err:
            if err.args[0] == ssl.SSL_ERROR_EOF:
                return connection.close()
            else:
                raise
        except socket.error as err:
            # If the connection is closed immediately after it is created
            # (as in a port scan), we can get one of several errors.
            # wrap_socket makes an internal call to getpeername,
            # which may return either EINVAL (Mac OS X) or ENOTCONN
            # (Linux).  If it returns ENOTCONN, this error is
            # silently swallowed by the ssl module, so we need to
            # catch another error later on (AttributeError in
            # SSLIOStream._do_ssl_handshake).
            # To test this behavior, try nmap with the -sT flag.
            # https://github.com/facebook/tornado/pull/750
            if err.args[0] in (errno.ECONNABORTED, errno.EINVAL):
                return connection.close()
            else:
                raise
    try:
        if self.ssl_options is not None:
            stream = SSLIOStream(connection, io_loop=self.io_loop,
                                 max_buffer_size=self.max_buffer_size)
        else:
            stream = IOStream(connection, io_loop=self.io_loop,
                              max_buffer_size=self.max_buffer_size)
        self.handle_stream(stream, address)
    except Exception:
        app_log.error("Error in connection callback", exc_info=True)
Example #24
Source File: tcpclient.py From opendevops with GNU General Public License v3.0 | 5 votes |
def _create_stream(
    self,
    max_buffer_size: int,
    af: socket.AddressFamily,
    addr: Tuple,
    source_ip: str = None,
    source_port: int = None,
) -> Tuple[IOStream, "Future[IOStream]"]:
    # Always connect in plaintext; we'll convert to ssl if necessary
    # after one connection has completed.
    source_port_bind = source_port if isinstance(source_port, int) else 0
    source_ip_bind = source_ip
    if source_port_bind and not source_ip:
        # User required a specific port, but did not specify
        # a certain source IP, will bind to the default loopback.
        source_ip_bind = "::1" if af == socket.AF_INET6 else "127.0.0.1"
        # Trying to use the same address family as the requested af socket:
        # - 127.0.0.1 for IPv4
        # - ::1 for IPv6
    socket_obj = socket.socket(af)
    set_close_exec(socket_obj.fileno())
    if source_port_bind or source_ip_bind:
        # If the user requires binding also to a specific IP/port.
        try:
            socket_obj.bind((source_ip_bind, source_port_bind))
        except socket.error:
            socket_obj.close()
            # Fail loudly if unable to use the IP/port.
            raise
    try:
        stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
    except socket.error as e:
        fu = Future()  # type: Future[IOStream]
        fu.set_exception(e)
        return stream, fu
    else:
        return stream, stream.connect(addr)
Example #25
Source File: tcpclient.py From opendevops with GNU General Public License v3.0 | 5 votes |
def on_connect_done(
    self,
    addrs: Iterator[Tuple[socket.AddressFamily, Tuple]],
    af: socket.AddressFamily,
    addr: Tuple,
    future: "Future[IOStream]",
) -> None:
    self.remaining -= 1
    try:
        stream = future.result()
    except Exception as e:
        if self.future.done():
            return
        # Error: try again (but remember what happened so we have an
        # error to raise in the end)
        self.last_error = e
        self.try_connect(addrs)
        if self.timeout is not None:
            # If the first attempt failed, don't wait for the
            # timeout to try an address from the secondary queue.
            self.io_loop.remove_timeout(self.timeout)
            self.on_timeout()
        return
    self.clear_timeouts()
    if self.future.done():
        # This is a late arrival; just drop it.
        stream.close()
    else:
        self.streams.discard(stream)
        self.future.set_result((af, addr, stream))
        self.close_streams()
Example #26
Source File: tcpclient.py From opendevops with GNU General Public License v3.0 | 5 votes |
def start(
    self,
    timeout: float = _INITIAL_CONNECT_TIMEOUT,
    connect_timeout: Union[float, datetime.timedelta] = None,
) -> "Future[Tuple[socket.AddressFamily, Any, IOStream]]":
    self.try_connect(iter(self.primary_addrs))
    self.set_timeout(timeout)
    if connect_timeout is not None:
        self.set_connect_timeout(connect_timeout)
    return self.future
Example #27
Source File: simple_httpclient.py From viewfinder with Apache License 2.0 | 5 votes |
def initialize(self, io_loop, max_clients=10,
               hostname_mapping=None, max_buffer_size=104857600,
               resolver=None, defaults=None):
    """Creates a AsyncHTTPClient.

    Only a single AsyncHTTPClient instance exists per IOLoop
    in order to provide limitations on the number of pending connections.
    force_instance=True may be used to suppress this behavior.

    max_clients is the number of concurrent requests that can be
    in progress.  Note that this arguments are only used when the
    client is first created, and will be ignored when an existing
    client is reused.

    hostname_mapping is a dictionary mapping hostnames to IP addresses.
    It can be used to make local DNS changes when modifying system-wide
    settings like /etc/hosts is not possible or desirable (e.g. in
    unittests).

    max_buffer_size is the number of bytes that can be read by IOStream.
    It defaults to 100mb.
    """
    super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                  defaults=defaults)
    self.max_clients = max_clients
    self.queue = collections.deque()
    self.active = {}
    self.max_buffer_size = max_buffer_size
    if resolver:
        self.resolver = resolver
        self.own_resolver = False
    else:
        self.resolver = Resolver(io_loop=io_loop)
        self.own_resolver = True
    if hostname_mapping is not None:
        self.resolver = OverrideResolver(resolver=self.resolver,
                                         mapping=hostname_mapping)
Example #28
Source File: simple_httpclient.py From opendevops with GNU General Public License v3.0 | 5 votes |
def _create_connection(self, stream: IOStream) -> HTTP1Connection:
    stream.set_nodelay(True)
    connection = HTTP1Connection(
        stream,
        True,
        HTTP1ConnectionParameters(
            no_keep_alive=True,
            max_header_size=self.max_header_size,
            max_body_size=self.max_body_size,
            decompress=bool(self.request.decompress_response),
        ),
        self._sockaddr,
    )
    return connection
Example #29
Source File: httpclient_test.py From viewfinder with Apache License 2.0 | 5 votes |
def test_chunked_close(self):
    # test case in which chunks spread read-callback processing
    # over several ioloop iterations, but the connection is already closed.
    sock, port = bind_unused_port()
    with closing(sock):
        def write_response(stream, request_data):
            stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)

        def accept_callback(conn, address):
            # fake an HTTP server using chunked encoding where the final chunks
            # and connection close all happen at once
            stream = IOStream(conn, io_loop=self.io_loop)
            stream.read_until(b"\r\n\r\n",
                              functools.partial(write_response, stream))
        netutil.add_accept_handler(sock, accept_callback, self.io_loop)
        self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
        resp = self.wait()
        resp.rethrow()
        self.assertEqual(resp.body, b"12")
        self.io_loop.remove_handler(sock.fileno())
Example #30
Source File: httpserver_test.py From opendevops with GNU General Public License v3.0 | 5 votes |
def test_100_continue(self):
    # Run through a 100-continue interaction by hand:
    # When given Expect: 100-continue, we get a 100 response after the
    # headers, and then the real response after the body.
    stream = IOStream(socket.socket())
    yield stream.connect(("127.0.0.1", self.get_http_port()))
    yield stream.write(
        b"\r\n".join(
            [
                b"POST /hello HTTP/1.1",
                b"Content-Length: 1024",
                b"Expect: 100-continue",
                b"Connection: close",
                b"\r\n",
            ]
        )
    )
    data = yield stream.read_until(b"\r\n\r\n")
    self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
    stream.write(b"a" * 1024)
    first_line = yield stream.read_until(b"\r\n")
    self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
    header_data = yield stream.read_until(b"\r\n\r\n")
    headers = HTTPHeaders.parse(native_str(header_data.decode("latin1")))
    body = yield stream.read_bytes(int(headers["Content-Length"]))
    self.assertEqual(body, b"Got 1024 bytes in POST")
    stream.close()