Python read stream

The following are 50 Python code examples related to "read stream". You can vote up the ones you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example.
Example 1
Source File: generic.py    From pdf-quench with GNU General Public License v2.0 (6 votes)
def readFromStream(stream, pdf):
        arr = ArrayObject()
        tmp = stream.read(1)
        if tmp != b_("["):
            raise utils.PdfReadError("Could not read array")
        while True:
            # skip leading whitespace
            tok = stream.read(1)
            while tok.isspace():
                tok = stream.read(1)
            stream.seek(-1, 1)
            # check for array ending
            peekahead = stream.read(1)
            if peekahead == b_("]"):
                break
            stream.seek(-1, 1)
            # read and append obj
            arr.append(readObject(stream, pdf))
        return arr 
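The paired read-then-seek(-1, 1) calls above implement a one-byte peek. A minimal standalone sketch of the idiom (hypothetical helper, not part of pdf-quench):

import io

def peek_byte(stream):
    # Read one byte, then seek one byte back relative to the current
    # position (whence=1) so the caller sees an unchanged stream.
    byte = stream.read(1)
    if byte:
        stream.seek(-1, 1)
    return byte

buf = io.BytesIO(b"[1 2 3]")
assert peek_byte(buf) == b"["
assert buf.tell() == 0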
Example 2
Source File: geometry.py    From armi with Apache License 2.0 (6 votes)
def readGeomFromStream(self, stream):
        """
        Read geometry info from a stream.

        This populates the object with info from any source.

        Notes
        -----
        There are two formats of geometry: yaml and xml. This tries
        xml first (legacy), and if it fails it tries yaml.
        """
        try:
            self._readXml(stream)
        except ET.ParseError:
            stream.seek(0)
            self._readYaml(stream)
        self._applyMigrations() 
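The rewind-and-retry pattern generalizes to any pair of formats. A minimal sketch, using JSON in place of YAML to stay within the standard library:

import io
import json
import xml.etree.ElementTree as ET

def read_either(stream):
    # Try XML first (legacy); on a parse error, rewind and try the other format.
    try:
        return ET.parse(stream).getroot()
    except ET.ParseError:
        stream.seek(0)
        return json.load(stream)

data = read_either(io.StringIO('{"pitch": 1.0}'))
print(data)  # {'pitch': 1.0}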
Example 3
Source File: io.py    From prysm with MIT License (6 votes)
def read_file_stream_or_path(path_or_file):
    try:
        with codecs.open(path_or_file, mode='r', encoding='cp1252') as fid:
            data = codecs.encode(fid.read(), 'utf-8').decode('utf-8')
    except (FileNotFoundError, TypeError):  # FNF -- file object, TypeError -- file_like
        try:
            path_or_file.seek(0)
            raw = path_or_file.read()
            data = codecs.encode(raw, 'utf-8').decode('utf-8')
        except TypeError:  # opened in bytes mode
            data = raw.decode('cp1252')
        except AttributeError:
            data = path_or_file  # TODO: avoid duplicate
    except (AttributeError, UnicodeDecodeError):
        data = path_or_file

    return data 
Example 4
Source File: _object.py    From pydatalab with Apache License 2.0 (6 votes)
def read_stream(self, start_offset=0, byte_count=None):
    """Reads the content of this object as text.

    Args:
      start_offset: the start offset of bytes to read.
      byte_count: the number of bytes to read. If None, it reads to the end.
    Returns:
      The text content within the object.
    Raises:
      Exception if there was an error requesting the object's content.
    """
    try:
      return self._api.object_download(self._bucket, self._key,
                                       start_offset=start_offset, byte_count=byte_count)
    except Exception as e:
      raise e 
Example 5
Source File: openbabel.py    From molecular-design-toolkit with Apache License 2.0 (6 votes)
def read_stream(filelike, format, name=None):
    """ Read a molecule from a file-like object

    Note:
        Currently only reads the first conformation in a file

    Args:
        filelike: a file-like object to read a file from
        format (str): File format: pdb, sdf, mol2, bbll, etc.
        name (str): name to assign to molecule

    Returns:
        moldesign.Molecule: parsed result
    """
    molstring = str(filelike.read())  # openbabel chokes on unicode
    return read_string(molstring, format, name=name) 
Example 6
Source File: s3_storage_driver.py    From fileflow with Apache License 2.0 (6 votes)
def get_read_stream(self, dag_id, task_id, execution_date):
        key_name = self.get_key_name(dag_id, task_id, execution_date)
        key = self.bucket.get_key(key_name)

        if key is not None:
            import tempfile
            temp_file_stream = tempfile.TemporaryFile(mode='w+b')
            key.get_file(temp_file_stream)

            # Stream has been read in and is now at the end
            # So reset it to the start
            temp_file_stream.seek(0)

            return temp_file_stream

        message = \
            'S3 key named {key_name} in bucket {bucket_name} does not exist.'.format(key_name=key_name,
                                                                                     bucket_name=self.bucket_name)
        raise StorageDriverError(message) 
Example 7
Source File: helpers.py    From aztk with MIT License (6 votes)
def read_stream_as_string(stream, encoding="utf-8"):
    """
        Read stream as string
        :param stream: input stream generator
        :param str encoding: The encoding of the file. The default is utf-8.
        :return: The file content.
        :rtype: str
    """
    output = io.BytesIO()
    try:
        for data in stream:
            output.write(data)
        return output.getvalue().decode(encoding)
    finally:
        output.close()
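Any iterable of byte chunks works as the stream argument here. A quick usage sketch, relying on the function above and its module's import of io:

chunks = iter([b"hello ", b"w\xc3\xb6rld"])
print(read_stream_as_string(chunks))  # -> hello wörld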
Example 8
Source File: decoding.py    From python-sdk with MIT License (6 votes)
def read_data_from_stream(stream):
        data_length = decode_uint_256(stream)
        padded_length = ceil32(data_length)

        data = stream.read(padded_length)

        if len(data) < padded_length:
            raise InsufficientDataBytes(
                "Tried to read {0} bytes.  Only got {1} bytes".format(
                    padded_length,
                    len(data),
                )
            )

        padding_bytes = data[data_length:]

        if padding_bytes != b'\x00' * (padded_length - data_length):
            raise NonEmptyPaddingBytes(
                "Padding bytes were not empty: {0}".format(repr(padding_bytes))
            )

        return data[:data_length] 
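ceil32 and decode_uint_256 are imported helpers in the original module. A plausible ceil32, rounding up to the next 32-byte ABI word boundary (an assumption, not the module's actual code):

def ceil32(value):
    # Round up to the nearest multiple of 32 (ABI data is padded to 32-byte words).
    return (value + 31) // 32 * 32

assert ceil32(0) == 0 and ceil32(1) == 32 and ceil32(64) == 64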
Example 9
Source File: generic.py    From pdf-quench with GNU General Public License v2.0 (6 votes)
def readHexStringFromStream(stream):
    stream.read(1)
    txt = ""
    x = b_("")
    while True:
        tok = readNonWhitespace(stream)
        if not tok:
            # stream has truncated prematurely
            raise PdfStreamError("Stream has ended unexpectedly")
        if tok == b_(">"):
            break
        x += tok
        if len(x) == 2:
            txt += chr(int(x, base=16))
            x = b_("")
    if len(x) == 1:
        x += b_("0")
    if len(x) == 2:
        txt += chr(int(x, base=16))
    return createStringObject(b_(txt)) 
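A standalone sketch of the same hex decoding, without the pdf-quench helpers (b_, readNonWhitespace, createStringObject). It assumes the opening "<" has already been consumed, and pads a trailing odd digit with "0" as the PDF spec requires:

import io

def read_hex_string(stream):
    txt, pair = "", ""
    while True:
        ch = stream.read(1).decode()
        if not ch:
            raise ValueError("stream ended unexpectedly")
        if ch == ">":
            break
        if ch.isspace():
            continue
        pair += ch
        if len(pair) == 2:
            txt += chr(int(pair, 16))
            pair = ""
    if pair:
        txt += chr(int(pair + "0", 16))
    return txt

assert read_hex_string(io.BytesIO(b"48656C6C6F>")) == "Hello"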
Example 10
Source File: monaural_beat.py    From accel-brain-code with GNU General Public License v2.0 (6 votes)
def read_stream(self, left_chunk, right_chunk, volume, bit16=32767.0):
        '''
        Concrete method.
        Read the monaural beat to be saved to a wav file.

        Args:
            left_chunk:     chunk for the left sound source
            right_chunk:    chunk for the right sound source
            volume:         volume
            bit16:          scaling factor for integer conversion

        Returns:
            list of frames
        '''
        if len(left_chunk) != len(right_chunk):
            raise ValueError()

        frame_list = []
        for i in range(len(left_chunk)):
            chunk = int((left_chunk[i] + right_chunk[i]) * bit16 * volume)
            data = struct.pack("2h", chunk, chunk)
            frame_list.append(data)

        return frame_list 
Example 11
Source File: data_stream.py    From ad_examples with MIT License (6 votes)
def read_next_from_stream(self, n=1):
        """Returns first n instances from X and removes these instances from X"""
        n = min(n, self.X.shape[0])
        # logger.debug("DataStream.read_next_from_stream n: %d" % n)
        if n == 0:
            return None
        mask = np.zeros(self.X.shape[0], dtype=bool)
        mask[np.arange(n)] = True
        instances = self.X[mask]
        self.X = self.X[~mask]
        labels = None
        if self.y is not None:
            labels = self.y[mask]
            self.y = self.y[~mask]
        ids = None
        if self.id_server is not None:
            ids = self.id_server.get_next(n)
        # logger.debug("DataStream.read_next_from_stream instances: %s" % str(instances.shape))
        return InstanceList(instances, labels, ids) 
Example 12
Source File: corpus.py    From online-hdp with GNU General Public License v2.0 (6 votes)
def read_stream_data(f, num_docs):
  c = corpus()
  splitexp = re.compile(r'[ :]')
  for i in range(num_docs):
    line = f.readline()
    line = line.strip()
    if len(line) == 0:
      break
    d = document()
    splitline = [int(i) for i in splitexp.split(line)]
    wordids = splitline[1::2]
    wordcts = splitline[2::2]
    d.words = wordids
    d.counts = wordcts
    d.total = sum(d.counts)
    d.length = len(d.words)
    c.docs.append(d)

  c.num_docs = len(c.docs)
  return c
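The input is apparently Blei's LDA-C format, one document per line: a term count followed by id:count pairs. A quick check of the slicing with a hypothetical line:

import re

line = "3 17:2 42:1 99:5"
splitline = [int(tok) for tok in re.split(r"[ :]", line.strip())]
wordids, wordcts = splitline[1::2], splitline[2::2]
assert wordids == [17, 42, 99] and wordcts == [2, 1, 5]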

Example 13
Source File: terminal.py    From modules-repo with GNU Affero General Public License v3.0 (6 votes)
async def read_stream(func, stream, delay):
    last_task = None
    data = b""
    while True:
        dat = (await stream.read(1))
        if not dat:
            # EOF
            if last_task:
                # Send all pending data
                last_task.cancel()
                await func(data.decode("utf-8"))
                # If there is no last task, there is inherently no data, so there's no point in sending a blank string
            break
        data += dat
        if last_task:
            last_task.cancel()
        last_task = asyncio.ensure_future(sleep_for_task(func, data, delay)) 
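sleep_for_task is defined elsewhere in the module; a plausible shape (an assumption, not the module's actual code) is a debounce that waits delay seconds and then flushes the buffer:

import asyncio

async def sleep_for_task(func, data, delay):
    # Hypothetical debounce helper: wait, then hand the buffered bytes to func.
    await asyncio.sleep(delay)
    await func(data.decode("utf-8"))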
Example 14
Source File: per.py    From pyrdp with GNU General Public License v3.0 (5 votes)
def readOctetStream(s: BinaryIO, minValue: int = 0) -> bytes:
    """
    Unpack a PER octet stream
    :param s: stream
    :param minValue: minimum string length
    """
    size = readLength(s) + minValue
    return s.read(size) 
Example 15
Source File: ReadWriteFileServer.py    From bacpypes with MIT License (5 votes)
def read_stream(self, start_position, octet_count):
        """ Read a chunk of data out of the file. """
        if _debug: TestStreamFile._debug("read_stream %r %r",
                start_position, octet_count,
                )

        # end of file is true if last record is returned
        end_of_file = (start_position+octet_count) >= len(self._file_data)

        return end_of_file, \
            self._file_data[start_position:start_position + octet_count] 
Example 16
Source File: base.py    From python-symphony with Apache License 2.0 (5 votes)
def read_stream(self, stream_id, since_epoch):
        ''' get datafeed '''
        response, status_code = self.__agent__.Messages.get_v4_stream_sid_message(
            sessionToken=self.__session__,
            keyManagerToken=self.__keymngr__,
            sid=stream_id,
            since=since_epoch
        ).result()
        self.logger.debug('%s: %s' % (status_code, response))
        return status_code, response 
Example 17
Source File: cprofilev.py    From cprofilev with MIT License (5 votes)
def read_stream(self):
        value = self.stream.getvalue()
        self.stream.seek(0)
        self.stream.truncate()
        return value 
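A minimal sketch of the drain semantics, assuming self.stream is an io.StringIO (the class name below is hypothetical): each call returns whatever has been buffered and empties the buffer.

import io

class StatsCapture:
    def __init__(self):
        self.stream = io.StringIO()

    def read_stream(self):
        value = self.stream.getvalue()
        self.stream.seek(0)
        self.stream.truncate()
        return value

cap = StatsCapture()
cap.stream.write("profile output")
assert cap.read_stream() == "profile output"
assert cap.read_stream() == ""  # drained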
Example 18
Source File: __init__.py    From python-zulip-api with Apache License 2.0 (5 votes)
def mark_stream_as_read(self, stream_id: int) -> Dict[str, Any]:
        '''
            Example usage:

            >>> client.mark_stream_as_read(42)
            {'result': 'success', 'msg': ''}
        '''
        return self.call_endpoint(
            url='mark_stream_as_read',
            method='POST',
            request={'stream_id': stream_id},
        ) 
Example 19
Source File: protocol.py    From catnip with Apache License 2.0 (5 votes)
def ReadToStream(self, stream):
    assert self._info
    # Integer division keeps padded_size an int on Python 3.
    padded_size = ((self._info.size + TAR_RECORD_SIZE - 1)
                   // TAR_RECORD_SIZE * TAR_RECORD_SIZE)
    pad_size = padded_size - self._info.size
    self._ReadToStream(self._info.size, stream)
    self._Skip(pad_size)
    self._info = None 
Example 20
Source File: fileJobStore.py    From toil with Apache License 2.0 (5 votes)
def readSharedFileStream(self, sharedFileName):
        self._requireValidSharedFileName(sharedFileName)
        try:
            with open(self._getSharedFilePath(sharedFileName), 'rb') as f:
                yield f
        except IOError as e:
            if e.errno == errno.ENOENT:
                raise NoSuchFileException(sharedFileName)
            else:
                raise 
Example 21
Source File: fileJobStore.py    From toil with Apache License 2.0 (5 votes)
def readFileStream(self, jobStoreFileID):
        self._checkJobStoreFileID(jobStoreFileID)
        with open(self._getFilePathFromId(jobStoreFileID), 'rb') as f:
            yield f

    ##########################################
    # The following methods deal with shared files, i.e. files not associated
    # with specific jobs.
    ########################################## 
Example 22
Source File: abstractFileStore.py    From toil with Apache License 2.0 (5 votes)
def readGlobalFileStream(self, fileStoreID):
        """
        Similar to readGlobalFile, but allows a stream to be read from the job store. The yielded
        file handle does not need to and should not be closed explicitly.

        :return: a context manager yielding a file handle which can be read from.
        """
        raise NotImplementedError() 
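Examples 20 through 22 are generator functions, presumably wrapped with contextlib.contextmanager so callers use them in a with-statement. A hedged sketch of a concrete implementation (the class and helper names are hypothetical):

import os
import tempfile
from contextlib import contextmanager

class FileStoreSketch:
    def __init__(self, root):
        self.root = root

    def _getFilePath(self, fileStoreID):
        # Hypothetical helper mapping an ID to a path under the store root.
        return os.path.join(self.root, fileStoreID)

    @contextmanager
    def readGlobalFileStream(self, fileStoreID):
        # Yield an open handle; the context manager closes it on exit,
        # which is why callers should not close it themselves.
        with open(self._getFilePath(fileStoreID), 'rb') as f:
            yield f

root = tempfile.mkdtemp()
with open(os.path.join(root, 'fid1'), 'wb') as f:
    f.write(b'payload')
with FileStoreSketch(root).readGlobalFileStream('fid1') as f:
    assert f.read() == b'payload'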
Example 23
Source File: common.py    From RFHO with MIT License (5 votes)
def read_stream(prefix='', start=0, stop=100000):
    res = []
    for kk in range(start, stop):
        try:
            res.append(load_obj(prefix + str(kk)))
        except FileNotFoundError:
            break
    return res 
Example 24
Source File: utils.py    From st2 with Apache License 2.0 (5 votes)
def make_read_and_store_stream_func(execution_db, action_db, store_data_func):
    """
    Factory function which returns a function for reading from a stream (stdout / stderr).

    This function writes read data into a buffer and stores it in a database.
    """
    # NOTE: This import has intentionally been moved here to avoid massive performance overhead
    # (1+ second) for other functions inside this module which don't need to use those imports.
    from st2common.util import concurrency

    greenlet_exit_exc_cls = concurrency.get_greenlet_exit_exception_class()

    def read_and_store_stream(stream, buff):
        try:
            while not stream.closed:
                line = stream.readline()
                if not line:
                    break

                if isinstance(line, six.binary_type):
                    line = line.decode('utf-8')

                buff.write(line)

                # Filter out result delimiter lines
                if ACTION_OUTPUT_RESULT_DELIMITER in line:
                    continue

                if cfg.CONF.actionrunner.stream_output:
                    store_data_func(execution_db=execution_db, action_db=action_db, data=line)
        except RuntimeError:
            # process was terminated abruptly
            pass
        except greenlet_exit_exc_cls:
            # Green thread exited / was killed
            pass

    return read_and_store_stream 
Example 25
Source File: util.py    From micromasters with BSD 3-Clause "New" or "Revised" License (5 votes)
def read_stream(self):
        """
        Reads stream of json objects from a file
        """
        self.file.seek(0)
        for line in self.file.readlines():
            yield json.loads(line) 
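The file is expected to hold one JSON object per line (JSON Lines). A usage sketch with an in-memory file; the class name is hypothetical:

import io
import json

class StreamReaderSketch:
    def __init__(self, file):
        self.file = file

    def read_stream(self):
        self.file.seek(0)
        for line in self.file.readlines():
            yield json.loads(line)

buf = io.StringIO('{"a": 1}\n{"b": 2}\n')
assert list(StreamReaderSketch(buf).read_stream()) == [{"a": 1}, {"b": 2}]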
Example 26
Source File: websocket.py    From synse-server with GNU General Public License v3.0 (5 votes)
def handle_request_read_stream(self, payload: Payload) -> None:
        """WebSocket 'read stream' event message handler.

        Args:
            payload: The message payload received from the WebSocket.
        """
        ids = payload.data.get('ids')
        tag_groups = payload.data.get('tag_groups')
        stop = payload.data.get('stop', False)

        if stop:
            logger.debug('read stream stop request received - terminating stream tasks')
            for t in self.tasks:
                t.cancel()
            return

        async def send_readings():
            async for reading in cmd.read_stream(self.ws, ids, tag_groups):
                try:
                    await self.send(
                        id=payload.id,
                        event='response/reading',
                        data=reading,
                    )
                except ConnectionClosed:
                    logger.info('websocket raised ConnectionClosed - terminating read stream')
                    return

        t = asyncio.ensure_future(send_readings())
        self.tasks.append(t) 
Example 27
Source File: utils.py    From mead-baseline with Apache License 2.0 (5 votes)
def read_config_stream(config_stream) -> Dict:
    """Read config stream.  May be a path to a YAML or JSON file, or a str containing JSON or the name
    of an env variable, or even a JSON object directly

    :param config_stream:
    :return:
    """
    if isinstance(config_stream, (dict, list)) or config_stream is None:
        return config_stream
    if os.path.exists(config_stream) and os.path.isfile(config_stream):
        logger.info("Reading config file '{}'".format(config_stream))
        return read_config_file(config_stream)
    config = config_stream
    if config_stream.startswith("hub:"):
        vec = config_stream.split(":")
        version = vec[1]
        rest = ":".join(vec[2:])
        config_stream = f"http://raw.githubusercontent.com/mead-ml/hub/master/{version}/{rest}.yml"

    if config_stream.startswith("$"):
        logger.info("Reading config from '{}'".format(config_stream))
        config = os.getenv(config_stream[1:])
    
    else:
        if validate_url(config_stream):
            path_to_save, _ = urlretrieve(config_stream)
            return read_config_stream(path_to_save)
        else:
            logger.info("No file found '{}...', loading as string".format(config_stream[:12]))
    return json.loads(config) 
Example 28
Source File: _api.py    From pykaldi with Apache License 2.0 (5 votes)
def read_from_stream(cls, strm, ropts):
        """Reads an FST from an input stream.

        Args:
            strm (istream): The input stream to read from.
            ropts (FstReadOptions): FST reading options.

        Returns:
            An FST object.

        Raises:
            RuntimeError: Read failed.
        """
        return cls._read_from_stream(strm, ropts) 
Example 29
Source File: session.py    From tidb-docker-compose with Apache License 2.0 (5 votes)
def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.

        .. note:: Evolving.

        :return: :class:`DataStreamReader`
        """
        return DataStreamReader(self._wrapped) 
Example 30
Source File: config.py    From hgvs with Apache License 2.0 (5 votes)
def read_stream(self, flo):
        """read configuration from ini-formatted file-like object

        """
        self._cp.read_string(flo.read().decode('ascii')) 
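Here _cp is presumably a configparser.ConfigParser, and the stream is binary, hence the decode. Standalone, the same operation looks roughly like this (section and key names are made up):

import configparser
import io

cp = configparser.ConfigParser()
flo = io.BytesIO(b"[db]\nhost = localhost\n")
cp.read_string(flo.read().decode('ascii'))
assert cp['db']['host'] == 'localhost'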
Example 31
Source File: response_lexer.py    From sndlatr with Apache License 2.0 (5 votes)
def read_token_stream(self, stream_i):
        whitespace = WHITESPACE
        wordchars = NON_SPECIALS
        read_until = self.read_until

        while True:
            # whitespace
            for nextchar in stream_i:
                if nextchar not in whitespace:
                    stream_i.push(nextchar)
                    break    # done skipping over the whitespace

            # non whitespace
            token = ''
            for nextchar in stream_i:
                if nextchar in wordchars:
                    token += nextchar
                elif nextchar == '[':
                    token += nextchar + read_until(stream_i, ']', escape=False)
                else:
                    if nextchar in whitespace:
                        yield token
                    elif nextchar == '"':
                        assert not token
                        yield nextchar + read_until(stream_i, nextchar)
                    else:
                        # Other punctuation, eg. "(". This ends the current token.
                        if token:
                            yield token
                        yield nextchar
                    break
            else:
                if token:
                    yield token
                break 
Example 32
Source File: base.py    From flyingcloud with Apache License 2.0 (5 votes)
def read_docker_output_stream(self, namespace, generator, logger_prefix, log_level=None):
        log_level = log_level or logging.DEBUG
        logger = getattr(namespace.logger, logging.getLevelName(log_level).lower())
        full_output = []

        for raw_chunk in generator:
            try:
                chunk, repl_count = self.filter_stream_header(raw_chunk)
                decoded_chunk = chunk.decode('utf-8')
            except UnicodeDecodeError:
                decoded_chunk = chunk.decode('utf-8', 'replace')
                logger("Couldn't decode %s", hexdump(chunk, 64))

            full_output.append(decoded_chunk)
            try:
                data = json.loads(decoded_chunk)
            except ValueError:
                data = decoded_chunk.rstrip('\r\n')
            logger("%s: %s", logger_prefix, data)
            if isinstance(data, dict) and 'error' in data:
                raise DockerResultError("Error: {!r}".format(data))
        return '\n'.join(full_output)

    # See "Stream details" at https://docs.docker.com/engine/api/v1.18/
    # {STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
    # STREAM_TYPE = 0 (stdin), = 1 (stdout), = 2 (stderr) 
Example 33
Source File: launcher.py    From modmail with GNU Affero General Public License v3.0 (5 votes)
async def read_stream(self, stream):
        while self.is_active:
            try:
                line = await stream.readline()
            except (asyncio.LimitOverrunError, ValueError):
                continue
            if line:
                line = line.decode("utf-8")[:-1]
                print(f"[Cluster {self.id}] {line}")
            else:
                break 
Example 34
Source File: log_reader.py    From airflow with Apache License 2.0 (5 votes)
def read_log_stream(self, ti: TaskInstance, try_number: Optional[int],
                        metadata: dict) -> Iterator[str]:
        """
        Used to continuously read the log to the end

        :param ti: The Task Instance
        :type ti: TaskInstance
        :param try_number: the task try number
        :type try_number: Optional[int]
        :param metadata: A dictionary containing information about how to read the task log
        :type metadata: dict
        :rtype: Iterator[str]
        """

        if try_number is None:
            next_try = ti.next_try_number
            try_numbers = list(range(1, next_try))
        else:
            try_numbers = [try_number]
        for current_try_number in try_numbers:
            metadata.pop('end_of_log', None)
            metadata.pop('max_offset', None)
            metadata.pop('offset', None)
            while 'end_of_log' not in metadata or not metadata['end_of_log']:
                logs, metadata = self.read_log_chunks(ti, current_try_number, metadata)
                yield "\n".join(logs) + "\n" 
Example 35
Source File: _ebi_expression_atlas.py    From scanpy with BSD 3-Clause "New" or "Revised" License (5 votes)
def read_mtx_from_stream(stream: BinaryIO) -> sparse.csr_matrix:
    curline = stream.readline()
    while curline.startswith(b"%"):
        curline = stream.readline()
    n, m, _ = (int(x) for x in curline[:-1].split(b" "))
    data = pd.read_csv(
        stream,
        sep=r"\s+",
        header=None,
        dtype={0: np.integer, 1: np.integer, 2: np.float32},
    )
    mtx = sparse.csr_matrix((data[2], (data[1] - 1, data[0] - 1)), shape=(m, n))
    return mtx 
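Assuming the module's imports (pandas as pd, numpy as np, scipy.sparse as sparse), a usage sketch with a tiny MatrixMarket payload; note the returned shape is (m, n), the transpose of the header's dimensions:

import io

mtx_bytes = (b"%%MatrixMarket matrix coordinate real general\n"
             b"2 3 2\n"      # n=2, m=3, 2 non-zero entries
             b"1 1 1.0\n"
             b"2 3 5.0\n")
matrix = read_mtx_from_stream(io.BytesIO(mtx_bytes))
print(matrix.shape)  # (3, 2)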
Example 36
Source File: message_flags.py    From zulip with Apache License 2.0 (5 votes)
def mark_stream_as_read(request: HttpRequest,
                        user_profile: UserProfile,
                        stream_id: int=REQ(validator=check_int)) -> HttpResponse:
    stream, recipient, sub = access_stream_by_id(user_profile, stream_id)
    count = do_mark_stream_messages_as_read(user_profile, request.client, stream)

    log_data_str = f"[{count} updated]"
    request._log_data["extra"] = log_data_str

    return json_success({'result': 'success',
                         'msg': ''}) 
Example 37
Source File: settingsIO.py    From armi with Apache License 2.0 (5 votes)
def readFromStream(self, stream, handleInvalids=True, fmt=SettingsInputFormat.YAML):
        """Read from a file-like stream."""
        self.format = fmt
        if self.format == self.SettingsInputFormat.YAML:
            try:
                self._readYaml(stream, handleInvalids=handleInvalids)
            except ruamel.yaml.scanner.ScannerError:
                # mediocre way to detect xml vs. yaml at the stream level
                runLog.info(
                    "Could not read stream in YAML format. Attempting XML format."
                )
                self.format = self.SettingsInputFormat.XML
                stream.seek(0)
        if self.format == self.SettingsInputFormat.XML:
            self._readXml(stream, handleInvalids=handleInvalids) 
Example 38
Source File: context.py    From LearningApacheSpark with MIT License (5 votes)
def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.

        .. note:: Evolving.

        :return: :class:`DataStreamReader`

        >>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
        >>> text_sdf.isStreaming
        True
        """
        return DataStreamReader(self) 
Example 39
Source File: flujo.py    From pychemqt with GNU General Public License v3.0 (5 votes)
def readItemFromStream(self, stream):
        type = QtCore.QString()
        matrix = QtGui.QTransform()
        stream >> type >> matrix
        if type == "txt":
            text = QtCore.QString()
            stream >> text
            item = TextItem(text)
        elif type == "square":
            rect = QtCore.QRectF()
            pen = QtGui.QPen()
            stream >> rect >> pen
            item = RectItem()
            item.setRect(rect)
            item.setPen(pen)
        elif type == "ellipse":
            rect = QtCore.QRectF()
            pen = QtGui.QPen()
            stream >> rect >> pen
            item = EllipseItem()
            item.setRect(rect)
            item.setPen(pen)
        elif type == "equip":
            name = QtCore.QString()
            stream >> name
            dialogoid = stream.readInt32()
            item = EquipmentItem(name, dialogoid)

        item.setTransform(matrix)
        return item 
Example 40
Source File: plots.py    From pychemqt with GNU General Public License v3.0 (5 votes)
def readToStream(cls, stream):
        id1 = stream.readInt32()
        id2 = stream.readInt32()
        count = stream.readInt32()
        x = []
        for i in range(count):
            x.append(stream.readFloat())
        y = []
        for i in range(count):
            y.append(stream.readFloat())
        cls.plot(0, x, y)
Example 41
Source File: files.py    From zulip-archive with MIT License (5 votes)
def read_zulip_stream_info(json_root):
    '''
    stream_index.json

    This JSON goes two levels deep, showing every stream, and
    then within each stream, a bit of info for every topic in
    the stream.  To get actual messages within a topic, you go
    to other files deeper in the directory structure.
    '''
    # json.load's encoding argument was removed in Python 3.9; the
    # with-statement also guarantees the file is closed.
    with (json_root / Path('stream_index.json')).open('r', encoding='utf-8') as f:
        stream_info = json.load(f)
    return stream_info
Example 42
Source File: arm_chromeos.py    From script.module.inputstreamhelper with MIT License (5 votes)
def read_stream(self, num_of_bytes):
        """Read and return a chunk of the bytestream"""
        self.bstream[1] += num_of_bytes

        return self.bstream[0].read(num_of_bytes) 
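bstream is evidently a [stream, offset] pair that tracks how far into the image has been read. A minimal sketch with a hypothetical wrapper class:

import io

class ByteStreamSketch:
    def __init__(self, data):
        self.bstream = [io.BytesIO(data), 0]  # [stream, bytes read so far]

    def read_stream(self, num_of_bytes):
        self.bstream[1] += num_of_bytes
        return self.bstream[0].read(num_of_bytes)

s = ByteStreamSketch(b"abcdef")
assert s.read_stream(4) == b"abcd" and s.bstream[1] == 4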
Example 43
Source File: stream.py    From ccs-calendarserver with Apache License 2.0 (5 votes)
def readStream(stream, gotDataCallback):
    """Pass a stream's data to a callback.

    Returns Deferred which will be triggered on finish.  Errors in
    reading the stream or in processing it will be returned via this
    Deferred.
    """
    return _StreamReader(stream, gotDataCallback).run() 
Example 44
Source File: main.py    From streamlink with BSD 2-Clause "Simplified" License (4 votes)
def read_stream(stream, output, prebuffer, chunk_size=8192):
    """Reads data from stream and then writes it to the output."""
    is_player = isinstance(output, PlayerOutput)
    is_http = isinstance(output, HTTPServer)
    is_fifo = is_player and output.namedpipe
    show_progress = (
        isinstance(output, FileOutput)
        and output.fd is not stdout
        and (sys.stdout.isatty() or args.force_progress)
    )
    show_record_progress = (
        hasattr(output, "record")
        and isinstance(output.record, FileOutput)
        and output.record.fd is not stdout
        and (sys.stdout.isatty() or args.force_progress)
    )

    stream_iterator = chain(
        [prebuffer],
        iter(partial(stream.read, chunk_size), b"")
    )
    if show_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.output))
    elif show_record_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.record))

    try:
        for data in stream_iterator:
            # We need to check if the player process still exists when
            # using named pipes on Windows since the named pipe is not
            # automatically closed by the player.
            if is_win32 and is_fifo:
                output.player.poll()

                if output.player.returncode is not None:
                    log.info("Player closed")
                    break

            try:
                output.write(data)
            except IOError as err:
                if is_player and err.errno in ACCEPTABLE_ERRNO:
                    log.info("Player closed")
                elif is_http and err.errno in ACCEPTABLE_ERRNO:
                    log.info("HTTP connection closed")
                else:
                    console.exit("Error when writing to output: {0}, exiting", err)

                break
    except IOError as err:
        console.exit("Error when reading from stream: {0}, exiting", err)
    finally:
        stream.close()
        log.info("Stream ended") 
Example 45
Source File: generic.py    From pdf-quench with GNU General Public License v2.0 (4 votes)
def readStringFromStream(stream):
    tok = stream.read(1)
    parens = 1
    txt = b_("")
    while True:
        tok = stream.read(1)
        if not tok:
            # stream has truncated prematurely
            raise PdfStreamError("Stream has ended unexpectedly")
        if tok == b_("("):
            parens += 1
        elif tok == b_(")"):
            parens -= 1
            if parens == 0:
                break
        elif tok == b_("\\"):
            tok = stream.read(1)
            if tok == b_("n"):
                tok = b_("\n")
            elif tok == b_("r"):
                tok = b_("\r")
            elif tok == b_("t"):
                tok = b_("\t")
            elif tok == b_("b"):
                tok = b_("\b")
            elif tok == b_("f"):
                tok = b_("\f")
            elif tok == b_("("):
                tok = b_("(")
            elif tok == b_(")"):
                tok = b_(")")
            elif tok == b_("\\"):
                tok = b_("\\")
            elif tok in (b_(" "), b_("/"), b_("%"), b_("<"), b_(">"), b_("["), b_("]")):
                # odd/unnecessary escape sequences we have encountered
                tok = b_(tok)
            elif tok.isdigit():
                # "The number ddd may consist of one, two, or three
                # octal digits; high-order overflow shall be ignored.
                # Three octal digits shall be used, with leading zeros
                # as needed, if the next character of the string is also
                # a digit." (PDF reference 7.3.4.2, p 16)
                for i in range(2):
                    ntok = stream.read(1)
                    if ntok.isdigit():
                        tok += ntok
                    else:
                        break
                tok = b_(chr(int(tok, base=8)))
            elif tok in b_("\n\r"):
                # This case is hit when a backslash is followed by a line
                # break. If it's a multi-char EOL, consume the second
                # character:
                tok = stream.read(1)
                if tok not in b_("\n\r"):
                    stream.seek(-1, 1)
                # Then don't add anything to the actual string, since this
                # line break was escaped:
                tok = b_('')
            else:
                raise utils.PdfReadError("Unexpected escaped string")
        txt += tok
    return createStringObject(txt)


##
# Represents a string object where the text encoding could not be determined.
# This occurs quite often, as the PDF spec doesn't provide an alternate way to
# represent strings -- for example, the encryption data stored in files (like
# /O) is clearly not text, but is still stored in a "String" object. 
Example 46
Source File: _torrent.py    From torf with GNU General Public License v3.0 4 votes vote down vote up
def read_stream(cls, stream, validate=True):
        """
        Read torrent metainfo from file-like object

        :param stream: Readable file-like object (e.g. :class:`io.BytesIO`)
        :param bool validate: Whether to run :meth:`validate` on the new Torrent
            instance

            NOTE: If the "info" field is not a dictionary,
                  :class:`MetainfoError` is raised even if `validate` is set to
                  False

        :raises ReadError: if reading from `stream` fails
        :raises BdecodeError: if `stream` does not produce a valid bencoded byte
            sequence
        :raises MetainfoError: if `validate` is `True` and the read metainfo is
            invalid

        :return: New :class:`Torrent` instance
        """
        try:
            content = stream.read(cls.MAX_TORRENT_FILE_SIZE)
        except OSError as e:
            raise error.ReadError(e.errno)
        else:
            try:
                metainfo_enc = bencode.decode(content)
            except (bencode.DecodingError, ValueError):
                raise error.BdecodeError()
            else:
                if not isinstance(metainfo_enc, abc.Mapping):
                    raise error.BdecodeError()

            # Extract 'pieces' from metainfo before decoding because it's the
            # only byte sequence that isn't supposed to be decoded to a string.
            if (b'info' in metainfo_enc and
                isinstance(metainfo_enc[b'info'], dict) and
                b'pieces' in metainfo_enc[b'info']):
                pieces = metainfo_enc[b'info'].pop(b'pieces')
                metainfo = utils.decode_dict(metainfo_enc)
                metainfo['info']['pieces'] = pieces
            else:
                metainfo = utils.decode_dict(metainfo_enc)

            # "info" must be a dictionary.  If validation is not wanted, it's OK
            # if it doesn't exist because the "metainfo" property will add
            # automatically.
            utils.assert_type(metainfo, ('info',), (dict,), must_exist=validate)

            torrent = cls()
            torrent._metainfo = metainfo

            # Convert "creation date" to datetime.datetime and "private" to
            # bool, but only if they exist
            if b'creation date' in metainfo_enc:
                torrent.creation_date = metainfo_enc[b'creation date']
            if b'private' in metainfo_enc.get(b'info', {}):
                torrent.private = metainfo_enc[b'info'][b'private']

            if validate:
                torrent.validate()

            return torrent 
Example 47
Source File: protocol.py    From aioh2 with BSD 3-Clause "New" or "Revised" License (4 votes)
def read_stream(self, stream_id, size=None):
        """
        Read data from the given stream.

        By default (`size=None`), this returns all data left in current HTTP/2
        frame. In other words, default behavior is to receive frame by frame.

        If size is a number above zero, the method will try to return as many
        bytes as possible up to the given size, blocking until enough bytes
        are ready or the stream is remotely closed.

        If size is below zero, it will read until the stream is remotely
        closed and return everything at hand.

        `size=0` is a special case that does nothing but return `b''`. The
        same result `b''` is also returned whenever there is no more data to
        receive on the stream, even under `size=None` when the peer sends an
        empty frame - so `b''` can safely identify the end of the given
        stream.

        Flow control frames will be automatically sent while reading clears the
        buffer, allowing more data to come in.

        :param stream_id: Stream to read
        :param size: Expected size to read, `-1` for all, default frame.
        :return: Bytes read or empty if there is no more to expect.
        """
        rv = []
        try:
            with (yield from self._get_stream(stream_id).rlock):
                if size is None:
                    rv.append((
                        yield from self._get_stream(stream_id).read_frame()))
                    self._flow_control(stream_id)
                elif size < 0:
                    while True:
                        rv.extend((
                            yield from self._get_stream(stream_id).read_all()))
                        self._flow_control(stream_id)
                else:
                    while size > 0:
                        bufs, count = yield from self._get_stream(
                            stream_id).read(size)
                        rv.extend(bufs)
                        size -= count
                        self._flow_control(stream_id)
        except StreamClosedError:
            pass
        except _StreamEndedException as e:
            try:
                self._flow_control(stream_id)
            except StreamClosedError:
                pass
            rv.extend(e.bufs)
        return b''.join(rv) 
Example 48
Source File: autorecon.py    From AutoRecon with GNU General Public License v3.0 (4 votes)
async def read_stream(stream, target, tag='?', patterns=[], color=Fore.BLUE):
    address = target.address
    while True:
        line = await stream.readline()
        if line:
            line = str(line.rstrip(), 'utf8', 'ignore')
            debug(color + '[' + Style.BRIGHT + address + ' ' + tag + Style.NORMAL + '] ' + Fore.RESET + '{line}', color=color)

            for p in global_patterns:
                matches = re.findall(p['pattern'], line)
                if 'description' in p:
                    for match in matches:
                        if verbose >= 1:
                            info('Task {bgreen}{tag}{rst} on {byellow}{address}{rst} - {bmagenta}' + p['description'].replace('{match}', '{bblue}{match}{crst}{bmagenta}') + '{rst}')
                        async with target.lock:
                            with open(os.path.join(target.scandir, '_patterns.log'), 'a') as file:
                                file.writelines(e('{tag} - ' + p['description'] + '\n\n'))
                else:
                    for match in matches:
                        if verbose >= 1:
                            info('Task {bgreen}{tag}{rst} on {byellow}{address}{rst} - {bmagenta}Matched Pattern: {bblue}{match}{rst}')
                        async with target.lock:
                            with open(os.path.join(target.scandir, '_patterns.log'), 'a') as file:
                                file.writelines(e('{tag} - Matched Pattern: {match}\n\n'))

            for p in patterns:
                matches = re.findall(p['pattern'], line)
                if 'description' in p:
                    for match in matches:
                        if verbose >= 1:
                            info('Task {bgreen}{tag}{rst} on {byellow}{address}{rst} - {bmagenta}' + p['description'].replace('{match}', '{bblue}{match}{crst}{bmagenta}') + '{rst}')
                        async with target.lock:
                            with open(os.path.join(target.scandir, '_patterns.log'), 'a') as file:
                                file.writelines(e('{tag} - ' + p['description'] + '\n\n'))
                else:
                    for match in matches:
                        if verbose >= 1:
                            info('Task {bgreen}{tag}{rst} on {byellow}{address}{rst} - {bmagenta}Matched Pattern: {bblue}{match}{rst}')
                        async with target.lock:
                            with open(os.path.join(target.scandir, '_patterns.log'), 'a') as file:
                                file.writelines(e('{tag} - Matched Pattern: {match}\n\n'))
        else:
            break 
Example 49
Source File: read.py    From synse-server with GNU General Public License v3.0 (4 votes)
def read_stream(
        ws: websockets.WebSocketCommonProtocol,
        ids: List[str] = None,
        tag_groups: List[List[str]] = None,
) -> AsyncIterable:
    """Stream reading data from registered plugins for the provided websocket.

    Note that this will only work for the Synse WebSocket API as of v3.0.

    Args:
        ws: The WebSocket for the request. Note that this command only works
            with the WebSocket API as of v3.0
        ids: A list of device IDs which can be used to constrain the devices
            for which readings should be streamed. If no IDs are specified, no
            filtering by ID is done.
        tag_groups: A collection of tag groups to constrain the devices for which
            readings should be streamed. The tags within a group are subtractive
            (e.g. a device must match all tags in the group to match the filter),
            but each tag group specified is additive (e.g. readings will be
            streamed for the union of all specified groups). If no tag groups are
            specified, no filtering by tags is done.

    Yields:
        The device reading, formatted as a Python dictionary.
    """

    logger.info('issuing command', command='READ STREAM', ids=ids, tag_groups=tag_groups)

    q = queue.Queue()

    threads = []
    for p in plugin.manager:
        if not p.active:
            logger.debug(
                'plugin not active, will not read its devices',
                plugin=p.tag, plugin_id=p.id,
            )
            continue

        t = Stream(p, ids, tag_groups, q)
        t.start()
        threads.append(t)

    def close_callback(*args, **kwargs):
        logger.debug('executing callback to cancel read stream threads')
        for stream in threads:
            stream.cancel()

    # The websocket has a 'close_connection_task' which will run once the
    # data transfer task as completed or been cancelled. This task should
    # always be run in the lifecycle of the websocket. We attach a callback
    # to the task to terminate the stream threads associated with the request,
    # therefore terminating the synse-server<->plugin(s) stream when the
    # client<->synse-server websocket is closed.
    ws.close_connection_task.add_done_callback(close_callback)

    logger.debug('collecting streamed readings...')
    try:
        while True:
            # Needed as an async break point in the loop, particularly for
            # cancelling the async task.
            await asyncio.sleep(0)
            try:
                val = q.get_nowait() or None
            except queue.Empty:
                await asyncio.sleep(0.25)
                continue
            else:
                if val is not None:
                    yield val
    finally:
        # The above should run until either the task is cancelled or there is
        # an exception. In either case, make sure the threads are terminated prior
        # to returning from this function so we are not constantly streaming
        # readings in the background.
        close_callback()