Python sys.stderr.flush() Examples

The following are 22 code examples of sys.stderr.flush(). You can go to the original project or source file by following the link above each example. You may also want to check out the other available functions and classes of the sys module, of which sys.stderr is the standard error stream.
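Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of why the call matters: on Python 3 stderr is typically line-buffered, so an explicit flush() is what guarantees a diagnostic is actually written out before the program does anything else.

import sys

def log_error(msg):
    # Write a diagnostic and push it out immediately, so it is visible
    # even if the process crashes or blocks right after this call.
    sys.stderr.write(msg + "\n")
    sys.stderr.flush()

log_error("starting long computation")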
Example #1
Source File: hevector.py    From vector-homomorphic-encryption with MIT License
def evaluate(operations, DEBUG=False):
    from subprocess import Popen, PIPE

    if DEBUG:
        print()
        print(operations)
        print()
    # send() and recv() are helpers defined elsewhere in hevector.py.
    inp = send(operations)
    if DEBUG:
        print(inp)
        print()
    with open('vhe.in', 'w') as f:
        f.write(inp)
    # Pipe stderr as well, so the error check below can actually fire
    # (the original never captured it, leaving `error` always None);
    # text mode keeps communicate() working with str on Python 3.
    output, error = Popen(['./vhe'], stdin=PIPE, stdout=PIPE, stderr=PIPE,
                          universal_newlines=True).communicate('')
    if DEBUG:
        print(output)
        print()

    if error:
        from sys import stderr
        stderr.write(error + '\n')
        stderr.flush()

    return recv(output) 
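On Python 3.7+, the same run-then-report pattern can be written more compactly with subprocess.run; this is a generic sketch reusing the project's ./vhe binary as a stand-in, not code from vector-homomorphic-encryption itself:

import sys
import subprocess

# capture_output=True pipes both stdout and stderr; text=True decodes them.
result = subprocess.run(['./vhe'], input='', capture_output=True, text=True)
if result.stderr:
    sys.stderr.write(result.stderr + '\n')
    sys.stderr.flush()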
Example #2
Source File: scheduler.py    From asynq with Apache License 2.0
def _continue_with_batch(self):
        """
        Flushes one of the batches (the longest one by default).

        :return: the batch that was flushed, if there was a flush;
                 otherwise, ``None``.

        """
        batch = self._select_batch_to_flush()
        if batch is None:
            # Nothing to flush: optionally log that, then bail out before
            # self._batches.remove(None) can raise a ValueError.
            if _debug_options.DUMP_FLUSH_BATCH:
                debug.write("@async: no batch to flush")
            return None
        self._batches.remove(batch)
        self._flush_batch(batch)
        return batch 
Example #3
Source File: scheduler.py    From asynq with Apache License 2.0
def try_time_based_dump(self, last_task=None):
        current_time = time.time()
        if (
            current_time - self._last_dump_time
        ) < _debug_options.SCHEDULER_STATE_DUMP_INTERVAL:
            return
        self._last_dump_time = current_time
        debug.write(
            "\n--- Scheduler state dump: --------------------------------------------"
        )
        try:
            self.dump()
            if last_task is not None:
                debug.write("Last task: %s" % debug.str(last_task), 1)
        finally:
            debug.write(
                "----------------------------------------------------------------------\n"
            )
            stdout.flush()
            stderr.flush() 
Example #4
Source File: debug.py    From asynq with Apache License 2.0
def dump_error(error, tb=None):
    """Dumps errors w/async stack traces."""
    try:
        stderr.write("\n" + (format_error(error, tb=tb) or "No error"))
    finally:
        stdout.flush()
        stderr.flush() 
Example #5
Source File: counter.py    From DeepNeuralNet-QSAR with GNU General Public License v3.0
def done(self):
        stderr.write(str(self.cur))
        stderr.write("\n")
        stderr.flush() 
Example #6
Source File: utils.py    From apt-select with MIT License
def progress_msg(processed, total):
    """Update user on percent done"""
    if total > 1:
        percent = int((float(processed) / total) * 100)
        stderr.write("\r[%d/%d] %d%%" % (processed, total, percent))
        stderr.flush() 
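The flush() is what makes this carriage-return progress line actually update: with no trailing newline, the text can otherwise sit in stderr's buffer. A hypothetical driver loop (not from apt-select) might exercise it like this:

from sys import stderr
from time import sleep

total = 20
for processed in range(1, total + 1):
    sleep(0.05)  # stand-in for real work
    progress_msg(processed, total)
stderr.write("\n")  # move past the finished progress line
stderr.flush()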
Example #7
Source File: utils.py    From iterativeWGCNA with GNU General Public License v2.0
def warning(*objs):
    '''
    wrapper for writing to stderr
    '''
    print(*objs, file=stderr)
    stderr.flush() 
Example #8
Source File: hitchdir.py    From hitch with GNU Affero General Public License v3.0
def check_hitch_directory_integrity():
    directory, _ = _check_for_directory()
    if exists(join(directory, "absdir")):
        with open(join(directory, "absdir"), "r") as absdir_handle:
            absdir = absdir_handle.read()
        if directory != absdir:
            stderr.write(languagestrings.HITCH_DIRECTORY_MOVED.format(
                directory, abspath(join(directory, os.pardir))
            ))
            stderr.flush()
            exit(1)
    if not exists(join(get_hitch_directory(), "virtualenv", "bin")):
        stderr.write(languagestrings.SOMETHING_CORRUPTED)
        stderr.flush()
        exit(1) 
Example #9
Source File: hitchdir.py    From hitch with GNU Affero General Public License v3.0
def get_hitch_directory_or_fail():
    directory, directories_checked = _check_for_directory()
    if not directory:
        stderr.write(languagestrings.HITCH_NOT_INITIALIZED)
        stderr.write('\n'.join(directories_checked))
        stderr.flush()
        exit(1)
    return directory 
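Examples #8 and #9 both end with the same write, flush, exit sequence. A small helper along these lines (hypothetical, not part of the hitch codebase) would keep each call site to one line:

from sys import stderr, exit  # sys.exit, imported explicitly

def die(message, status=1):
    # Emit the message, make sure it is actually written, then terminate.
    stderr.write(message)
    if not message.endswith("\n"):
        stderr.write("\n")
    stderr.flush()
    exit(status)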
Example #10
Source File: commandline.py    From hitch with GNU Affero General Public License v3.0
def clean():
    """Remove the hitch directory entirely."""
    if hitchdir.hitch_exists():
        hitchdir.remove_hitch_directory_if_exists()
    else:
        stderr.write("No hitch directory found. Doing nothing.\n")
        stderr.flush() 
Example #11
Source File: counter.py    From DeepNeuralNet-QSAR with GNU General Public License v3.0
def ProgressLine(line):
    stderr.write(line)
    stderr.write("\r")
    stderr.flush() 
Example #12
Source File: counter.py    From DeepNeuralNet-QSAR with GNU General Public License v3.0
def done(self):
        stderr.write('100%')
        stderr.write("\n")
        stderr.flush() 
Example #13
Source File: counter.py    From DeepNeuralNet-QSAR with GNU General Public License v3.0
def tick(self):
        self.cur += 1
        # Floor division keeps the percentage a whole number on Python 3
        # (the original "/" relied on Python 2's integer division).
        newPercent = (100 * self.cur) // self.total
        if newPercent > self.curPercent:
            self.curPercent = newPercent
            stderr.write(str(self.curPercent) + "%")
            stderr.write("\r")
            stderr.flush() 
Example #14
Source File: scheduler.py    From asynq with Apache License 2.0
def _flush_batch(self, batch):
        self.on_before_batch_flush(batch)
        try:
            if _debug_options.COLLECT_PERF_STATS:
                start = utime()
                batch.flush()
                batch.dump_perf_stats(utime() - start)
            else:
                batch.flush()
        finally:
            self.on_after_batch_flush(batch)
        return 0 
Example #15
Source File: counter.py    From DeepNeuralNet-QSAR with GNU General Public License v3.0
def tick(self):
        self.cur += 1
        if self.cur % self.step == 0:
            stderr.write(str(self.cur))
            stderr.write("\r")
            stderr.flush() 
Example #16
Source File: trezor.py    From encompass with GNU General Public License v3.0
def log(msg):
    stderr.write("%s\n" % msg)
    stderr.flush() 
Example #17
Source File: debug.py    From asynq with Apache License 2.0
def async_exception_hook(type, error, tb):
    """Exception hook capable of printing async stack traces."""

    stdout.flush()
    stderr.flush()
    if _use_original_exc_handler and original_hook is not None:
        original_hook(type, error, tb)
    dump_error(error, tb=tb) 
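A hook like this only takes effect once it is assigned to sys.excepthook; the excerpt does not show that wiring, but with the standard library it is just a sketch like:

import sys

original_hook = sys.excepthook  # keep the previous handler, as the hook above expects
sys.excepthook = async_exception_hook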
Example #18
Source File: debug.py    From asynq with Apache License 2.0
def dump_stack(skip=0, limit=None):
    """Dumps current stack trace."""
    skip += 2  # To skip dump_stack and traceback.extract_stack
    if limit is None:
        limit = options.STACK_DUMP_LIMIT
    print("--- Stack trace: -----------------------------------------------------")
    try:
        stack = traceback.extract_stack(limit=None if limit is None else limit + skip)
        print("".join(traceback.format_list(stack[:-skip])), end="")
    finally:
        print("----------------------------------------------------------------------")
        stdout.flush() 
Example #19
Source File: morfessor.py    From Ossian with Apache License 2.0
def main():
    logging.basicConfig(stream=stderr, level=INFO)

    a = ArgumentParser()
    a.add_argument('-data', dest='data', required=True, metavar='WORDLIST',
                   help="a text file (the corpus) consisting of one word per line. The word may be preceded by a word"\
                        " count (separated by whitespace), otherwise a count of one is assumed. If the same word "\
                        "occurs many times, the counts are accumulated.")
    a.add_argument('-finish', dest='finish', metavar='float', type=float, default=0.005,
                   help="convergence threshold. From one pass over all input words to the next, "\
                        "if the overall coding length in bits (i.e. logprob) of the lexicon together with the corpus "\
                        "improves less than this value times the number of word types (distinct word forms) in the "\
                        "data, the program stops. (If this value is small the program runs for a longer time and the "\
                        "result is in principle more accurate. However, the changes in word splittings during the "\
                        "last training epochs are usually very small.) The value must be within the range: 0 < float "\
                        "< 1. Default 0.005")
    a.add_argument('-rand', dest='rand', metavar='int', type=int, default=0,
                   help="random seed that affects the sorting of words when processing them. Default 0")
    a.add_argument('-gammalendistr', dest='gammalendistr', type=float, metavar='float', nargs=2,
                   help="Use Gamma Length distribution with two parameters. Float1 is the prior for the most common "\
                        "morph length in the lexicon, such that 0 < float1 <= 24*float2. Float2 is the beta value of "\
                        "the Gamma pdf, such that beta > 0. The beta value affects the wideness of the morph length "\
                        "distribution. The higher beta, the wider and less discriminative the distribution. If this "\
                        "option is omitted, morphs in the lexicon are terminated with  an end-of-morph character, "\
                        "which corresponds to an exponential pdf for morph lengths. Suggested values: float1 = 7.0, "\
                        "float2 = 1.0 ")
    a.add_argument('-zipffreqdistr', dest='zipffreqdistr', type=float, metavar='float',
                   help="Use Zipf Frequency distribution with paramter float1 for the proportion of morphs in the "\
                        "lexicon that occur only once in the data (hapax legomena): 0 < value < 1. If this option is "\
                        "omitted a (non-informative) morph frequency distribution based on enumerative coding is used"\
                        " instead. Suggested value: 0.5")
    a.add_argument('-load', dest='load', metavar='filename',
                   help="An existing model for word splitting is loaded from a file (which is the output of an "\
                        "earlier run of this program) and the words in the corpus defined using the option '-data "\
                        "wordlist' are segmented according to the loaded model. That is, "\
                        "no learning of a new model takes place. The existing model is simply used for segmenting a " \
                        "list of words. The segmentation takes place using Viterbi search. No new morphs are ever " \
                        "created (except one-letter morphs, if there is no other way of segmenting a particular input" \
                        " word)")

    a.add_argument('-encoding', dest='encoding', help='Input encoding (defaults to local encoding)')

    a.add_argument('-savememory', type=int, nargs='?', help=SUPPRESS)

    options = a.parse_args()

    if options.load is not None:
        m = MorphModel(vars(options))
        m.load(options.load)

        for word in open(options.data):
            print(' + '.join(m.viterbi_segment_word(word.strip())))

    else:
        m = MorphModel(vars(options))
        m.train(options.data)
        stderr.flush()
        m.print_segmentation() 
Example #20
Source File: adb.py    From QCSuper with GNU General Public License v3.0
def _relaunch_adb_bridge(self):
        if hasattr(self, 'adb_proc'):
            self.adb_proc.terminate()

        self.adb_shell(
            'killall -q adb_bridge; ' +
            'chmod 755 ' + ANDROID_TMP_DIR + '/adb_bridge'
        )

        run_safe([adb_exe, 'forward', 'tcp:' + str(QCSUPER_TCP_PORT), 'tcp:' + str(QCSUPER_TCP_PORT)], check = True, stdin = DEVNULL)

        self.adb_proc = Popen([adb_exe, 'exec-out' if self.can_use_exec_out else 'shell', self.su_command % (ANDROID_TMP_DIR + '/adb_bridge')],
            stdin = DEVNULL, stdout = PIPE, stderr = STDOUT,
            preexec_fn = setpgrp,
            bufsize = 0, universal_newlines = True
        )

        for line in self.adb_proc.stdout:
            if 'Connection to Diag established' in line:
                break
            else:
                stderr.write(line)
                stderr.flush()

        self.socket = socket(AF_INET, SOCK_STREAM)

        try:
            self.socket.connect(('localhost', QCSUPER_TCP_PORT))
        except Exception:
            self.adb_proc.terminate()
            exit('Could not communicate with the adb_bridge through TCP')

        self.received_first_packet = False 
Example #21
Source File: termdown.py    From termdown with GNU General Public License v3.0
def main(**kwargs):
    """
    \b
    Starts a countdown to or from TIME. Example values for TIME:
    10, '1h 5m 30s', '12:00', '2020-01-01', '2020-01-01 14:00 UTC'.
    \b
    If TIME is not given, termdown will operate in stopwatch mode
    and count forward.
    \b
    Hotkeys:
    \tL\tLap (stopwatch mode only)
    \tR\tReset
    \tSPACE\tPause (will delay absolute TIME)
    \t+\tPlus (will add 10 seconds)
    \t-\tMinus (will subtract 10 seconds)
    \tQ\tQuit
    """
    if kwargs['time_format'] is None:
        kwargs['time_format'] = \
                DEFAULT_TIME_FORMAT[:-3] if kwargs['no_seconds'] else DEFAULT_TIME_FORMAT

    if kwargs['timespec']:
        curses.wrapper(countdown, **kwargs)
    else:
        seconds_elapsed, laps = curses.wrapper(stopwatch, **kwargs)

        for lap_index, lap_time in enumerate(laps):
            stderr.write("{:.3f}\t{}\tlap {}\n".format(
                lap_time,
                format_seconds(int(lap_time)),
                lap_index + 1,
            ))

        if laps:
            stderr.write("{:.3f}\t{}\tlap {}\n".format(
                seconds_elapsed,
                format_seconds(int(seconds_elapsed)),
                len(laps) + 1,
            ))
            laps.append(seconds_elapsed)
            total_seconds = sum(laps)
            average_seconds = total_seconds / len(laps)
            stderr.write("{:.3f}\t{}\tlap avg\n".format(
                average_seconds,
                format_seconds(int(average_seconds)),
            ))
            stderr.write("{:.3f}\t{}\ttotal\n".format(
                total_seconds,
                format_seconds(int(total_seconds)),
            ))
        else:
            stderr.write("{:.3f}\t{}\ttotal\n".format(
                seconds_elapsed,
                format_seconds(int(seconds_elapsed)),
            ))
        stderr.flush() 
Example #22
Source File: __main__.py    From vscode-azurecli with MIT License
def main():
    timings = False
    start = time.time()
    initialize()
    if timings: print('initialize {} s'.format(time.time() - start), file=stderr)

    start = time.time()
    command_table = load_command_table()
    if timings: print('load_command_table {} s'.format(time.time() - start), file=stderr)

    start = time.time()
    group_index = get_group_index(command_table)
    if timings: print('get_group_index {} s'.format(time.time() - start), file=stderr)

    start = time.time()
    snippets = get_snippets(command_table) if AUTOMATIC_SNIPPETS_ENABLED else []
    if timings: print('get_snippets {} s'.format(time.time() - start), file=stderr)

    def enqueue_output(input, queue):
        # The sentinel must be '' (not b''): stdin is a text stream here,
        # and readline() returns '' at EOF.
        for line in iter(input.readline, ''):
            queue.put(line)

    queue = Queue()
    thread = Thread(target=enqueue_output, args=(stdin, queue))
    thread.daemon = True
    thread.start()

    bkg_start = time.time()
    keep_loading = True
    while True:

        if keep_loading:
            keep_loading = load_arguments(command_table, 10)
            if not keep_loading and timings: print('load_arguments {} s'.format(time.time() - bkg_start), file=stderr)

        try:
            line = queue.get_nowait() if keep_loading else queue.get()
        except Empty:
            continue
        
        start = time.time()
        request = json.loads(line)
        response_data = None
        if request['data'].get('request') == 'status':
            response_data = get_status()
            if timings: print('get_status {} s'.format(time.time() - start), file=stderr)
        elif request['data'].get('request') == 'hover':
            response_data = get_hover_text(group_index, command_table, request['data']['command'])
            if timings: print('get_hover_text {} s'.format(time.time() - start), file=stderr)
        else:
            response_data = get_completions(group_index, command_table, snippets, request['data'], True)
            if timings: print('get_completions {} s'.format(time.time() - start), file=stderr)
        response = {
            'sequence': request['sequence'],
            'data': response_data
        }
        output = json.dumps(response)
        stdout.write(output + '\n')
        stdout.flush()
        stderr.flush()