Python ptvsd.wait_for_attach() Examples

The following are 8 code examples of ptvsd.wait_for_attach(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the ptvsd module, or try the search function.
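All of the examples follow the same two-step pattern: ptvsd.enable_attach() starts a debug server inside the running process, and ptvsd.wait_for_attach() blocks until a client such as VS Code attaches. A minimal sketch of that pattern, assuming the ptvsd 4.x keyword-address form and an arbitrary placeholder port 5678:

import ptvsd

# Start the debug server inside this process (placeholder host/port).
ptvsd.enable_attach(address=('127.0.0.1', 5678))
print('Waiting for debugger attach on port 5678 ...')

# Block here until an IDE attaches, then continue running normally.
ptvsd.wait_for_attach()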
Example #1
Source File: start.py    From gauge-python with MIT License
def start():
    if environ.get('DEBUGGING'):
        ptvsd.enable_attach(address=(
            '127.0.0.1', int(environ.get('DEBUG_PORT'))))
        print(ATTACH_DEBUGGER_EVENT)
        # Bail out via _handle_detached if no debugger attaches within
        # the configured wait time (default 30 seconds).
        t = Timer(int(environ.get("debugger_wait_time", 30)), _handle_detached)
        t.start()
        ptvsd.wait_for_attach()
        t.cancel()
    logger.debug('Starting grpc server..')
    server = grpc.server(ThreadPoolExecutor(max_workers=1))
    p = server.add_insecure_port('127.0.0.1:0')
    handler = handlers.GrpcServiceHandler(server)
    spg.add_RunnerServicer_to_server(handler, server)
    logger.info('Listening on port:{}'.format(p))
    server.start()
    t = threading.Thread(name="listener", target=handler.wait_for_kill_event)
    t.start()
    t.join()
    os._exit(0) 
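Example #1 bounds the wait with a threading.Timer that fires _handle_detached if no debugger shows up in time. A lighter-weight sketch of the same idea, assuming a ptvsd 4.x build (where the function is declared as wait_for_attach(timeout=None)) and a placeholder port:

import ptvsd

ptvsd.enable_attach(address=('127.0.0.1', 5678))  # placeholder port
# Pass a timeout in seconds to put an upper bound on the wait;
# without it, wait_for_attach() blocks indefinitely.
ptvsd.wait_for_attach(timeout=30)

Unlike the Timer approach, a timed-out wait simply returns and lets the program continue rather than tearing the process down.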
Example #2
Source File: util.py    From meson-cmake-wrapper with MIT License
def debug_connect():
    # Wait until port 3018 is free: try to bind a throwaway socket and
    # retry once a second while another process still holds the port.
    connected = False
    while not connected:
        try:
            import socket
            server = socket.socket(proto=socket.IPPROTO_TCP)
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(('127.0.0.1', 3018))
            server.close()
            connected = True
        except OSError:
            import time
            time.sleep(1)

    import ptvsd
    # ptvsd 3.x API: the first argument is a shared secret that the
    # attaching client must present.
    ptvsd.enable_attach('SECRET', ('127.0.0.1', 3018))
    ptvsd.wait_for_attach()
    return True
Example #3
Source File: visualstudio_py_remote_launcher.py    From iot-utilities with BSD 3-Clause "New" or "Revised" License
def debug_remote(
    file, 
    port_num,
    debug_id,
    wait_on_exception,
    redirect_output, 
    wait_on_exit,
    break_on_systemexit_zero,
    debug_stdlib,
    run_as
):
    global BREAK_ON_SYSTEMEXIT_ZERO, DEBUG_STDLIB
    BREAK_ON_SYSTEMEXIT_ZERO = break_on_systemexit_zero
    DEBUG_STDLIB = debug_stdlib

    import datetime
    print('%s: Remote launcher starting ptvsd attach wait with File: %s, Port: %d, Id: %s\n' % (datetime.datetime.now(), file, port_num, debug_id))

    ptvsd.enable_attach(debug_id, address=('0.0.0.0', port_num), redirect_output=redirect_output)
    try:
        # _ptvsdhelper is an optional helper module; if it is missing,
        # the attach wait is skipped and the script starts immediately.
        import _ptvsdhelper
        if _ptvsdhelper.ping_debugger_for_attach():
            ptvsd.wait_for_attach()
    except ImportError:
        _ptvsdhelper = None

    # now execute main file (vspd is visualstudio_py_debugger, imported
    # at module level in the original file)
    globals_obj = {'__name__': '__main__'}
    if run_as == 'module':
        vspd.exec_module(file, globals_obj)
    elif run_as == 'code':
        vspd.exec_code(file, '<string>', globals_obj)
    else:
        vspd.exec_file(file, globals_obj)

# arguments are port, debug id, normal arguments which should include a filename to execute

# change to directory we expected to start from 
Example #4
Source File: transfo_experiment.py    From axcell with Apache License 2.0
def __post_init__(self):
    if os.path.exists(self.output_dir) and os.listdir(
            self.output_dir) and self.do_train and not self.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                self.output_dir))

    # Setup distant debugging if needed
    if self.server_ip and self.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(self.server_ip, self.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if self.local_rank == -1 or self.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
        self.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(self.local_rank)
        device = torch.device("cuda", self.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        self.n_gpu = 1
    self.device = device
    self.output_mode = "classification"

    self.train_batch_size = self.per_gpu_train_batch_size * max(1, self.n_gpu)
    self.eval_batch_size = self.per_gpu_eval_batch_size * max(1, self.n_gpu)
    self._tokenizer = None
    self._model = None
    self._data_cache = None
    self.train_started = None
Example #5
Source File: aggregator_base.py    From In2ItChicago with GNU General Public License v3.0
def __init__(self, organization, base_url, date_format, request_date_format=None, **kwargs):
    if config.debug:
        try:
            import ptvsd
            ptvsd.enable_attach(address=('0.0.0.0', 5860))
        except Exception:
            # attach already enabled
            pass
        if not ptvsd.is_attached():
            ptvsd.wait_for_attach()

    self.organization = organization
    # date_format is the string that specifies the date style of the target website
    if request_date_format is None:
        request_date_format = date_format

    self.jobid = kwargs['_job'] if '_job' in kwargs else None

    self.session = HttpUtils.get_session()

    self.date_format = date_format
    self.time_utils = TimeUtils(date_format)
    self.base_url = base_url
    self.identifier = re.sub(r'\W', '', base_url)
    self.event_manager = EventManager()

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    self.memory_handler = logging.handlers.MemoryHandler(0)
    self.memory_handler.setFormatter(formatter)

    self.stream_handler = logging.StreamHandler()
    self.stream_handler.setFormatter(formatter)

    self.configure_logger(self.name, self.memory_handler, logging.INFO)
    self.configure_logger(self.name, self.stream_handler, logging.INFO)
    self.configure_logger('scrapy', self.memory_handler, logging.WARNING)
    self.configure_logger('scrapy', self.stream_handler, logging.WARNING)
    self.configure_logger('twisted', self.memory_handler, logging.WARNING)
    self.configure_logger('twisted', self.stream_handler, logging.WARNING)

    start_date = datetime.now().strftime('%m-%d-%Y')
    end_date = (datetime.now() + relativedelta(months=+1)).strftime('%m-%d-%Y')

    request_format_utils = TimeUtils('%m-%d-%Y')
    # When this is running for multiple days, validating if the date is in the past causes issues
    self.start_date = request_format_utils.convert_date_format(start_date, request_date_format, validate_past=False)
    self.end_date = request_format_utils.convert_date_format(end_date, request_date_format, validate_past=False)
    self.start_timestamp = request_format_utils.min_timestamp_for_day(start_date)
    self.end_timestamp = request_format_utils.max_timestamp_for_day(end_date)

    if not config.bypass_auth:
        self.token = self.session.post(config.login, json={'email': config.scraper_username, 'password': config.scraper_password}).content.decode()
    else:
        self.token = None
Example #6
Source File: run_seq2seq.py    From unilm with MIT License
def prepare(args):
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    os.makedirs(args.output_dir, exist_ok=True)
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2)

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    logger.info("Training/evaluation parameters %s", args)

    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, 'einsum')
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") 
Example #7
Source File: transformer_base.py    From exbert with Apache License 2.0
def generic_train(model, args):
    # init model
    set_seed(args)

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))

    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=5
    )

    train_params = dict(
        accumulate_grad_batches=args.gradient_accumulation_steps,
        gpus=args.n_gpu,
        max_epochs=args.num_train_epochs,
        early_stop_callback=False,
        gradient_clip_val=args.max_grad_norm,
        checkpoint_callback=checkpoint_callback,
    )

    if args.fp16:
        train_params["use_amp"] = args.fp16
        train_params["amp_level"] = args.fp16_opt_level

    if args.n_tpu_cores > 0:
        global xm
        import torch_xla.core.xla_model as xm

        train_params["num_tpu_cores"] = args.n_tpu_cores
        train_params["gpus"] = 0

    if args.n_gpu > 1:
        train_params["distributed_backend"] = "ddp"

    trainer = pl.Trainer(**train_params)

    if args.do_train:
        trainer.fit(model)

    return trainer 
Example #8
Source File: app.py    From clgen with GNU General Public License v3.0
def RunWithArgs(
  main: Callable[[List[str]], None], argv: Optional[List[str]] = None,
):
  """Begin executing the program.

  Args:
    main: The main function to execute. It takes a single argument "argv",
      which is a list of command line arguments with parsed flags removed.
      If it returns an integer, it is used as the process's exit code.
    argv: A non-empty list of the command line arguments including program name;
      sys.argv is used if None.
  """

  def DoMain(argv):
    """Run the user-provided main method, with app-level arg handling."""
    if FLAGS.version:
      print(GetVersionInformationString())
      sys.exit(0)
    elif FLAGS.dump_flags:
      print(FlagsToString())
      sys.exit(0)
    elif FLAGS.dump_flags_to_json:
      print(
        json.dumps(
          FlagsToDict(), sort_keys=True, indent=2, separators=(",", ": ")
        )
      )
      sys.exit(0)

    # Optionally wait for the user to attach a Visual Studio (Code) debugger.
    # This requires additional configuration of the IDE.
    # See: https://stackoverflow.com/a/61367381
    if FLAGS.ptvsd:
      import ptvsd

      ptvsd.enable_attach(address=("localhost", 5724), redirect_output=True)
      Log(1, "Waiting to attach VS Code debugger on port 5724 ...")
      ptvsd.wait_for_attach()
      Log(1, "Debugger attached. Resuming ...")

    main(argv)

  try:
    absl_app.run(DoMain, argv=argv)
  except KeyboardInterrupt:
    FlushLogs()
    sys.stdout.flush()
    sys.stderr.flush()
    print("keyboard interrupt")
    sys.exit(1)