Python ptvsd.enable_attach() Examples
The following are 12 code examples of ptvsd.enable_attach(), drawn from open-source projects. Each example notes its source file, project, and license.
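All of these examples follow the same basic pattern: call ptvsd.enable_attach() to start the debug server (usually guarded by a flag or environment variable so normal runs are unaffected), optionally block on ptvsd.wait_for_attach() until the IDE connects, then continue running the program. As a minimal sketch of that pattern (the ENABLE_PTVSD variable name is illustrative, not from any project below; port 5678 is the conventional default for VS Code's Python remote-attach configuration):

import os

if os.environ.get("ENABLE_PTVSD"):
    import ptvsd

    # Start the debug server. ptvsd 4.x takes the address as a keyword
    # argument; the older 3.x API took a secret string as the first
    # positional argument (both styles appear in the examples below).
    ptvsd.enable_attach(address=("0.0.0.0", 5678))
    print("Waiting for debugger attach on port 5678 ...")
    ptvsd.wait_for_attach()  # block until the IDE connects

# ... the rest of the program now runs under the debugger.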
Example #1
Source File: start.py From gauge-python with MIT License
def start():
    if environ.get('DEBUGGING'):
        ptvsd.enable_attach(address=('127.0.0.1', int(environ.get('DEBUG_PORT'))))
        print(ATTACH_DEBUGGER_EVENT)
        t = Timer(int(environ.get("debugger_wait_time", 30)), _handle_detached)
        t.start()
        ptvsd.wait_for_attach()
        t.cancel()
    logger.debug('Starting grpc server..')
    server = grpc.server(ThreadPoolExecutor(max_workers=1))
    p = server.add_insecure_port('127.0.0.1:0')
    handler = handlers.GrpcServiceHandler(server)
    spg.add_RunnerServicer_to_server(handler, server)
    logger.info('Listening on port:{}'.format(p))
    server.start()
    t = threading.Thread(name="listener", target=handler.wait_for_kill_event)
    t.start()
    t.join()
    os._exit(0)
Example #2
Source File: Blender_VScode_Debugger.py From Blender-VScode-Debugger with GNU General Public License v3.0
def execute(self, context):
    user_preferences = context.user_preferences
    addon_prefs = user_preferences.addons[__name__].preferences

    ptvsdPath = os.path.abspath(addon_prefs.ptvsdPath)
    if not os.path.exists(ptvsdPath):
        self.report({'ERROR'}, 'Unable to find PTVSD module at %r. Configure the addon properties '
                               'in the User Preferences menu.' % ptvsdPath)
        return {'CANCELLED'}

    if not any(ptvsdPath in p for p in sys.path):
        sys.path.append(ptvsdPath)

    import ptvsd
    ptvsd.enable_attach("my_secret", address=('0.0.0.0', 3000))
    return {'FINISHED'}
Example #3
Source File: util.py From meson-cmake-wrapper with MIT License
def debug_connect():
    connected = False
    while not connected:
        try:
            import socket
            server = socket.socket(proto=socket.IPPROTO_TCP)
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(('127.0.0.1', 3018))
            server.close()
            connected = True
        except OSError:
            import time
            time.sleep(1)
    import ptvsd
    ptvsd.enable_attach('SECRET', ('127.0.0.1', 3018))
    ptvsd.wait_for_attach()
    return True
Example #4
Source File: __init__.py From blender-debugger-for-vscode with GNU General Public License v3.0
def execute(self, context):
    # get ptvsd and import if exists
    prefs = bpy.context.preferences.addons[__name__].preferences
    ptvsd_path = prefs.path
    ptvsd_port = prefs.port

    # actually check ptvsd is still available
    if ptvsd_path == "PTVSD not Found":
        self.report({"ERROR"}, "Couldn't detect ptvsd, please specify the path manually in the addon preferences or reload the addon if you installed ptvsd after enabling it.")
        return {"CANCELLED"}

    if not os.path.exists(os.path.abspath(ptvsd_path + "/ptvsd")):
        self.report({"ERROR"}, "Can't find ptvsd at: %r/ptvsd." % ptvsd_path)
        return {"CANCELLED"}

    if not any(ptvsd_path in p for p in sys.path):
        sys.path.append(ptvsd_path)

    global ptvsd  # so we can do check later
    import ptvsd

    # can only be attached once, no way to detach (at least not that I understand?)
    try:
        ptvsd.enable_attach(("0.0.0.0", ptvsd_port), redirect_output=True)
    except:
        print("Server already running.")

    # call our confirmation listener
    bpy.ops.debug.check_for_debugger()
    return {"FINISHED"}
Example #5
Source File: visualstudio_py_remote_launcher.py From iot-utilities with BSD 3-Clause "New" or "Revised" License
def debug_remote(
    file,
    port_num,
    debug_id,
    wait_on_exception,
    redirect_output,
    wait_on_exit,
    break_on_systemexit_zero,
    debug_stdlib,
    run_as
):
    global BREAK_ON_SYSTEMEXIT_ZERO, DEBUG_STDLIB
    BREAK_ON_SYSTEMEXIT_ZERO = break_on_systemexit_zero
    DEBUG_STDLIB = debug_stdlib

    import datetime
    print('%s: Remote launcher starting ptvsd attach wait with File: %s, Port: %d, Id: %s\n' %
          (datetime.datetime.now(), file, port_num, debug_id))
    ptvsd.enable_attach(debug_id, address=('0.0.0.0', port_num), redirect_output=redirect_output)
    try:
        import _ptvsdhelper
        if _ptvsdhelper.ping_debugger_for_attach():
            ptvsd.wait_for_attach()
    except ImportError:
        _ptvsdhelper = None

    # now execute main file
    globals_obj = {'__name__': '__main__'}
    if run_as == 'module':
        vspd.exec_module(file, globals_obj)
    elif run_as == 'code':
        vspd.exec_code(file, '<string>', globals_obj)
    else:
        vspd.exec_file(file, globals_obj)

# arguments are port, debug id, normal arguments which should include a filename to execute
# change to directory we expected to start from
Example #6
Source File: manage.py From Wagtail-Pipit with MIT License
def main():
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pipit.settings.prod")

    if_exists_load_env("../local.env")
    if_exists_load_env(".env")

    # enable vs code remote debugging
    # https://github.com/Microsoft/PTVS/issues/1057
    if settings.DEBUG and settings.VS_CODE_REMOTE_DEBUG and os.environ.get("RUN_MAIN"):
        import ptvsd

        ptvsd.enable_attach(address=("0.0.0.0", 5678))

    execute_from_command_line(sys.argv)
Example #7
Source File: transfo_experiment.py From axcell with Apache License 2.0
def __post_init__(self):
    if os.path.exists(self.output_dir) and os.listdir(
            self.output_dir) and self.do_train and not self.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                self.output_dir))

    # Setup distant debugging if needed
    if self.server_ip and self.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(self.server_ip, self.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if self.local_rank == -1 or self.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not self.no_cuda else "cpu")
        self.n_gpu = torch.cuda.device_count()
    else:
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(self.local_rank)
        device = torch.device("cuda", self.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        self.n_gpu = 1
    self.device = device

    self.output_mode = "classification"
    self.train_batch_size = self.per_gpu_train_batch_size * max(1, self.n_gpu)
    self.eval_batch_size = self.per_gpu_eval_batch_size * max(1, self.n_gpu)

    self._tokenizer = None
    self._model = None
    self._data_cache = None
    self.train_started = None
Example #8
Source File: romulus.py From pi_romulus with GNU General Public License v2.0
def onStart(self):
    """
    Initialize the forms.
    """
    if DEBUG:
        # Set DEBUG above to True, and you will be able to use remote
        # debugging for PyCharm and Visual Studio Code.
        import ptvsd
        ptvsd.enable_attach("my_secret", address=('localhost', 3000))
    self.addForm('MAIN', SearchForm, name="Search for ROM")
Example #9
Source File: aggregator_base.py From In2ItChicago with GNU General Public License v3.0
def __init__(self, organization, base_url, date_format, request_date_format=None, **kwargs):
    if config.debug:
        try:
            import ptvsd
            ptvsd.enable_attach(address=('0.0.0.0', 5860))
        except:
            # attach already enabled
            pass
        if not ptvsd.is_attached():
            ptvsd.wait_for_attach()

    self.organization = organization
    # date_format is the string that specifies the date style of the target website
    if request_date_format == None:
        request_date_format = date_format

    self.jobid = kwargs['_job'] if '_job' in kwargs else None
    self.session = HttpUtils.get_session()
    self.date_format = date_format
    self.time_utils = TimeUtils(date_format)
    self.base_url = base_url
    self.identifier = re.sub(r'\W', '', base_url)
    self.event_manager = EventManager()

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    self.memory_handler = logging.handlers.MemoryHandler(0)
    self.memory_handler.setFormatter(formatter)
    self.stream_handler = logging.StreamHandler()
    self.stream_handler.setFormatter(formatter)

    self.configure_logger(self.name, self.memory_handler, logging.INFO)
    self.configure_logger(self.name, self.stream_handler, logging.INFO)
    self.configure_logger('scrapy', self.memory_handler, logging.WARNING)
    self.configure_logger('scrapy', self.stream_handler, logging.WARNING)
    self.configure_logger('twisted', self.memory_handler, logging.WARNING)
    self.configure_logger('twisted', self.stream_handler, logging.WARNING)

    start_date = datetime.now().strftime('%m-%d-%Y')
    end_date = (datetime.now() + relativedelta(months=+1)).strftime('%m-%d-%Y')

    request_format_utils = TimeUtils('%m-%d-%Y')
    # When this is running for multiple days, validating if the date is in the past causes issues
    self.start_date = request_format_utils.convert_date_format(start_date, request_date_format, validate_past=False)
    self.end_date = request_format_utils.convert_date_format(end_date, request_date_format, validate_past=False)
    self.start_timestamp = request_format_utils.min_timestamp_for_day(start_date)
    self.end_timestamp = request_format_utils.max_timestamp_for_day(end_date)

    if not config.bypass_auth:
        self.token = self.session.post(
            config.login,
            json={'email': config.scraper_username, 'password': config.scraper_password}
        ).content.decode()
    else:
        self.token = None
Example #10
Source File: run_seq2seq.py From unilm with MIT License
def prepare(args):
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    os.makedirs(args.output_dir, exist_ok=True)
    json.dump(args.__dict__, open(os.path.join(
        args.output_dir, 'train_opt.json'), 'w'), sort_keys=True, indent=2)

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    logger.info("Training/evaluation parameters %s", args)

    # Before we do anything with models, we want to ensure that we get fp16 execution of
    # torch.einsum if args.fp16 is set. Otherwise it'll default to "promote" mode, and we'll
    # get fp32 operations. Note that running `--fp16_opt_level="O2"` will remove the need for
    # this code, but it is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, 'einsum')
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
Example #11
Source File: transformer_base.py From exbert with Apache License 2.0
def generic_train(model, args):
    # init model
    set_seed(args)

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))

    checkpoint_callback = pl.callbacks.ModelCheckpoint(
        filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=5
    )

    train_params = dict(
        accumulate_grad_batches=args.gradient_accumulation_steps,
        gpus=args.n_gpu,
        max_epochs=args.num_train_epochs,
        early_stop_callback=False,
        gradient_clip_val=args.max_grad_norm,
        checkpoint_callback=checkpoint_callback,
    )

    if args.fp16:
        train_params["use_amp"] = args.fp16
        train_params["amp_level"] = args.fp16_opt_level

    if args.n_tpu_cores > 0:
        global xm
        import torch_xla.core.xla_model as xm

        train_params["num_tpu_cores"] = args.n_tpu_cores
        train_params["gpus"] = 0

    if args.n_gpu > 1:
        train_params["distributed_backend"] = "ddp"

    trainer = pl.Trainer(**train_params)

    if args.do_train:
        trainer.fit(model)

    return trainer
Example #12
Source File: app.py From clgen with GNU General Public License v3.0
def RunWithArgs(
    main: Callable[[List[str]], None],
    argv: Optional[List[str]] = None,
):
    """Begin executing the program.

    Args:
      main: The main function to execute. It takes a single argument "argv",
        which is a list of command line arguments with parsed flags removed.
        If it returns an integer, it is used as the process's exit code.
      argv: A non-empty list of the command line arguments including program
        name, sys.argv is used if None.
    """

    def DoMain(argv):
        """Run the user-provided main method, with app-level arg handling."""
        if FLAGS.version:
            print(GetVersionInformationString())
            sys.exit(0)
        elif FLAGS.dump_flags:
            print(FlagsToString())
            sys.exit(0)
        elif FLAGS.dump_flags_to_json:
            print(
                json.dumps(
                    FlagsToDict(), sort_keys=True, indent=2, separators=(",", ": ")
                )
            )
            sys.exit(0)

        # Optionally wait for the user to attach a Visual Studio (Code) debugger.
        # This requires additional configuration of the IDE.
        # See: https://stackoverflow.com/a/61367381
        if FLAGS.ptvsd:
            import ptvsd

            ptvsd.enable_attach(address=("localhost", 5724), redirect_output=True)
            Log(1, "Waiting to attach VS Code debugger on port 5724 ...")
            ptvsd.wait_for_attach()
            Log(1, "Debugger attached. Resuming ...")

        main(argv)

    try:
        absl_app.run(DoMain, argv=argv)
    except KeyboardInterrupt:
        FlushLogs()
        sys.stdout.flush()
        sys.stderr.flush()
        print("keyboard interrupt")
        sys.exit(1)