Python os.setpgrp() Examples

The following are 30 code examples of os.setpgrp(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module os, or try the search function.
Example #1
Source File: _utils.py    From imageio-ffmpeg with BSD 2-Clause "Simplified" License 6 votes vote down vote up
def _popen_kwargs(prevent_sigint=False):
    startupinfo = None
    preexec_fn = None
    creationflags = 0
    if sys.platform.startswith("win"):
        # Stops executable from flashing on Windows (see #22)
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    if prevent_sigint:
        # Prevent propagation of sigint (see #4)
        # https://stackoverflow.com/questions/5045771
        if sys.platform.startswith("win"):
            creationflags = 0x00000200
        else:
            preexec_fn = os.setpgrp  # the _pre_exec does not seem to work
    return {
        "startupinfo": startupinfo,
        "creationflags": creationflags,
        "preexec_fn": preexec_fn,
    } 
Example #2
Source File: dispatch.py    From accelerator with Apache License 2.0 6 votes vote down vote up
def run(cmd, close_in_child, keep_in_child, with_pgrp=True):
	"""Fork and exec *cmd*; return the child's pid in the parent.

	Args:
		cmd: argv list; cmd[0] is the executable path.
		close_in_child: iterable of fds to close in the child before exec.
		keep_in_child: iterable of fds to keep (made inheritable on PY3).
		with_pgrp: when True, the child gets its own process group so the
			whole group can be killed if the job fails.

	Returns:
		The child pid (parent side only; the child never returns).
	"""
	child = os.fork()
	if child:
		return child
	if with_pgrp:
		os.setpgrp() # this pgrp is killed if the job fails
	for fd in close_in_child:
		os.close(fd)
	keep_in_child = set(keep_in_child)
	# NOTE(review): assumes BD_STATUS_FD / BD_TERM_FD are always set in the
	# environment; int(None) would raise TypeError here otherwise.
	keep_in_child.add(int(os.getenv('BD_STATUS_FD')))
	keep_in_child.add(int(os.getenv('BD_TERM_FD')))
	close_fds(keep_in_child)
	# unreadable stdin - less risk of stuck jobs
	devnull = os.open('/dev/null', os.O_RDONLY)
	os.dup2(devnull, 0)
	os.close(devnull)
	if PY3:
		keep_in_child.update([1, 2])
		for fd in keep_in_child:
			os.set_inheritable(fd, True)
	try:
		os.execv(cmd[0], cmd)
	except OSError:
		# exec failed; fall through so the forked child never returns into
		# the caller's code (the original would have propagated the exception).
		pass
	# os._exit requires a status code; the original bare os._exit() raised
	# TypeError. Reaching this line means exec failed, so exit non-zero.
	os._exit(1)
Example #3
Source File: _utils.py    From adbutils with MIT License 6 votes vote down vote up
def _popen_kwargs(prevent_sigint=False):
    startupinfo = None
    preexec_fn = None
    creationflags = 0
    if sys.platform.startswith("win"):
        # Stops executable from flashing on Windows (see imageio/imageio-ffmpeg#22)
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    if prevent_sigint:
        # Prevent propagation of sigint (see imageio/imageio-ffmpeg#4)
        # https://stackoverflow.com/questions/5045771
        if sys.platform.startswith("win"):
            creationflags = 0x00000200
        else:
            preexec_fn = os.setpgrp  # the _pre_exec does not seem to work
    return {
        "startupinfo": startupinfo,
        "creationflags": creationflags,
        "preexec_fn": preexec_fn,
    } 
Example #4
Source File: version_layout.py    From Blender-Version-Manager with GNU General Public License v3.0 6 votes vote down vote up
def open(self):
        """Launch the selected Blender build as a detached background process
        and start watching running instances.

        Uses self.platform, self.root_folder and self.version to locate the
        executable; the resulting Popen handle is appended to self.processes.
        """
        if self.platform == 'Windows':
            # DETACHED_PROCESS keeps Blender alive after this manager exits.
            DETACHED_PROCESS = 0x00000008
            b3d_exe = os.path.join(
                self.root_folder, self.version, "blender.exe")
            process = subprocess.Popen(b3d_exe, shell=True, stdin=None, stdout=None,
                                       stderr=None, close_fds=True, creationflags=DETACHED_PROCESS)
        elif self.platform == 'Linux':
            b3d_exe = os.path.join(self.root_folder, self.version, "blender")
            # nohup plus preexec_fn=os.setpgrp detaches Blender from this
            # process's terminal and process group so it survives the manager.
            # NOTE(review): on any other platform `process` is unbound and the
            # append below would raise NameError — confirm only these two
            # platform values are possible.
            process = subprocess.Popen(
                'nohup "' + b3d_exe + '"', shell=True, stdout=None, stderr=None, close_fds=True, preexec_fn=os.setpgrp)

        self.processes.append(process)

        if (len(self.processes) == 1):
            # First running instance: start the observer thread and wire its
            # signals to the UI callbacks.
            self.observe_instances = ObserveInstances(self)
            self.observe_instances.started.connect(self.observe_started)
            self.observe_instances.finished.connect(self.observe_finished)
            self.observe_instances.count_changed.connect(self.count_changed)
            self.observe_instances.start()
        else:
            self.count_changed()
Example #5
Source File: Tool.py    From RENAT with Apache License 2.0 6 votes vote down vote up
def tcpdump_to_file(self,filename='capture.pcap',params='', timeout='10s'):
        """ Uses tcpdump (for packet capture) and wait

        The keyword ignores detail output of the command.
        By default, the keyword only captures 10s

        Parameters:
        - ``filename``: name of the pcap file created under the result folder.
        - ``params``: extra options passed verbatim to tcpdump.
        - ``timeout``: capture duration (Robot Framework time format).
        """
        BuiltIn().log('Run tcpdump command')
        result_file = '%s/%s' % (Common.get_result_path(),filename)
        cmd = 'sudo /usr/sbin/tcpdump %s -w %s' % (params,result_file)
        # preexec_fn=os.setpgrp detaches tcpdump from this process's group so
        # signals aimed at the test runner do not kill the capture early.
        proc1 = subprocess.Popen(cmd,stderr=subprocess.STDOUT,stdout=subprocess.PIPE,shell=True,preexec_fn=os.setpgrp)
        time.sleep(DateTime.convert_time(timeout))

        # NOTE(review): with shell=True, proc1.pid is the shell's pid, not
        # necessarily tcpdump's — confirm the kill reaches tcpdump itself.
        output2 = subprocess.check_output('sudo /bin/kill %s' % proc1.pid,shell=True)
        time.sleep(1)
        output1 = b'\n'.join(proc1.stdout.readlines())
        BuiltIn().log(output1)
        BuiltIn().log(output2)

        # change owner of the captured file
        username = Common.current_username()
        usergroup = Common.current_usergroup()
        output = subprocess.check_output('sudo /bin/chown %s:%s %s' % (username,usergroup,result_file),shell=True)

        BuiltIn().log('Executed tcpdump command `%s`' % cmd)
Example #6
Source File: ostools.py    From YASA with Apache License 2.0 6 votes vote down vote up
def __init__(self, cmd, cwd=None, env=None):
        """Start *cmd* in a shell, in its own process group, and begin reading
        its combined stdout/stderr asynchronously.

        Args:
            cmd: shell command string to execute.
            cwd: working directory for the child (None = inherit).
            env: environment mapping for the child (None = inherit).
        """
        self._cmd = cmd
        self._cwd = cwd

        # Create process with new process group
        # Sending a signal to a process group will send it to all children
        # Hopefully this way no orphaned processes will be left behind
        self._process = subprocess.Popen(
            self._cmd,
            cwd=self._cwd,
            # Pass env through: the original accepted it but never used it.
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
            shell=True,
            bufsize=0,
            # Create new process group on POSIX, setpgrp does not exist on Windows
            #preexec_fn=os.setsid)
            preexec_fn=os.setpgrp)  # pylint: disable=no-member
        # Log the command, not cwd: the original joined self._cwd, which raises
        # TypeError when cwd is None and scatters characters otherwise.
        LOGGER.debug("Started process with pid=%i: '%s'", self._process.pid, self._cmd)

        self._queue = InterruptableQueue()
        self._reader = AsynchronousFileReader(self._process.stdout, self._queue)
        self._reader.start()
Example #7
Source File: compiled_executor.py    From judge-server with GNU Affero General Public License v3.0 6 votes vote down vote up
def create_executable_limits(self) -> Optional[Callable[[], None]]:
        """Build the pre-exec hook applied to the compiler process.

        Returns None when the required modules are unavailable on this
        platform (the imports below fail).
        """
        try:
            import resource
            from dmoj.utils.os_ext import oom_score_adj, OOM_SCORE_ADJ_MAX
        except ImportError:
            return None

        def limit_executable():
            # Detach into a fresh process group so signals aimed at the judge
            # do not hit the compiler (and vice versa).
            os.setpgrp()

            # Mark compiler process as first to die in an OOM situation, just to ensure that the judge will not
            # be killed.
            try:
                oom_score_adj(OOM_SCORE_ADJ_MAX)
            except Exception:
                import traceback

                traceback.print_exc()

            # Cap any file the compiler writes to self.executable_size bytes.
            resource.setrlimit(resource.RLIMIT_FSIZE, (self.executable_size, self.executable_size))

        return limit_executable
Example #8
Source File: zynthian_gui_midi_recorder.py    From zynthian-ui with GNU General Public License v3.0 6 votes vote down vote up
def start_recording(self):
		"""Start a new MIDI recording via jack-smf-recorder.sh.

		Returns True when a recorder process was spawned (or spawning was
		attempted), False when a recording is already in progress.
		"""
		if self.get_status() not in ("REC", "PLAY+REC"):
			logging.info("STARTING NEW MIDI RECORD ...")
			try:
				cmd=self.sys_dir +"/sbin/jack-smf-recorder.sh --port {}".format(self.jack_record_port)
				#logging.info("COMMAND: %s" % cmd)
				# preexec_fn=os.setpgrp detaches the recorder from this UI's
				# process group so UI signals do not stop the recording.
				# NOTE(review): cmd.split(" ") with shell=True passes a list to
				# the shell; on POSIX only the first element is the shell
				# command — confirm this invocation works as intended.
				self.rec_proc=Popen(cmd.split(" "), shell=True, preexec_fn=os.setpgrp)
				sleep(0.2)
			except Exception as e:
				logging.error("ERROR STARTING MIDI RECORD: %s" % e)
				self.zyngui.show_info("ERROR STARTING MIDI RECORD:\n %s" % e)
				self.zyngui.hide_info_timer(5000)

			self.update_list()
			return True

		else:
			return False
Example #9
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #10
Source File: genericWrapper.py    From CAVE with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def process_results_ext(self, filepointer, out_args, ext_call):
        '''
        Runs an external program on the solver output file and parses its
        stdout into an AClib result map.

        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            out_args: additional output arguments (unused here; kept for interface compatibility).
            ext_call: command string of the external parser; the output file name is appended to it.
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
        '''
        
        cmd = ext_call.split(" ")
        cmd.append(filepointer.name)
        self.print_d(" ".join(cmd))
        try:
            # preexec_fn=os.setpgrp: run the parser in its own process group so
            # signals sent to the wrapper's group do not kill it mid-parse.
            io = Popen(cmd, shell=False, preexec_fn=os.setpgrp, stdout=PIPE, universal_newlines=True)
            self._subprocesses.append(io)
            out_, _ = io.communicate()
            self._subprocesses.remove(io)
        except OSError:
            # Parser could not be executed: record the failure and abort the run.
            self._ta_misc = "failed to run external program for output parsing"
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        
        # Parse "key: value" lines emitted by the external parser.
        result_map = {}
        for line in out_.split("\n"):
            if line.startswith("status:"):
                result_map["status"] = line.split(":")[1].strip(" ")
            elif line.startswith("quality:"):
                result_map["quality"] = line.split(":")[1].strip(" ")
            elif line.startswith("misc:"):
                result_map["misc"] = line.split(":")[1]
        
        return result_map
Example #11
Source File: genericWrapper.py    From CAVE with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_command_line_args_ext(self, runargs, config, ext_call):
        '''
        When production of the target algorithm is done from a source other than python,
        override this method to return a command call list to execute whatever you need to produce the command line.

        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
            ext_call: string to call external program to get callstring of target algorithm
        Returns:
            A command call list to execute the command producing a single line of output containing the solver command string
        '''
        # mode="w": the external program expects text; the default "w+b" mode
        # would make the str writes below raise TypeError on Python 3.
        callstring_in = NamedTemporaryFile(mode="w", suffix=".csv", prefix="callstring", dir=self._tmp_dir, delete=False)
        callstring_in.write("%s\n" %(runargs["instance"]))
        callstring_in.write("%d\n" %(runargs["seed"]))
        for name,value in config.items():
            callstring_in.write("%s,%s\n" %(name,value))
        callstring_in.flush()
        
        cmd = ext_call.split(" ")
        cmd.append(callstring_in.name)
        self.print_d(" ".join(cmd))
        try:
            # preexec_fn=os.setpgrp keeps the helper out of the wrapper's
            # process group so group-wide signals do not kill it.
            io = Popen(cmd, shell=False, preexec_fn=os.setpgrp, stdout=PIPE, universal_newlines=True)
            self._subprocesses.append(io)
            out_, _ = io.communicate()
            self._subprocesses.remove(io)
        except OSError:
            self._ta_misc = "failed to run external program for output parsing : %s" %(" ".join(cmd))
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        if not out_ :
            self._ta_misc = "external program for output parsing yielded empty output: %s" %(" ".join(cmd))
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        # Clean up the temp file (delete=False above, so remove it manually).
        callstring_in.close()
        os.remove(callstring_in.name)
        self._instance = runargs["instance"]
        return out_.strip('\n\r\b')
Example #12
Source File: main.py    From TecoGAN with Apache License 2.0 5 votes vote down vote up
def preexec():
    """Pre-exec hook: put the child in its own process group so signals
    (e.g. Ctrl-C) sent to the parent are not forwarded to it."""
    os.setpgrp()
Example #13
Source File: tunnel.py    From chopsticks with Apache License 2.0 5 votes vote down vote up
def connect_pipes(self):
        """Spawn the tunnel subprocess and bind its stdio as message pipes.

        The child is started in its own process group (os.setpgrp) so a
        Ctrl-C aimed at the parent does not also kill the tunnel.
        """
        self.proc = subprocess.Popen(
            self.cmd_args(),
            bufsize=0,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            preexec_fn=os.setpgrp,
        )
        # Write requests to the child's stdin, read replies from its stdout;
        # stderr is kept separate for diagnostics.
        self.wpipe = self.proc.stdin
        self.rpipe = self.proc.stdout
        self.epipe = self.proc.stderr
Example #14
Source File: smb-autopwn.py    From smb-autopwn with MIT License 5 votes vote down vote up
def run_proc_xterm(cmd):
    '''
    Runs a process in an xterm window that doesn't die with icebreaker.py
    '''
    full_cmd = 'nohup xterm -hold -e {}'.format(cmd)
    print_info('Running: {}'.format(full_cmd))
    # Split only the xterm arguments; the wrapped system command stays as a
    # single string so xterm's -e option receives it intact.
    argv = full_cmd.split(' ', 4)
    # preexec_fn=os.setpgrp lets the xterm window outlive this script.
    return Popen(argv, stdout=PIPE, stderr=PIPE, preexec_fn=os.setpgrp)
Example #15
Source File: runGan.py    From TecoGAN with Apache License 2.0 5 votes vote down vote up
def preexec():
    # Detach into a new process group; keeps SIGINT to the parent from
    # being forwarded to this child.
    os.setpgrp()
Example #16
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #17
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #18
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #19
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #20
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #21
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #22
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #23
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #24
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #25
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #26
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests, then execute the given cmd.

        While the test is running, verify that the expected number of parallel
        jobs can be found among the recursive children of the make command.

        Args:
            cmd: argv list for the make invocation under test.
            tests: test definitions used to populate the temporary directory.
            expected_parallel_jobs: number of concurrent sleep processes that
                must be observed while make runs.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Run make in its own process group so os.killpg() below kills
            # make together with all of its descendants.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until make has started executing tests (marker file appears).
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            # Re-check a few times to guard against a transient match.
            check_count = 3

            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # Kill the whole process group, then reap the child.
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #27
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Execute cmd in a freshly populated test directory and verify,
        while it runs, that the expected number of parallel jobs appears
        among the recursive children of the make process."""

        with TempDir() as tmp:
            workdir = tmp.dir()
            Test.copy_makefile_test_to(workdir)
            Test.populate_test_dir(workdir, tests, Test.same_dir)

            def become_group_leader():
                # Put the child into its own process group so we can kill
                # the whole tree at once.
                os.setpgrp()
                return

            proc = subprocess.Popen(
                cmd,
                cwd=workdir,
                env=Test.get_clean_env(),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=become_group_leader)

            leader = proc.pid

            def run_started():
                marker = self.find_file_at_root(
                    workdir, [".makefile_test_executed_tests"])
                return marker != None

            wait_for_condition(run_started)

            # Both of the indefinite_tests should be running in parallel;
            # sample the process tree a few times.
            check_count = 3
            for i in range(check_count):
                wait_for_condition(
                    lambda: expected_parallel_jobs
                    == Test.descendant_sleep_process_count(leader))

            os.killpg(leader, signal.SIGTERM)
            stdout_data, stderr_data = proc.communicate()
            logging.debug(stdout_data)
            logging.debug(stderr_data)
Example #28
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Populate a test dir with the given tests and execute the given cmd.

        While the test is running, verify that the expected number of
        parallel jobs can be found in the recursive children of the make
        command.
        """

        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Make the child the leader of a new process group so the
            # whole make process tree can be killed via os.killpg() below.
            def in_new_pgrp():
                os.setpgrp()
                return
            p = subprocess.Popen(cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=in_new_pgrp)

            pid = p.pid

            # Block until the marker file shows the test run has started.
            wait_for_condition(lambda: self.find_file_at_root(d, \
                [".makefile_test_executed_tests"]) != None)

            # Both of the indefinite_tests should be running in parallel.
            check_count = 3

            # Sample the process tree repeatedly to avoid a one-off
            # coincidental match.
            for i in range(check_count):
                wait_for_condition(lambda: Test.descendant_sleep_process_count(pid) \
                        == expected_parallel_jobs)

            # pid is also the pgid, because the child called setpgrp().
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #29
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Run *cmd* in a populated test dir and check make's parallelism.

        Populate a temporary test directory with the given tests, execute
        the given cmd there, and, while it is running, verify that the
        expected number of parallel jobs can be found among the recursive
        children of the make command.

        cmd                    -- argv list for the make invocation.
        tests                  -- tests to place in the directory.
        expected_parallel_jobs -- number of concurrently running sleep
                                  processes that must be observed.
        """
        with TempDir() as td:
            d = td.dir()
            Test.copy_makefile_test_to(d)
            Test.populate_test_dir(d, tests, Test.same_dir)

            env = Test.get_clean_env()
            # Start make as the leader of a new process group so the whole
            # process tree can be terminated with os.killpg() below.
            # (os.setpgrp can be passed directly; no wrapper is needed.)
            p = subprocess.Popen(
                cmd,
                cwd=d,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=os.setpgrp)

            pid = p.pid

            # Wait until the test run has actually started before counting
            # worker processes.
            wait_for_condition(
                lambda: self.find_file_at_root(
                    d, [".makefile_test_executed_tests"]) is not None)

            # Both of the indefinite_tests should be running in parallel.
            # Sample the process tree several times to guard against a
            # transient match.
            check_count = 3
            for _ in range(check_count):
                wait_for_condition(
                    lambda: Test.descendant_sleep_process_count(pid)
                    == expected_parallel_jobs)

            # pid doubles as the pgid because the child called setpgrp().
            os.killpg(pid, signal.SIGTERM)
            out, err = p.communicate()
            logging.debug(out)
            logging.debug(err)
Example #30
Source File: TestVariousMakeInvocation.py    From Makefile.test with Apache License 2.0 5 votes vote down vote up
def make_parallelism(self, cmd, tests, expected_parallel_jobs):
        """Execute cmd in a freshly populated test directory and verify,
        while it runs, that the expected number of parallel jobs appears
        among the recursive children of the make process."""

        with TempDir() as tmp:
            workdir = tmp.dir()
            Test.copy_makefile_test_to(workdir)
            Test.populate_test_dir(workdir, tests, Test.same_dir)

            def become_group_leader():
                # Put the child into its own process group so we can kill
                # the whole tree at once.
                os.setpgrp()
                return

            proc = subprocess.Popen(
                cmd,
                cwd=workdir,
                env=Test.get_clean_env(),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=become_group_leader)

            leader = proc.pid

            def run_started():
                marker = self.find_file_at_root(
                    workdir, [".makefile_test_executed_tests"])
                return marker != None

            wait_for_condition(run_started)

            # Both of the indefinite_tests should be running in parallel;
            # sample the process tree a few times.
            check_count = 3
            for i in range(check_count):
                wait_for_condition(
                    lambda: expected_parallel_jobs
                    == Test.descendant_sleep_process_count(leader))

            os.killpg(leader, signal.SIGTERM)
            stdout_data, stderr_data = proc.communicate()
            logging.debug(stdout_data)
            logging.debug(stderr_data)