Python os.fdopen() Examples
The following are 30 code examples of os.fdopen(), taken from open-source projects. Each example is preceded by its source file, project, and license, so you can follow it back to the original code in context. You may also want to check out the other available functions and classes of the os module.
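Most of the examples below follow the same basic pattern: obtain a raw, OS-level file descriptor (usually from tempfile.mkstemp() or os.open()) and wrap it with os.fdopen() to get a normal Python file object that can be used as a context manager and closed exactly once. A minimal sketch of that pattern follows; the function name write_json_atomically, the file name settings.json, and the JSON payload are illustrative only and do not appear in any of the examples.

import json
import os
import tempfile

def write_json_atomically(data, destination):
    # mkstemp() returns a raw OS-level file descriptor plus the temp file's path.
    fd, tmp_path = tempfile.mkstemp(suffix=".json",
                                    dir=os.path.dirname(destination) or ".")
    try:
        # os.fdopen() wraps the descriptor in a regular file object; closing the
        # file object also closes the underlying descriptor.
        with os.fdopen(fd, "w") as f:
            json.dump(data, f)
        # Rename only after the write succeeded, so readers never see a partial file.
        os.replace(tmp_path, destination)
    except BaseException:
        os.unlink(tmp_path)
        raise

write_json_atomically({"answer": 42}, "settings.json")

Wrapping the descriptor instead of calling open(path) a second time avoids another lookup by name and preserves the permissions and exclusivity guarantees that mkstemp() or os.open() already established.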
Example #1
Source File: bindiff.py From BASS with GNU General Public License v2.0 | 9 votes |
def bindiff_export(self, sample, is_64_bit = True, timeout = None):
    """
    Load a sample into IDA Pro, perform autoanalysis and export a BinDiff database.
    :param sample: The sample's path
    :param is_64_bit: If the sample needs to be analyzed by the 64 bit version of IDA
    :param timeout: Timeout for the analysis in seconds
    :return: The file name of the exported bindiff database. The file needs to be
             deleted by the caller. Returns None on error.
    """
    data_to_send = {
        "timeout": timeout,
        "is_64_bit": is_64_bit}
    url = "%s/binexport" % next(self._urls)
    log.debug("curl -XPOST --data '%s' '%s'", json.dumps(data_to_send), url)
    response = requests.post(url, data = data_to_send,
                             files = {os.path.basename(sample): open(sample, "rb")})
    if response.status_code == 200:
        handle, output = tempfile.mkstemp(suffix = ".BinExport")
        with os.fdopen(handle, "wb") as f:
            map(f.write, response.iter_content(1024))
        return output
    else:
        log.error("Bindiff server responded with status code %d: %s",
                  response.status_code, response.content)
        return None
Example #2
Source File: core.py From BASS with GNU General Public License v2.0 | 9 votes |
def get_num_triggering_samples(signature, samples):
    """
    Get number of samples triggering ClamAV signature _signature_.
    :param signature: A dictionary with keys 'type' for the signature type and
                      'signature' for the signature string.
    :param samples: A list of sample paths to scan.
    :returns: The number of samples triggering this signature.
    """
    handle, temp_sig = tempfile.mkstemp(suffix = "." + signature["type"])
    try:
        with os.fdopen(handle, "w") as f:
            f.write(signature["signature"])
        proc_clamscan = subprocess.Popen(["clamscan", "-d", temp_sig,
                                          "--no-summary", "--infected"] + samples,
                                         stdout = subprocess.PIPE,
                                         stderr = subprocess.PIPE)
        stdout, stderr = proc_clamscan.communicate()
        if not stdout:
            return 0
        else:
            return len(stdout.strip().split("\n"))
    finally:
        os.unlink(temp_sig)
Example #3
Source File: cache.py From jbox with MIT License | 6 votes |
def set(self, key, value, timeout=None):
    if timeout is None:
        timeout = int(time() + self.default_timeout)
    elif timeout != 0:
        timeout = int(time() + timeout)
    filename = self._get_filename(key)
    self._prune()
    try:
        fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                   dir=self._path)
        with os.fdopen(fd, 'wb') as f:
            pickle.dump(timeout, f, 1)
            pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
        rename(tmp, filename)
        os.chmod(filename, self._mode)
    except (IOError, OSError):
        return False
    else:
        return True
Example #4
Source File: bindiff.py From BASS with GNU General Public License v2.0 | 6 votes |
def compare(self, primary, secondary, timeout = None):
    """
    Run BinDiff on the two BinDiff databases.
    :param primary: The first BinExport database
    :param secondary: The second BinExport database
    :param timeout: Timeout for the command in seconds
    :returns: The directory name of the directory with the generated data on the shared volume
    """
    url = "%s/compare" % next(self._urls)
    log.debug("curl -XPOST --form 'timeout=%s' --form 'primary=@%s' --form 'secondary=@%s' '%s'",
              str(timeout), primary, secondary, url)
    response = requests.post(url, data = {"timeout": timeout}, \
            files = {"primary": open(primary, "rb"), "secondary": open(secondary, "rb")})
    if response.status_code == 200:
        handle, path = tempfile.mkstemp(suffix = ".bindiff.sqlite3")
        with os.fdopen(handle, "wb") as f:
            map(f.write, response.iter_content(1024))
        return path
    else:
        log.error("Bindiff server responded with status code %d: %s",
                  response.status_code, response.content)
        return None
Example #5
Source File: epr.py From epr with MIT License | 6 votes |
def open_media(scr, epub, src):
    sfx = os.path.splitext(src)[1]
    fd, path = tempfile.mkstemp(suffix=sfx)
    try:
        with os.fdopen(fd, "wb") as tmp:
            tmp.write(epub.file.read(src))
        # run(VWR +" "+ path, shell=True)
        subprocess.call(
            VWR + [path],
            # shell=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL
        )
        k = scr.getch()
    finally:
        os.remove(path)
    return k
Example #6
Source File: filecache.py From cutout with MIT License | 6 votes |
def set(self, key, value, timeout=None):
    if timeout is None:
        timeout = self.default_timeout
    filename = self._get_filename(key)
    self._prune()
    try:
        fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix,
                                   dir=self._path)
        f = os.fdopen(fd, 'wb')
        try:
            pickle.dump(int(time() + timeout), f, 1)
            pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
        finally:
            f.close()
        rename(tmp, filename)
        os.chmod(filename, self._mode)
    except (IOError, OSError):
        pass
Example #7
Source File: bindiff.py From BASS with GNU General Public License v2.0 | 6 votes |
def pickle_export(self, sample, is_64_bit = True, timeout = None):
    """
    Load a sample into IDA Pro, perform autoanalysis and export a pickle file.
    :param sample: The sample's path
    :param is_64_bit: If the sample needs to be analyzed by the 64 bit version of IDA
    :param timeout: Timeout for the analysis in seconds
    :return: The file name of the exported pickle database. The file needs to be
             deleted by the caller. Returns None on error.
    """
    data_to_send = {
        "timeout": timeout,
        "is_64_bit": is_64_bit}
    url = "%s/pickle" % next(self._urls)
    log.debug("curl -XPOST --data '%s' '%s'", json.dumps(data_to_send), url)
    response = requests.post(url, data = data_to_send,
                             files = {os.path.basename(sample): open(sample, "rb")})
    if response.status_code == 200:
        handle, output = tempfile.mkstemp(suffix = ".pickle")
        with os.fdopen(handle, "wb") as f:
            map(f.write, response.iter_content(1024))
        return output
    else:
        log.error("Bindiff server responded with status code %d: %s",
                  response.status_code, response.content)
        return None
Example #8
Source File: perf.py From workload-collocation-agent with Apache License 2.0 | 6 votes |
def _create_file_from_fd(pfd):
    """Validates file description and creates a file-like object"""
    # -1 is returned on error: http://man7.org/linux/man-pages/man2/open.2.html#RETURN_VALUE
    if pfd == -1:
        INVALID_ARG_ERRNO = 22
        errno = ctypes.get_errno()
        if errno == INVALID_ARG_ERRNO:
            raise UnableToOpenPerfEvents('Invalid perf event file descriptor: {}, {}. '
                                         'For cgroup based perf counters it may indicate there is '
                                         'no enough hardware counters for measure all metrics!'
                                         'If traceback shows problem in perf_uncore '
                                         'it could be problem with PERF_FORMAT_GROUP in'
                                         'perf_event_attr structure for perf_event_open syscall.'
                                         'Older kernel cannot handle with extended format group.'
                                         'Kernel cannot be 3.10.0-862.el7.x86_64 or lower.'
                                         ''.format(errno, os.strerror(errno)))
        else:
            raise UnableToOpenPerfEvents('Invalid perf event file descriptor: {}, {}.'
                                         .format(errno, os.strerror(errno)))
    return os.fdopen(pfd, 'rb')
Example #9
Source File: workertmp.py From jbox with MIT License | 6 votes |
def __init__(self, cfg):
    old_umask = os.umask(cfg.umask)
    fdir = cfg.worker_tmp_dir
    if fdir and not os.path.isdir(fdir):
        raise RuntimeError("%s doesn't exist. Can't create workertmp." % fdir)
    fd, name = tempfile.mkstemp(prefix="wgunicorn-", dir=fdir)

    # allows the process to write to the file
    util.chown(name, cfg.uid, cfg.gid)
    os.umask(old_umask)

    # unlink the file so we don't leak tempory files
    try:
        if not IS_CYGWIN:
            util.unlink(name)
        self._tmp = os.fdopen(fd, 'w+b', 1)
    except:
        os.close(fd)
        raise

    self.spinner = 0
Example #10
Source File: util.py From razzy-spinner with GNU General Public License v3.0 | 6 votes |
def cache_to_tempfile(cls, sequence, delete_on_gc=True):
    """
    Write the given sequence to a temporary file as a pickle corpus;
    and then return a ``PickleCorpusView`` view for that temporary
    corpus file.

    :param delete_on_gc: If true, then the temporary file will be
        deleted whenever this object gets garbage-collected.
    """
    try:
        fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
        output_file = os.fdopen(fd, 'wb')
        cls.write(sequence, output_file)
        output_file.close()
        return PickleCorpusView(output_file_name, delete_on_gc)
    except (OSError, IOError) as e:
        raise ValueError('Error while creating temp file: %s' % e)

######################################################################
#{ Block Readers
######################################################################
Example #11
Source File: pidlockfile.py From jbox with MIT License | 6 votes |
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

        Get the numeric process ID (“PID”) of the current process
        and write it to the named file as a line of text.

        """
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    pidfile = os.fdopen(pidfile_fd, 'w')

    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character. For
    #   example, if crond was process number 25, /var/run/crond.pid
    #   would contain three characters: two, five, and newline.

    pid = os.getpid()
    pidfile.write("%s\n" % pid)
    pidfile.close()
Example #12
Source File: linear_flows.py From me-ica with GNU Lesser General Public License v2.1 | 6 votes |
def dump(self, filename=None):
    """Save a pickle dump of the crashing object on filename.
    If filename is None, the crash dump is saved on a file created by
    the tempfile module. Return the filename."""
    if filename is None:
        # This 'temporary file' should actually stay 'forever', i.e. until
        # deleted by the user.
        (fd, filename) = _tempfile.mkstemp(suffix=".pic", prefix="MDPcrash_")
        fl = _os.fdopen(fd, 'w+b', -1)
    else:
        fl = open(filename, 'w+b', -1)
    _cPickle.dump(self.crashing_obj, fl)
    fl.close()
    return filename
Example #13
Source File: comparebinaries.py From binaryanalysis with Apache License 2.0 | 6 votes |
def comparebinaries(path1, path2):
    basepath1 = os.path.basename(path1)
    dirpath1 = os.path.dirname(path1)
    basepath2 = os.path.basename(path2)
    dirpath2 = os.path.dirname(path2)

    ## binaries are identical
    if gethash(dirpath1, basepath1) == gethash(dirpath2, basepath2):
        return 0

    difftmp = tempfile.mkstemp()
    os.fdopen(difftmp[0]).close()
    p = subprocess.Popen(["bsdiff", path1, path2, difftmp[1]],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
    ## cleanup
    (stanout, stanerr) = p.communicate()
    diffsize = os.stat(difftmp[1]).st_size
    os.unlink(difftmp[1])
    return diffsize
Example #14
Source File: batxor.py From binaryanalysis with Apache License 2.0 | 6 votes |
def unpackXOR(filename, sig, tempdir=None):
    tmpdir = fwunpack.unpacksetup(tempdir)
    tmpfile = tempfile.mkstemp(dir=tmpdir)
    os.fdopen(tmpfile[0]).close()

    fwunpack.unpackFile(filename, 0, tmpfile[1], tmpdir, modify=True)

    datafile = open(filename)
    datafile.seek(0)
    data = datafile.read(1000000)

    ## read data, XOR, write data out again
    f2 = open(tmpfile[1], 'w')
    counter = 0
    while data != '':
        for i in data:
            f2.write(chr(ord(i) ^ ord(signatures[sig][counter])))
            counter = (counter+1) % len(signatures[sig])
        data = datafile.read(1000000)
    f2.close()
    datafile.close()
    return tmpdir
Example #15
Source File: notused.py From codimension with GNU General Public License v3.0 | 6 votes |
def __run(self):
    """Runs vulture"""
    errTmp = tempfile.mkstemp()
    errStream = os.fdopen(errTmp[0])
    process = Popen(['vulture', self.__path],
                    stdin=PIPE, stdout=PIPE, stderr=errStream)
    process.stdin.close()
    processStdout = process.stdout.read()
    process.stdout.close()
    errStream.seek(0)
    err = errStream.read()
    errStream.close()
    process.wait()
    try:
        os.unlink(errTmp[1])
    except:
        pass
    return processStdout.decode(DEFAULT_ENCODING), err.strip()
Example #16
Source File: pidlockfile.py From recruit with Apache License 2.0 | 6 votes |
def write_pid_to_pidfile(pidfile_path):
    """ Write the PID in the named PID file.

        Get the numeric process ID (“PID”) of the current process
        and write it to the named file as a line of text.

        """
    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    open_mode = 0o644
    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
    pidfile = os.fdopen(pidfile_fd, 'w')

    # According to the FHS 2.3 section on PID files in /var/run:
    #
    #   The file must consist of the process identifier in
    #   ASCII-encoded decimal, followed by a newline character. For
    #   example, if crond was process number 25, /var/run/crond.pid
    #   would contain three characters: two, five, and newline.

    pid = os.getpid()
    pidfile.write("%s\n" % pid)
    pidfile.close()
Example #17
Source File: link_pyqt.py From qutebrowser with GNU General Public License v3.0 | 6 votes |
def run_py(executable, *code):
    """Run the given python code with the given executable."""
    if os.name == 'nt' and len(code) > 1:
        # Windows can't do newlines in arguments...
        oshandle, filename = tempfile.mkstemp()
        with os.fdopen(oshandle, 'w') as f:
            f.write('\n'.join(code))
        cmd = [executable, filename]
        try:
            ret = subprocess.run(cmd, universal_newlines=True, check=True,
                                 stdout=subprocess.PIPE).stdout
        finally:
            os.remove(filename)
    else:
        cmd = [executable, '-c', '\n'.join(code)]
        ret = subprocess.run(cmd, universal_newlines=True, check=True,
                             stdout=subprocess.PIPE).stdout
    return ret.rstrip()
Example #18
Source File: file_cache.py From jbox with MIT License | 5 votes |
def _secure_open_write(filename, fmode):
    # We only want to write to this file, so open it in write only mode
    flags = os.O_WRONLY

    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    # will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL

    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows we'll mark this file as binary
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Before we open our file, we want to delete any existing file that is
    # there
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to opening
        pass

    # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, that an
    # error will be raised. Because we utilize a lockfile this should only
    # happen if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # An error occurred wrapping our FD in a file object
        os.close(fd)
        raise
Example #19
Source File: gtest_parallel.py From gtest-parallel with Apache License 2.0 | 5 votes |
def __init__(self, output_dir):
    if sys.stdout.isatty():
        # stdout needs to be unbuffered since the output is interactive.
        if isinstance(sys.stdout, io.TextIOWrapper):
            # workaround for https://bugs.python.org/issue17404
            sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
                                          line_buffering=True,
                                          write_through=True,
                                          newline='\n')
        else:
            sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)

    self.output_dir = output_dir

    self.total_tasks = 0
    self.finished_tasks = 0

    self.out = Outputter(sys.stdout)
    self.stdout_lock = threading.Lock()
Example #20
Source File: tasks_mirror.py From pagure with GNU General Public License v2.0 | 5 votes |
def _create_ssh_key(keyfile):
    """ Create the public and private ssh keys.

    The specified file name will be the private key and the public one will
    be in a similar file name ending with a '.pub'.

    """
    private_key = rsa.generate_private_key(
        public_exponent=65537, key_size=4096, backend=default_backend()
    )
    private_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    with os.fdopen(
        os.open(keyfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600), "wb"
    ) as stream:
        stream.write(private_pem)

    public_key = private_key.public_key()
    public_pem = _serialize_public_ssh_key(public_key)
    if public_pem:
        with open(keyfile + ".pub", "wb") as stream:
            stream.write(public_pem)
Example #21
Source File: _termui_impl.py From recruit with Apache License 2.0 | 5 votes |
def edit(self, text):
    import tempfile

    text = text or ''
    if text and not text.endswith('\n'):
        text += '\n'

    fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
    try:
        if WIN:
            encoding = 'utf-8-sig'
            text = text.replace('\n', '\r\n')
        else:
            encoding = 'utf-8'
        text = text.encode(encoding)

        f = os.fdopen(fd, 'wb')
        f.write(text)
        f.close()
        timestamp = os.path.getmtime(name)

        self.edit_file(name)

        if self.require_save \
           and os.path.getmtime(name) == timestamp:
            return None

        f = open(name, 'rb')
        try:
            rv = f.read()
        finally:
            f.close()
        return rv.decode('utf-8-sig').replace('\r\n', '\n')
    finally:
        os.unlink(name)
Example #22
Source File: file_cache.py From recruit with Apache License 2.0 | 5 votes |
def _secure_open_write(filename, fmode):
    # We only want to write to this file, so open it in write only mode
    flags = os.O_WRONLY

    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
    # will open *new* files.
    # We specify this because we want to ensure that the mode we pass is the
    # mode of the file.
    flags |= os.O_CREAT | os.O_EXCL

    # Do not follow symlinks to prevent someone from making a symlink that
    # we follow and insecurely open a cache file.
    if hasattr(os, "O_NOFOLLOW"):
        flags |= os.O_NOFOLLOW

    # On Windows we'll mark this file as binary
    if hasattr(os, "O_BINARY"):
        flags |= os.O_BINARY

    # Before we open our file, we want to delete any existing file that is
    # there
    try:
        os.remove(filename)
    except (IOError, OSError):
        # The file must not exist already, so we can just skip ahead to opening
        pass

    # Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
    # race condition happens between the os.remove and this line, that an
    # error will be raised. Because we utilize a lockfile this should only
    # happen if someone is attempting to attack us.
    fd = os.open(filename, flags, fmode)
    try:
        return os.fdopen(fd, "wb")
    except:
        # An error occurred wrapping our FD in a file object
        os.close(fd)
        raise
Example #23
Source File: cache.py From recruit with Apache License 2.0 | 5 votes |
def set(self, key, value, timeout=None, mgmt_element=False):
    # Management elements have no timeout
    if mgmt_element:
        timeout = 0
    # Don't prune on management element update, to avoid loop
    else:
        self._prune()

    timeout = self._normalize_timeout(timeout)
    filename = self._get_filename(key)
    try:
        fd, tmp = tempfile.mkstemp(
            suffix=self._fs_transaction_suffix, dir=self._path
        )
        with os.fdopen(fd, "wb") as f:
            pickle.dump(timeout, f, 1)
            pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
        rename(tmp, filename)
        os.chmod(filename, self._mode)
    except (IOError, OSError):
        return False
    else:
        # Management elements should not count towards threshold
        if not mgmt_element:
            self._update_count(delta=1)
        return True
Example #24
Source File: sessions.py From recruit with Apache License 2.0 | 5 votes |
def save(self, session):
    fn = self.get_session_filename(session.sid)
    fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
    f = os.fdopen(fd, "wb")
    try:
        dump(dict(session), f, HIGHEST_PROTOCOL)
    finally:
        f.close()
    try:
        rename(tmp, fn)
        os.chmod(fn, self.mode)
    except (IOError, OSError):
        pass
Example #25
Source File: misc_util.py From recruit with Apache License 2.0 | 5 votes |
def make_temp_file(suffix='', prefix='', text=True):
    if not hasattr(_tdata, 'tempdir'):
        _tdata.tempdir = tempfile.mkdtemp()
        _tmpdirs.append(_tdata.tempdir)
    fid, name = tempfile.mkstemp(suffix=suffix,
                                 prefix=prefix,
                                 dir=_tdata.tempdir,
                                 text=text)
    fo = os.fdopen(fid, 'w')
    return fo, name

# Hooks for colored terminal output.
# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
Example #26
Source File: terminal.py From moler with BSD 3-Clause "New" or "Revised" License | 5 votes |
async def start_reading_pty(protocol, pty_fd):
    """
    Make asyncio to read file descriptor of Pty

    :param protocol: protocol of subprocess speaking via Pty
    :param pty_fd: file descriptor of Pty (dialog with subprocess goes that way)
    :return:
    """
    loop, its_new = thread_secure_get_event_loop()

    # Create Protocol classes
    class PtyFdProtocol(asyncio.Protocol):
        def connection_made(self, transport):
            if hasattr(protocol, 'on_pty_open'):
                protocol.on_pty_open()

        def data_received(self, data, recv_time):
            if hasattr(protocol, 'data_received'):
                protocol.data_received(data)

        def connection_lost(self, exc):
            if hasattr(protocol, 'on_pty_close'):
                protocol.on_pty_close(exc)

    # Add the pty's to the read loop
    # Also store the transport, protocol tuple for each call to
    # connect_read_pipe, to prevent the destruction of the protocol
    # class instance, otherwise no data is received.
    fd_transport, fd_protocol = await loop.connect_read_pipe(PtyFdProtocol,
                                                             os.fdopen(pty_fd, 'rb', 0))
    protocol.pty_fd_transport = fd_transport
    protocol.pty_fd_protocol = fd_protocol
Example #27
Source File: baseclient.py From rucio with Apache License 2.0 | 5 votes |
def __write_token(self):
    """
    Write the current auth_token to the local token file.
    """
    # check if rucio temp directory is there. If not create it with permissions only for the current user
    if not path.isdir(self.token_path):
        try:
            LOG.debug('rucio token folder \'%s\' not found. Create it.' % self.token_path)
            makedirs(self.token_path, 0o700)
        except Exception:
            raise

    # if the file exists check if the stored token is valid. If not request a new one and overwrite the file. Otherwise use the one from the file
    try:
        file_d, file_n = mkstemp(dir=self.token_path)
        with fdopen(file_d, "w") as f_token:
            f_token.write(self.auth_token)
        move(file_n, self.token_file)
        if self.auth_type == 'oidc' and self.token_exp_epoch and self.auth_oidc_refresh_active:
            file_d, file_n = mkstemp(dir=self.token_path)
            with fdopen(file_d, "w") as f_exp_epoch:
                f_exp_epoch.write(str(self.token_exp_epoch))
            move(file_n, self.token_exp_epoch_file)
    except IOError as error:
        print("I/O error({0}): {1}".format(error.errno, error.strerror))
    except Exception:
        raise
Example #28
Source File: update_ssh_config.py From JetPack with Apache License 2.0 | 5 votes |
def update_etc_hosts(overcloud):
    """ Rewrites /etc/hosts, adding fresh entries for each overcloud node. """

    etc_hosts = '/etc/hosts'
    etc_file = open(etc_hosts, 'r')

    new_fd, new_hosts = tempfile.mkstemp()
    new_file = os.fdopen(new_fd, 'w')

    marker = '# Overcloud entries generated by update_ssh_config.py\n'

    # Generate a clean hosts file with old entries removed
    for line in etc_file.readlines():
        words = line.split()
        if ((line == marker) or
                (len(words) == 3 and words[1] in overcloud.keys())):
            continue
        new_file.write(line)
    etc_file.close()

    # Add new entries for the overcloud nodes
    new_file.write(marker)
    for node in sorted(overcloud.keys()):
        new_file.write('{}\n'.format(overcloud[node]))
    new_file.close()

    os.chmod(new_hosts, 0644)
    os.system('sudo mv {} {}'.format(new_hosts, etc_hosts))
    os.system('sudo chown root:root {}'.format(etc_hosts))
Example #29
Source File: workflow.py From Quiver-alfred with MIT License | 5 votes |
def acquire(self, blocking=True):
    """Acquire the lock if possible.

    If the lock is in use and ``blocking`` is ``False``, return
    ``False``.

    Otherwise, check every `self.delay` seconds until it acquires
    lock or exceeds `self.timeout` and raises an exception.

    """
    start = time.time()
    while True:
        try:
            fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            with os.fdopen(fd, 'w') as fd:
                fd.write('{0}'.format(os.getpid()))
            break
        except OSError as err:
            if err.errno != errno.EEXIST:  # pragma: no cover
                raise

            if self.timeout and (time.time() - start) >= self.timeout:
                raise AcquisitionError('Lock acquisition timed out.')
            if not blocking:
                return False
            time.sleep(self.delay)

    self._locked = True
    return True
Example #30
Source File: createdb.py From binaryanalysis with Apache License 2.0 | 5 votes |
def parsepython((filedir, filepath, unpackdir)):
    comments = []
    strings = []
    pathname = os.path.join(filedir, filepath)
    returndict = {}

    parseiterator = open(pathname, 'r').readline
    parsetokens = tokenize.generate_tokens(parseiterator)
    for p in parsetokens:
        if p[0] == tokenize.COMMENT:
            comments.append(p[1])
        elif p[0] == tokenize.STRING:
            strings.append(p[1])

    if comments != [] or strings != []:
        commentsfile = None
        stringsfile = None
        if comments != []:
            ## there are comments, so print them to a file
            commentsfile = tempfile.mkstemp(dir=unpackdir)
            for c in comments:
                os.write(commentsfile[0], c)
                os.write(commentsfile[0], "\n")
            os.fdopen(commentsfile[0]).close()
            commentsfile = os.path.basename(commentsfile[1])
        if strings != []:
            ## there are comments, so print them to a file
            stringsfile = tempfile.mkstemp(dir=unpackdir)
            for c in strings:
                os.write(stringsfile[0], c)
                os.write(stringsfile[0], "\n")
            os.fdopen(stringsfile[0]).close()
            stringsfile = os.path.basename(stringsfile[1])
        returndict = {'unpackdir': unpackdir, 'commentsfile': commentsfile, 'stringsfile': stringsfile}
        return (filedir, filepath, returndict)
    return None

## walk the Linux kernel directory and process all the Makefiles