Python os.listdir() Examples

The following are 30 code examples of os.listdir(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module os, or try the search function.
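Before the project examples, here is a minimal sketch of the basics (the directory name is only an illustration): os.listdir(path) returns the bare entry names of a directory, in arbitrary order, so callers typically sort, join each name back onto the directory, and filter.

import os

path = "."  # any directory; "." is only an illustration
for name in sorted(os.listdir(path)):  # bare names, arbitrary order unless sorted
    full = os.path.join(path, name)    # join back onto the directory
    if os.path.isfile(full) and name.endswith(".py"):
        print(full)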
Example #1
Source File: weather-icons.py    From unicorn-hat-hd with MIT License
def help():
    print("""
    Usage:
    python weather-icons.py options

    options:
    loop
    image-file.png

    example:
    weather-icons.py loop
    weather-icons.py clear-day.png

    try one of the files from this list:
    {}
    """.format(', '.join(os.listdir(folder_path)))) 
Example #2
Source File: ref_seq.py    From CAMISIM with Apache License 2.0
def __init__(self, refDir, databaseFilePath):
        """
            Provides information about NCBI reference sequences (genomes or draft genomes).

            @param refDir: directory that contains reference sequences,
                each file is named ncbi_taxon_id.[0-9]+.fna (or .fas), for instance 382638.1.fna or 2110.1.fas
            @param databaseFilePath: ncbi taxonomy file in sqlite3 format
        """
        assert os.path.isdir(refDir)
        assert os.path.isfile(databaseFilePath)
        self._taxonIdSet = set()  # taxonIds in the reference
        self._taxonIdToSize = {}  # taxonId -> cumulative file size
        for fileName in os.listdir(refDir):
            if fileName.endswith(('.fna', '.fas')):
                taxonId = int(fileName[0:fileName.index('.')])
                self._taxonIdSet.add(taxonId)
                fileSize = int(os.path.getsize(os.path.join(refDir, fileName)))
                if taxonId in self._taxonIdToSize:
                    self._taxonIdToSize[taxonId] += fileSize
                else:
                    self._taxonIdToSize[taxonId] = fileSize
        self._taxonomy = taxonomy_ncbi.TaxonomyNcbi(databaseFilePath, considerNoRank=True)
        self._childrenBuffer = {}  # taxonId -> set of children taxon Ids
        self._rankBuffer = {}  # taxonId -> rank 
Example #3
Source File: encoding_images.py    From face-attendance-machine with Apache License 2.0
def encoding_images(path):
    """
    对path路径下的子文件夹中的图片进行编码,
    TODO:
        对人脸数据进行历史库中的人脸向量进行欧式距离的比较,当距离小于某个阈值的时候提醒:
        如果相似的是本人,则跳过该条记录,并提醒已经存在,否则警告人脸过度相似问题,
    :param path:
    :return:
    """
    with open(name_and_encoding, 'w') as f:
        subdirs = [os.path.join(path, x) for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))]
        for subdir in subdirs:
            print('process image name :', subdir)
            person_image_encoding = []
            for y in os.listdir(subdir):
                print("image name is ", y)
                _image = face_recognition.load_image_file(os.path.join(subdir, y))
                face_encodings = face_recognition.face_encodings(_image)
                name = os.path.split(subdir)[-1]
                if face_encodings and len(face_encodings) == 1:
                    if len(person_image_encoding) == 0:
                        person_image_encoding.append(face_encodings[0])
                        known_face_names.append(name)
                        continue
                    for i in range(len(person_image_encoding)):
                        distances = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
                        if False in distances:
                            person_image_encoding.append(face_encodings[0])
                            known_face_names.append(name)
                            print(name, " new feature")
                            f.write(name + ":" + str(face_encodings[0]) + "\n")
                            break
                    # face_encoding = face_recognition.face_encodings(_image)[0]
                    # face_recognition.compare_faces()
            known_face_encodings.extend(person_image_encoding)
            bb = np.array(known_face_encodings)
            print("--------")
    np.save(KNOWN_FACE_ENCODINGS, known_face_encodings)
    np.save(KNOWN_FACE_NANE, known_face_names) 
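A note on this example: face_recognition.compare_faces already compares one candidate encoding against the whole list of known encodings and returns a list of booleans, so the for i in range(...) loop above recomputes the same result on every pass. A minimal equivalent sketch, reusing the example's own names (person_image_encoding, image_thread, known_face_names all come from the surrounding module):

# compare once against all stored encodings for this person
matches = face_recognition.compare_faces(person_image_encoding, face_encodings[0], tolerance=image_thread)
if False in matches:  # at least one stored encoding is sufficiently different
    person_image_encoding.append(face_encodings[0])
    known_face_names.append(name)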
Example #4
Source File: update_cache_compatibility.py    From gated-graph-transformer-network with MIT License
def main(cache_dir):
    files_list = list(os.listdir(cache_dir))
    for file in files_list:
        full_filename = os.path.join(cache_dir, file)
        if os.path.isfile(full_filename):
            print("Processing {}".format(full_filename))
            with open(full_filename, 'rb') as f:
                m, stored_kwargs = pickle.load(f)
            updated_kwargs = util.get_compatible_kwargs(model.Model, stored_kwargs)

            model_hash = util.object_hash(updated_kwargs)
            print("New hash -> " + model_hash)
            model_filename = os.path.join(cache_dir, "model_{}.p".format(model_hash))
            sys.setrecursionlimit(100000)
            with open(model_filename, 'wb') as f:
                pickle.dump((m, updated_kwargs), f, protocol=pickle.HIGHEST_PROTOCOL)

            os.remove(full_filename) 
Example #5
Source File: weather-icons.py    From unicorn-hat-hd with MIT License
def weather_icons():
    try:

        if argv[1] == 'loop':

            loop()

        elif argv[1] in os.listdir(folder_path):

            print('Drawing Image: {}'.format(argv[1]))

            img = Image.open(folder_path + argv[1])

            draw_animation(img)
            unicorn.off()

        else:
            help()

    except IndexError:
        help() 
Example #6
Source File: GXManufacturerCollection.py    From Gurux.DLMS.Python with GNU General Public License v2.0
def readManufacturerSettings(cls, manufacturers, path):
        # pylint: disable=broad-except
        manufacturers = []  # note: the list passed in is replaced, not extended
        files = [f for f in listdir(path) if isfile(join(path, f))]  # assumes: from os import listdir; from os.path import isfile, join
        if files:
            for it in files:
                if it.endswith(".obx"):
                    try:
                        manufacturers.append(cls.__parse(os.path.join(path, it)))
                    except Exception as e:
                        print(e)
                        continue

    #
    # Serialize manufacturer from the xml.
    #
    # @param in
    #            Input stream.
    # Serialized manufacturer.
    # 
Example #7
Source File: deckhand.py    From drydock with Apache License 2.0
def load_schemas(self):
        self.v1_doc_schemas = dict()
        schema_dir = self._get_schema_dir()

        for schema_file in os.listdir(schema_dir):
            f = open(os.path.join(schema_dir, schema_file), 'r')
            for schema in yaml.safe_load_all(f):
                schema_for = schema['metadata']['name']
                if schema_for in self.v1_doc_schemas:
                    self.logger.warning(
                        "Duplicate document schemas found for document kind %s."
                        % schema_for)
                self.logger.debug(
                    "Loaded schema for document kind %s." % schema_for)
                self.v1_doc_schemas[schema_for] = schema.get('data')
            f.close() 
Example #8
Source File: Senti.py    From Financial-NLP with Apache License 2.0
def score_of_date(self, date='2018-08-01'):
        """
        Returns
            tuple: double   score_of_date
                   tuple    info:( title, score)
        """        
        senti_score = 0
        articles = os.listdir(os.path.join(self.article_dir, date))
        info = []
        count = 0
        for article in articles:
            if is_cut_file(os.path.join(self.article_dir, date, article)):
                continue
            score, title = self.score_of_article(os.path.join(self.article_dir, date, article))
            senti_score += score
            info.append((title, score))
            count += 1
        return senti_score / len(articles), info
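One caveat here: articles skipped by is_cut_file still count in the len(articles) denominator, so they silently pull the average toward zero, and an empty directory raises ZeroDivisionError. If the intent is to average only over scored articles (an assumption, not something the source states), the return line could guard on count instead:

        # assumption: average over the articles actually scored, guarding against empty input
        return (senti_score / count if count else 0.0), info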
Example #9
Source File: Senti.py    From Financial-NLP with Apache License 2.0
def calculate_scores_of_all(self, saveflag=0, savefilename=''):
        dates = os.listdir(self.article_dir)
        all_date_score = []
        for date in dates:
            try:
                score, info = self.score_of_date(date)
                all_date_score.append((date, score))
            except Exception:  # skip dates whose articles cannot be scored
                continue
        if saveflag:
            rawdata = pd.DataFrame(all_date_score)
            rawdata.to_csv(savefilename)
        return all_date_score, dates
Example #10
Source File: yaml.py    From drydock with Apache License 2.0
def load_schemas(self):
        self.v1_doc_schemas = dict()
        schema_dir = self._get_schema_dir()

        for schema_file in os.listdir(schema_dir):
            f = open(os.path.join(schema_dir, schema_file), 'r')
            for schema in yaml.safe_load_all(f):
                schema_for = schema['metadata']['name']
                if schema_for in self.v1_doc_schemas:
                    self.logger.warning(
                        "Duplicate document schemas found for document kind %s."
                        % schema_for)
                self.logger.debug(
                    "Loaded schema for document kind %s." % schema_for)
                self.v1_doc_schemas[schema_for] = schema
            f.close() 
Example #11
Source File: test_pylama_linter.py    From gql with MIT License
def load_test_cases():
    base_path = os.path.dirname(__file__)
    test_case_path = os.path.join(base_path, "test_cases")
    test_case_files = os.listdir(test_case_path)

    test_cases = []

    for fname in test_case_files:
        if not fname.endswith(".py"):
            continue

        fullpath = os.path.join(test_case_path, fname)
        with open(fullpath) as f:
            data = f.read()
        codes, messages = extract_expected_errors(data)
        test_cases.append((fullpath, codes, messages))

    return test_cases 
Example #12
Source File: options.py    From arm_now with MIT License
def sync_upload(rootfs, src, dest):
    fs = Filesystem(rootfs)
    if not fs.implemented():
        return
    print("Adding current directory to the filesystem..")
    with tempfile.TemporaryDirectory() as tmpdirname:
        files = [i for i in os.listdir(".") if i != "arm_now" and not i.startswith("-")]
        if files:
            tar = tmpdirname + "/current_directory.tar"
            subprocess.check_call(["tar", "cf", tar] + files)
            subprocess.check_call("e2cp -G 0 -O 0".split(' ') + [tar, rootfs + ":/"])
            fs.create("/etc/init.d/S95_sync_current_diretory", """
                        cd {dest}
                        tar xf /current_directory.tar
                        rm /current_directory.tar
                        rm /etc/init.d/S95_sync_current_diretory
                        """.format(dest=dest), right=555)

    # TODO: check rootfs fs against parameter injection
    fs.create("/sbin/save", """
                cd {dest}
                tar cf /root.tar *
                sync
                """.format(dest=dest), right=555) 
Example #13
Source File: workflow.py    From wechat-alfred-workflow with MIT License
def _delete_directory_contents(self, dirpath, filter_func):
        """Delete all files in a directory.

        :param dirpath: path to directory to clear
        :type dirpath: ``unicode`` or ``str``
        :param filter_func: function to determine whether a file should be
            deleted or not.
        :type filter_func: ``callable``

        """
        if os.path.exists(dirpath):
            for filename in os.listdir(dirpath):
                if not filter_func(filename):
                    continue
                path = os.path.join(dirpath, filename)
                if os.path.isdir(path):
                    shutil.rmtree(path)
                else:
                    os.unlink(path)
                self.logger.debug('deleted : %r', path) 
Example #14
Source File: mcg_munge.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def munge(src_dir):
    # stored as: ./MCG-COCO-val2014-boxes/COCO_val2014_000000193401.mat
    # want:      ./MCG/mat/COCO_val2014_0/COCO_val2014_000000141/COCO_val2014_000000141334.mat

    files = os.listdir(src_dir)
    for fn in files:
        base, ext = os.path.splitext(fn)
        # first 14 chars / first 22 chars / all chars + .mat
        # COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat
        first = base[:14]
        second = base[:22]
        dst_dir = os.path.join('MCG', 'mat', first, second)
        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)
        src = os.path.join(src_dir, fn)
        dst = os.path.join(dst_dir, fn)
        print('MV: {} -> {}'.format(src, dst))
        os.rename(src, dst) 
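The sharding scheme described in the comment is easy to verify by hand; the two prefixes are plain string slices (the file name below is taken from the comment at the top of the function):

base = "COCO_val2014_000000193401"
print(base[:14])  # COCO_val2014_0
print(base[:22])  # COCO_val2014_000000193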
Example #15
Source File: weather-icons.py    From unicorn-hat-hd with MIT License
def loop():

    print('Looping through all images in folder {}\n'
          'CTRL+C to skip image'.format(folder_path))

    try:

        for img_file in os.listdir(folder_path):

            if img_file.endswith(icon_extension):

                print('Drawing image: {}'.format(folder_path + img_file))

                img = Image.open(folder_path + img_file)

                draw_animation(img)

            else:

                print('Skipping this file; it may not be an image: {}'.format(img_file))

    except KeyboardInterrupt:
        unicorn.off()

    unicorn.off() 
Example #16
Source File: datasetGenerator.py    From Traffic_sign_detection_YOLO with MIT License
def generateDataset(self):
        # generate training dataset
        self.FileSequence = []
        self.imgPath = './train/images'
        self.annotationsPath = './train/annotations'
        for subdir in self.trainDatasetPath:
            subdir = os.path.join(subdir, os.listdir(subdir)[0])
            print('trainset >>', subdir)
            self.datasetPath = subdir
            self.generateDatasetFiles()
        self.generateFileSequence('./train')    

        # generate testing dataset
        self.FileSequence = []
        self.imgPath = './test/images'
        self.annotationsPath = './test/annotations'
        for subdir in self.testDatasetPath:
            subdir = os.path.join(subdir, os.listdir(subdir)[0])
            print('testset >>', subdir)
            self.datasetPath = subdir
            self.generateDatasetFiles()    
        self.generateFileSequence('./test')    

        print(self.labels)
        self.generateLabels() 
Example #17
Source File: rename_file.py    From face-attendance-machine with Apache License 2.0
def change_name(path):
    global i
    if not os.path.isdir(path) and not os.path.isfile(path):
        return False
    if os.path.isfile(path):
        file_path = os.path.split(path)  # split into directory and file name
        lists = file_path[1].split('.')  # split the file name from its extension
        file_ext = lists[-1]  # take the extension (list slicing)
        img_ext = ['bmp', 'jpeg', 'gif', 'psd', 'png', 'jpg']
        if file_ext in img_ext:
            os.rename(path, file_path[0] + '/' + lists[0] + '_fc.' + file_ext)
            i += 1  # careful: this global counter is an easy pitfall
        # alternatively:
        # img_ext = 'bmp|jpeg|gif|psd|png|jpg'
        # if file_ext in img_ext:
        #    print('ok---' + file_ext)
    elif os.path.isdir(path):
        for x in os.listdir(path):
            change_name(os.path.join(path, x))  # os.path.join() is very handy for path handling
Example #18
Source File: encoding_images.py    From face-attendance-machine with Apache License 2.0
def load_encodings():
    """
    加载保存的历史人脸向量,以及name向量,并返回
    :return:
    """
    known_face_encodings = np.load(KNOWN_FACE_ENCODINGS)
    known_face_names = np.load(KNOWN_FACE_NANE)
    if not os.path.exists(KNOWN_FACE_NANE) or not os.path.exists(KNOWN_FACE_ENCODINGS):
        encoding_images(data_path)
    aa = [file for file in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, file)) and file.endswith("npy")]
    # ("known_face_encodings_") or file.startswith("known_face_name_"))
    for data in aa:
        if data.startswith('known_face_encodings_'):
            tmp_face_encodings = np.load(os.path.join(data_path, data))
            known_face_encodings = np.concatenate((known_face_encodings, tmp_face_encodings), axis=0)
            print("load ", data)
        elif data.startswith('known_face_name_'):
            tmp_face_name = np.load(os.path.join(data_path, data))
            known_face_names = np.concatenate((known_face_names, tmp_face_name), axis=0)
            print("load ", data)
        else:
            print('skip to load original ', data)
    return known_face_encodings, known_face_names
Example #19
Source File: run_attacks_and_defenses.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _load_dataset_clipping(self, dataset_dir, epsilon):
    """Helper method which loads dataset and determines clipping range.

    Args:
      dataset_dir: location of the dataset.
      epsilon: maximum allowed size of adversarial perturbation.
    """
    self.dataset_max_clip = {}
    self.dataset_min_clip = {}
    self._dataset_image_count = 0
    for fname in os.listdir(dataset_dir):
      if not fname.endswith('.png'):
        continue
      image_id = fname[:-4]
      image = np.array(
          Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
      image = image.astype('int32')
      self._dataset_image_count += 1
      self.dataset_max_clip[image_id] = np.clip(image + epsilon,
                                                0,
                                                255).astype('uint8')
      self.dataset_min_clip[image_id] = np.clip(image - epsilon,
                                                0,
                                                255).astype('uint8') 
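The two np.clip calls above just precompute the valid pixel range for an L-infinity ball of radius epsilon around each image, saturating at the 0..255 pixel bounds. A toy check with invented values:

import numpy as np

image = np.array([0, 10, 250], dtype='int32')
epsilon = 8
print(np.clip(image + epsilon, 0, 255))  # [  8  18 255]
print(np.clip(image - epsilon, 0, 255))  # [  0   2 242]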
Example #20
Source File: LWS.py    From Griffin_lim with MIT License
def main():
    data_foler = "data"
    wavs = [os.path.join(data_foler, file[:-4]) for file in os.listdir(data_foler) if file.endswith(".wav")]
    outputs_lws = [file + ".lws.gen.wav" for file in wavs]
    wavs = [audio.load_wav(wav_path + ".wav", hparams.sample_rate) for wav_path in wavs]

    lws_processor = lws.lws(512, 128, mode="speech")  # 512: window length; 128: window shift
    for i, x in enumerate(wavs):
        X = lws_processor.stft(x)  # where x is a single-channel waveform
        X0 = np.abs(X)  # Magnitude spectrogram
        print('{:6}: {:5.2f} dB'.format('Abs(X)', lws_processor.get_consistency(X0)))
        X1 = lws_processor.run_lws(
            X0)  # reconstruction from magnitude (in general, one can reconstruct from an initial complex spectrogram)
        print(X1.shape)
        print('{:6}: {:5.2f} dB'.format('LWS', lws_processor.get_consistency(X1)))
        print(X1.shape)
        wav = lws_processor.istft(X1).astype(np.float32)

        audio.save_wav(wav, outputs_lws[i])
Example #21
Source File: dataio.py    From open-sesame with Apache License 2.0
def read_ptb():
    sys.stderr.write("\nReading PTB data from " + PTB_DATA_DIR + " ...\n")
    sentences = []
    senno = 0
    with codecs.open("ptb.sents", "w", "utf-8") as ptbsf:
        for constitfile in os.listdir(PTB_DATA_DIR):
            reader = BracketParseCorpusReader(PTB_DATA_DIR, constitfile)
            parses = reader.parsed_sents()
            # TODO: map from parses to sentences
            for p in parses:
                ptbsf.write(" ".join(p.leaves()) + "\n")
                tokpos = p.pos()
                tokens = [VOCDICT.addstr(tok) for tok, pos in tokpos]
                postags = [POSDICT.addstr(pos) for tok, pos in tokpos]
                s = Sentence("constit", sentnum=senno, tokens=tokens, postags=postags)
                s.get_all_parts_of_ctree(p, CLABELDICT, False)
                sentences.append(s)
                senno += 1
        sys.stderr.write("# PTB sentences: %d\n" % len(sentences))
    return sentences
Example #22
Source File: novelty.py    From CAMISIM with Apache License 2.0
def get_taxonomic_ids_from_directory(self, directory):
		"""
		search a directory for all files with taxonomic IDs and return them as a set

		@param directory: directory containing sequences named [ID].[nr].fna
		@type directory: str | unicode

		@return: set of the IDs
		@rtype: set[str | unicode]
		"""
		assert self.validate_dir(directory)
		directory_list = os.listdir(directory)
		tax_ids = set()
		for item in directory_list:
			if not os.path.isfile(os.path.join(directory, item)):
				continue
			tid = item.split(".")[0]
			if '_' in tid:
				tid = tid.split('_')[0]
			tax_ids.add(tid)
		return tax_ids 
Example #23
Source File: projectfilefolderhandle.py    From CAMISIM with Apache License 2.0
def get_bam_dirs(self):
		"""
		Get list of bam directories of all samples

		@attention: The list includes previous runs!

		@return: List of bam directories
		@rtype: list[str|unicode]
		"""
		out_dir = self.get_output_directory()
		list_of_dirs = [
			os.path.join(out_dir, folder_name) for folder_name in os.listdir(out_dir)
			if os.path.isdir(os.path.join(out_dir, folder_name))]
		sample_dirs = sorted([
			directory for directory in list_of_dirs
			if self.validate_dir(directory, sub_directories=self._sub_folders_sample, silent=True)])
		return [os.path.join(sample_dir, self._folder_name_bam) for sample_dir in sample_dirs] 
Example #24
Source File: readsimulationwrapper.py    From CAMISIM with Apache License 2.0
def _fix_extensions(self, directory_output, sequence_map): # rename fastq to fq
        files = os.listdir(directory_output)
        for f in files:
            if (f.endswith("fastq")):
                oldname = "%s/%s" % (directory_output,f)
                prefix = f.rsplit('_',1)[0] # original name
                with open(oldname,'r') as reads:
                    newname = "%s/%s.fq" % (directory_output,"".join(f.split(".")[:-1]))
                    with open(newname, 'w') as fq: # rename file to fq and rename sequence names
                        for line in reads:
                            if len(line) < 1:
                                continue
                            seq_name = line[1:].strip()
                            if seq_name in sequence_map[prefix]:
                                newline = line[0] + sequence_map[prefix][seq_name] + '\n'
                                fq.write(newline)
                            else:
                                fq.write(line) 
Example #25
Source File: readsimulationwrapper.py    From CAMISIM with Apache License 2.0
def _sam_from_reads(self, directory_output, dict_id_file_path):
        files = os.listdir(directory_output)
        id_to_cigar_map = {}
        for f in files:
            if f.endswith("_error_profile"):  # these are the errors introduced by Nanosim
                prefix = f.rsplit("_", 2)[0]  # get the basename
                id_to_cigar_map[prefix] = sam_from_reads.get_cigars_nanosim(os.path.join(directory_output, f))
                os.remove(os.path.join(directory_output, f))  # error_profile files are huge (TODO: the temporary space requirement is still high)
        for f in files:
            if f.endswith("_reads.fasta"):
                prefix = f.rsplit(".", 1)[0].rsplit("_", 1)[0]
                read_file = os.path.join(directory_output, f)
                cigars = id_to_cigar_map[prefix]
                reference_path = dict_id_file_path[prefix]
                sam_from_reads.write_sam(read_file, cigars, reference_path, prefix)
                sam_from_reads.convert_fasta(read_file)
                os.remove(os.path.join(directory_output, f))  # do not store the read file twice
Example #26
Source File: create_joint_gs.py    From CAMISIM with Apache License 2.0
def bamToGold(bamtogold, merged, out, metadata, threads):
    """
    Calls the bamToGold script for all of the merged bam files, creating the gold standard
    """
    out_name = os.path.join(out, "anonymous_gsa.fasta")
    all_files = os.listdir(merged)
    bams = []
    for f in all_files:
        if f.endswith(".bam"):
            bams.append(f)
    for bam in bams:
        genome = bam.rstrip(".bam")
        otu, ncbi, novelty, path = metadata[genome]
        cmd = "{bamToGold} -r {path} -b {bam} -l 1 -c 1 >> {gsa}".format(
            bamToGold = bamtogold,
            path = path,
            bam = os.path.join(out,"bam",bam),
            gsa = out_name
        )
        subprocess.call([cmd],shell=True) 
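A pitfall worth knowing when trimming extensions like this: str.rstrip treats its argument as a set of characters to remove, not as a suffix, so name.rstrip(".bam") can silently mangle any name whose stem ends in '.', 'b', 'a', or 'm'. A quick illustration with a made-up file name:

name = "abam.bam"
print(name.rstrip(".bam"))   # '' -- every character is in the strip set
print(name[:-len(".bam")])   # 'abam' -- slicing removes exactly the suffix
# on Python 3.9+, name.removesuffix(".bam") is the clearest option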
Example #27
Source File: discover.py    From multibootusb with GNU General Public License v2.0
def find_subsystems(cls, context):
        """
        Find subsystems in /sys/dev.

        :param Context context: the context
        :returns: a list of available subsystems
        :rtype: list of str
        """
        sys_path = context.sys_path
        return os.listdir(os.path.join(sys_path, 'dev')) 
Example #28
Source File: gen.py    From multibootusb with GNU General Public License v2.0
def clean_iso_cfg_ext_dir(iso_cfg_ext_dir):
    """
    Clean old ISO config files extracted by previous use of multibootusb.
    :param iso_cfg_ext_dir: Path to config extract directory.
    :return:
    """
    if os.path.exists(iso_cfg_ext_dir):
        for f in os.listdir(iso_cfg_ext_dir):
            fullpath = os.path.join(iso_cfg_ext_dir, f)
            if os.path.isdir(fullpath):
                shutil.rmtree(fullpath)
            else:
                os.remove(fullpath)
    else:
        log('iso_cfg_ext_dir directory does not exist.') 
Example #29
Source File: setup.py    From multibootusb with GNU General Public License v2.0
def root_files(_dir):
    """
    Get path to all files of root directories
    :param _dir: Path to a directory
    :return: Path to files as list
    """
    data = []
    for _file in os.listdir(_dir):
        path = os.path.join(_dir, _file)
        if not os.path.isdir(path):
            data.append(path)
    return data 
Example #30
Source File: label.py    From mlimages with MIT License
def label_dir_auto(self, label_file="", path_from_root="", mode="w", label_def_file=""):
        path = self.file_api.to_abs(path_from_root)
        _label_file = label_file if label_file else self.__get_default_path(path_from_root)
        ld_file = label_def_file if label_def_file else FileAPI.add_ext_name(_label_file, "_class_def")

        ls = os.listdir(path)
        ls.sort(key=str.lower)

        labels = []
        images = []
        _m = lambda: mode if len(labels) == 0 else "a"
        for d in ls:
            rpath = self.file_api.join_relative(path_from_root, d)
            p = self.file_api.to_abs(rpath)
            if os.path.isdir(p) and not d.startswith("."):
                labeled = self.__label_dir(rpath, len(labels))
                self.file_api.write_iter(_label_file, _m(), labeled)
                labels.append(rpath)
            elif os.path.isfile(p) and self.file_api.is_image(d):
                images.append(self.file_api.to_rel(p))
        else:  # for/else: always runs here, since the loop above never breaks
            if len(images) > 0:
                labeled = [self.__to_line(i, len(labels)) for i in images]
                labels.append(path_from_root if path_from_root else "ROOT")
                self.file_api.write_iter(_label_file, _m(), iter(labeled))

        if len(labels) > 0:
            self.file_api.write_iter(ld_file, "w", iter([" ".join([str(i), lb]) + "\n" for i, lb in enumerate(labels)]))
        lf = LabelFile(_label_file, img_root=self.file_api.root)
        return lf, ld_file