Python csv.writer() Examples

The following are 30 code examples of csv.writer(), collected from open-source projects. The source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the csv module.
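Before working through the examples, it helps to see the basic shape they all share: open a file (with newline='' in Python 3, as the csv documentation recommends), build a writer around it, and emit rows with writerow() or writerows(). A minimal sketch, with an illustrative filename and data:

import csv

rows = [
    ["name", "score"],  # header row
    ["alice", 10],
    ["bob", 7],
]

# newline='' stops the csv module's '\r\n' line endings from being
# translated a second time by the text layer on Windows.
with open("example.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(rows)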
Example #1
Source File: models.py    From comport with BSD 3-Clause "New" or "Revised" License
def get_denominator_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        writer.writerow(["year", "month", "officers out on service"])

        values = sorted(self.denominator_values,
                        key=lambda x: (x.year, x.month))

        for value in values:
            row = [
                value.year,
                value.month,
                value.officers_out_on_service
            ]
            writer.writerow(row)

        return output.getvalue() 
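Example #1 builds the CSV in memory with io.StringIO instead of a file on disk, and csv.QUOTE_NONNUMERIC makes the writer quote every field that is not a number. A minimal sketch of the same pattern, with illustrative data:

import csv
import io

output = io.StringIO()
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(["year", "month", "officers out on service"])
writer.writerow([2016, 1, 15])

# Prints:
# "year","month","officers out on service"
# 2016,1,15
print(output.getvalue())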
Example #2
Source File: master.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def _save_sorted_results(self, run_stats, scores, image_count, filename):
    """Saves sorted (by score) results of the evaluation.

    Args:
      run_stats: dictionary with runtime statistics for submissions,
        can be generated by WorkPiecesBase.compute_work_statistics
      scores: dictionary mapping submission ids to scores
      image_count: dictionary with number of images processed by submission
      filename: output filename
    """
    with open(filename, 'w') as f:
      writer = csv.writer(f)
      writer.writerow(['SubmissionID', 'ExternalTeamId', 'Score',
                       'MedianTime', 'ImageCount'])
      get_second = lambda x: x[1]
      for s_id, score in sorted(iteritems(scores),
                                key=get_second, reverse=True):
        external_id = self.submissions.get_external_id(s_id)
        stat = run_stats.get(
            s_id, collections.defaultdict(lambda: float('NaN')))
        writer.writerow([s_id, external_id, score,
                         stat['median_eval_time'],
                         image_count[s_id]]) 
Example #3
Source File: lineup_exporter.py    From pydfs-lineup-optimizer with MIT License
def export(self, filename, render_func=None):
        if not self.lineups:
            return
        total_players = 0
        with open(filename, 'r') as csvfile:
            lines = list(csv.reader(csvfile))
            for i, lineup in enumerate(self.lineups, start=1):
                if i >= len(lines):
                    lines.append([])
                players_list = [(render_func or self.render_player)(player) for player in lineup.lineup]
                if not total_players:
                    total_players = len(players_list)
                lines[i] = players_list + lines[i][total_players:]
            for line_order in range(i, len(lines) - 1):
                lines[line_order] = [''] * total_players + lines[line_order][total_players:]
        with open(filename, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(lines) 
Example #4
Source File: validate_and_copy_submissions.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def save_id_to_path_mapping(self):
    """Saves mapping from submission IDs to original filenames.

    This mapping is saved as CSV file into target directory.
    """
    if not self.id_to_path_mapping:
      return
    with open(self.local_id_to_path_mapping_file, 'w') as f:
      writer = csv.writer(f)
      writer.writerow(['id', 'path'])
      for k, v in sorted(iteritems(self.id_to_path_mapping)):
        writer.writerow([k, v])
    cmd = ['gsutil', 'cp', self.local_id_to_path_mapping_file,
           os.path.join(self.target_dir, 'id_to_path_mapping.csv')]
    if subprocess.call(cmd) != 0:
      logging.error('Can\'t copy id_to_path_mapping.csv to target directory') 
Example #5
Source File: env.py    From vergeml with MIT License
def write(self, epoch, step, data):
        if not self.ks:
            return

        # Make sure that keys have no underscores.
        data = {k.replace('_', '-'):v for k, v in data.items()}

        row = [epoch, step]
        for k in self.ks:
            if k in data:
                row.append(_toscalar(data[k]))
                self.prev[k] = data[k]
            elif k in self.prev:
                row.append(_toscalar(self.prev[k]))
            else:
                row.append(None)

        self.writer.writerow(row) 
Example #6
Source File: recorders.py    From pywr with GNU General Public License v3.0
def reset(self):
        import csv
        kwargs = {"newline": "", "encoding": "utf-8"}
        mode = "wt"

        if self.complib == "gzip":
            import gzip
            self._fh = gzip.open(self.csvfile, mode, self.complevel, **kwargs)
        elif self.complib in ("bz2", "bzip2"):
            import bz2
            self._fh = bz2.open(self.csvfile, mode, self.complevel, **kwargs)
        elif self.complib is None:
            self._fh = open(self.csvfile, mode, **kwargs)
        else:
            raise KeyError("Unexpected compression library: {}".format(self.complib))
        self._writer = csv.writer(self._fh, **self.csv_kwargs)
        # Write header data
        row = ["Datetime"] + [name for name in self._node_names]
        self._writer.writerow(row) 
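The branching in Example #6 works because gzip.open and bz2.open, like the built-in open, return text-mode file objects when opened with "wt", so csv.writer can use them directly. A minimal sketch of writing a gzip-compressed CSV, with an illustrative filename and rows:

import csv
import gzip

with gzip.open("output.csv.gz", "wt", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["Datetime", "node_a", "node_b"])
    writer.writerow(["2020-01-01 00:00", 1.0, 2.0])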
Example #7
Source File: stt_bi_graphemes_util.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def generate_bi_graphemes_dictionary(label_list):
    freqs = Counter()
    for label in label_list:
        label = label.split(' ')
        for i in label:
            for pair in split_every(2, i):
                if len(pair) == 2:
                    freqs[pair] += 1

    with open('resources/unicodemap_en_baidu_bi_graphemes.csv', 'w') as bigram_label:
        bigramwriter = csv.writer(bigram_label, delimiter=',')
        baidu_labels = list('\' abcdefghijklmnopqrstuvwxyz')
        for index, key in enumerate(baidu_labels):
            bigramwriter.writerow((key, index+1))
        for index, key in enumerate(freqs.keys()):
            bigramwriter.writerow((key, index+len(baidu_labels)+1)) 
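Example #7 depends on a project helper, split_every(2, i), which is not shown here; from its use it is assumed to chop a word into consecutive two-character chunks so that character pairs can be counted. A plausible sketch of such a helper:

def split_every(n, s):
    # Yield consecutive n-character chunks of s, e.g.
    # split_every(2, "hello") -> "he", "ll", "o".
    return (s[i:i + n] for i in range(0, len(s), n))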
Example #8
Source File: common.py    From razzy-spinner with GNU General Public License v3.0
def outf_writer_compat(outfile, encoding, errors, gzip_compress=False):
    """
    Identify appropriate CSV writer given the Python version
    """
    if compat.PY3:
        if gzip_compress:
            outf = gzip.open(outfile, 'wt', encoding=encoding, errors=errors)
        else:
            outf = open(outfile, 'w', encoding=encoding, errors=errors)
        writer = csv.writer(outf)
    else:
        if gzip_compress:
            outf = gzip.open(outfile, 'wb')
        else:
            outf = open(outfile, 'wb')
        writer = compat.UnicodeWriter(outf, encoding=encoding, errors=errors)
    return (writer, outf) 
Example #9
Source File: adsb-polar.py    From dump1090-tools with ISC License
def write(self, filename):
        with closing(open(filename + '.new', 'w')) as w:
            c = csv.writer(w)
            c.writerow(['bearing_start','bearing_end','bin_start','bin_end','samples','unique'])
            for b_low,b_high,histo in self.values():
                # make sure we write at least one value per sector,
                # it makes things a little easier when plotting
                first = True
                for h_low,h_high,count,unique in histo.values():
                    if unique or first:
                        c.writerow(['%f' % b_low,
                                    '%f' % b_high,
                                    '%f' % h_low,
                                    '%f' % h_high,
                                    '%d' % count,
                                    '%d' % unique])
                        first = False
        os.rename(filename + '.new', filename) 
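Example #9 writes to a temporary '.new' file and renames it over the real filename only after the writer has finished, so a reader never sees a half-written CSV. Note that os.rename fails on Windows when the target already exists; in Python 3, os.replace gives the atomic-overwrite behavior on all platforms. A minimal sketch of the pattern, with hypothetical names:

import csv
import os

def write_csv_atomically(filename, rows):
    tmp = filename + '.new'
    with open(tmp, 'w', newline='') as f:
        csv.writer(f).writerows(rows)
    # os.replace atomically swaps in the new file, overwriting
    # any existing target on every platform.
    os.replace(tmp, filename)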
Example #10
Source File: order.py    From Servo with BSD 2-Clause "Simplified" License
def download_results(request):
    import csv
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="orders.csv"'

    writer = csv.writer(response)
    header = [
        'CODE',
        'CUSTOMER',
        'CREATED_AT',
        'ASSIGNED_TO',
        'CHECKED_IN',
        'LOCATION'
    ]
    writer.writerow(header)

    for o in request.session['order_queryset']:
        row = [o.code, o.customer, o.created_at,
               o.user, o.checkin_location, o.location]
        coded = [unicode(s).encode('utf-8') for s in row]

        writer.writerow(coded)

    return response 
Example #11
Source File: main.py    From ICDAR-2019-SROIE with MIT License
def for_task3():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("boundingbox/*.txt")]
    box_files = [s + ".txt" for s in filenames]
    for boxfile in box_files:
        box = []
        with open(boxfile,'r') as boxes:
            for line in csv.reader(boxes):
                box.append([int(string, 10) for string in line[0:8]])
        words = []
        with open('test_result/'+ boxfile.split('/')[1], 'r') as prediction:
            for line in csv.reader(prediction):
                words.append(line)
        words = [s if len(s)!=0 else [' '] for s in words]
        new = []
        for line in zip(box,words):
            a,b = line
            new.append(a+b)
        with open('for_task3/'+ boxfile.split('/')[1], 'w+') as newfile:
            csv_out = csv.writer(newfile)
            for line in new:
                csv_out.writerow(line) 
Example #12
Source File: main.py    From ICDAR-2019-SROIE with MIT License
def process_txt():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("test_result/*.txt")]
    old_files = [s + ".txt" for s in filenames]
    for old_file in old_files:
        new = []
        with open(old_file, "r") as old:
            for line in csv.reader(old):
                if not line:
                    continue
                if not line[0]:
                    continue
                if line[0][0] == ' ' or line[0][-1] == ' ':
                    line[0] = line[0].strip()
                if ' ' in line[0]:
                    line = line[0].split(' ')
                new.append(line)
        with open('task2_result/' + old_file.split('/')[1], "w+") as newfile:
            wr = csv.writer(newfile, delimiter='\n')
            new = [[s[0].upper()] for s in new]
            wr.writerows(new) 
Example #13
Source File: prepare_dataset.py    From ICDAR-2019-SROIE with MIT License
def get_data():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("original/*.jpg")]
    jpg_files = [s + ".jpg" for s in filenames]
    txt_files = [s + ".txt" for s in filenames]

    for file in txt_files:
        boxes = []
        with open(file, "r", encoding="utf-8", newline="") as lines:
            for line in csv.reader(lines):
                boxes.append([line[0], line[1], line[6], line[7]])
        with open('mlt/label/' + file.split('/')[1], "w+") as labelFile:
            wr = csv.writer(labelFile)
            wr.writerows(boxes)

    for jpg in jpg_files:
        shutil.copy(jpg, 'mlt/image/') 
Example #14
Source File: CreateSubsetFile.py    From python-toolbox-for-rapid with Apache License 2.0
def execute(self, parameters, messages):
        """The source code of the tool."""
        in_drainage_line = parameters[0].valueAsText
        out_csv_file = parameters[1].valueAsText

        fields = ['NextDownID', 'HydroID']

        list_all = []

        '''The line below makes sure that rows in the subset file are
           arranged in descending order of NextDownID of stream segments'''
        for row in sorted(arcpy.da.SearchCursor(in_drainage_line, fields), reverse=True):
            list_all.append([row[1]])

        with open(out_csv_file,'wb') as csvfile:
            connectwriter = csv.writer(csvfile, dialect='excel')
            for row_list in list_all:
                out = row_list
                connectwriter.writerow(out)

        return 
Example #15
Source File: statistics.py    From neat-python with BSD 3-Clause "New" or "Revised" License
def save_species_count(self, delimiter=' ', filename='speciation.csv'):
        """ Log speciation throughout evolution. """
        with open(filename, 'w') as f:
            w = csv.writer(f, delimiter=delimiter)
            for s in self.get_species_sizes():
                w.writerow(s) 
Example #16
Source File: statistics.py    From neat-python with BSD 3-Clause "New" or "Revised" License
def save_genome_fitness(self,
                            delimiter=' ',
                            filename='fitness_history.csv'):
        """ Saves the population's best and average fitness. """
        with open(filename, 'w') as f:
            w = csv.writer(f, delimiter=delimiter)

            best_fitness = [c.fitness for c in self.most_fit_genomes]
            avg_fitness = self.get_fitness_mean()

            for best, avg in zip(best_fitness, avg_fitness):
                w.writerow([best, avg]) 
Example #17
Source File: drive.py    From SDRC with GNU General Public License v3.0
def write(self, direction):
        self.writer.writerow(direction) 
Example #18
Source File: generate_poses.py    From pointnet-registration-framework with MIT License
def generate_poses(file_path):
	with open(file_path, 'a') as csvfile:
		csvwriter = csv.writer(csvfile)
		for idx in range(10000):
			orientation_x = np.round(np.random.uniform()*2*45*(np.pi/180) - 45*(np.pi/180),4)
			orientation_y = np.round(np.random.uniform()*2*45*(np.pi/180) - 45*(np.pi/180),4)
			orientation_z = np.round(np.random.uniform()*2*45*(np.pi/180) - 45*(np.pi/180),4)
			x = np.round(2*np.random.uniform()-1,4)
			y = np.round(2*np.random.uniform()-1,4)
			z = np.round(2*np.random.uniform()-1,4)
			pose = [x,y,z,orientation_x,orientation_y,orientation_z]
			csvwriter.writerow(pose) 
Example #19
Source File: helper_analysis.py    From pointnet-registration-framework with MIT License
def generate_stat_data(self, filename):
		eval_poses = helper.read_poses(FLAGS.data_dict, FLAGS.eval_poses)
		template_data = self.templates[self.template_idx,:,:].reshape((1,MAX_NUM_POINT,3))
		TIME, ITR, Trans_Err, Rot_Err = [], [], [], []
		for pose in eval_poses:
			source_data = helper.apply_transformation(self.templates[self.template_idx,:,:],pose.reshape((-1,6)))
			final_pose, _, _, _, _, elapsed_time, itr = self.test_one_case(source_data,template_data)
			translation_error, rotational_error = self.find_errors(pose.reshape((-1,6)), final_pose)
			TIME.append(elapsed_time)
			ITR.append(itr)
			Trans_Err.append(translation_error)
			Rot_Err.append(rotational_error)
		
		TIME_mean, ITR_mean, Trans_Err_mean, Rot_Err_mean = sum(TIME)/len(TIME), sum(ITR)/len(ITR), sum(Trans_Err)/len(Trans_Err), sum(Rot_Err)/len(Rot_Err)
		TIME_var, ITR_var, Trans_Err_var, Rot_Err_var = np.var(np.array(TIME)), np.var(np.array(ITR)), np.var(np.array(Trans_Err)), np.var(np.array(Rot_Err))
		import csv
		with open(filename + '.csv','w') as csvfile:
			csvwriter = csv.writer(csvfile)
			for i in range(len(TIME)):
				csvwriter.writerow([i, TIME[i], ITR[i], Trans_Err[i], Rot_Err[i]])
		with open(filename + '.txt', 'w') as file:
			file.write("Mean of Time: {}\n".format(TIME_mean))
			file.write("Mean of Iterations: {}\n".format(ITR_mean))
			file.write("Mean of Translation Error: {}\n".format(Trans_Err_mean))
			file.write("Mean of Rotation Error: {}\n".format(Rot_Err_mean))

			file.write("Variance in Time: {}\n".format(TIME_var))
			file.write("Variance in Iterations: {}\n".format(ITR_var))
			file.write("Variance in Translation Error: {}\n".format(Trans_Err_var))
			file.write("Variance in Rotation Error: {}\n".format(Rot_Err_var))
Example #20
Source File: transformations.py    From simnibs with GNU General Public License v3.0
def _write_csv(fn, type_, coordinates, extra, name, extra_cols, header):
    coordinates = coordinates.tolist()
    name = [[] if not n else [n] for n in name]
    extra_cols = [[] if not e_c else e_c for e_c in extra_cols]
    extra = [[] if e is None else e.tolist() for e in extra]
    with open(fn, 'w', newline='') as f:
        writer = csv.writer(f)
        if header != []:
            writer.writerow(header)
        for t, c, e, n, e_c in zip(type_, coordinates, extra, name, extra_cols):
            writer.writerow([t] + c + e + n + e_c) 
Example #21
Source File: opt_struct.py    From simnibs with GNU General Public License v3.0
def write_currents_csv(self, currents, fn_csv, electrode_names=None):
        ''' Writes the currents and the corresponding electrode names to a CSV file
        
        Parameters
        ------------
        currents: N_elec x 1 ndarray
            Array with electrode currents
        fn_csv: str
            Name of CSV file to write
        electrode_names: list of strings (optional)
            Name of electrodes. Default: will read from the electrode_names attribute in
            the leadfield dataset
        '''
        if electrode_names is None:
            if self.leadfield_hdf is not None:
                with h5py.File(self.leadfield_hdf, 'r') as f:
                    electrode_names = f[self.leadfield_path].attrs['electrode_names']
                    electrode_names = [n.decode() for n in electrode_names]
            else:
                raise ValueError('Please define the electrode names')

        assert len(electrode_names) == len(currents)
        with open(fn_csv, 'w', newline='') as f:
            writer = csv.writer(f)
            for n, c in zip(electrode_names, currents):
                writer.writerow([n, c]) 
Example #22
Source File: cli.py    From smother with MIT License
def csv(ctx, dst):
    """
    Flatten a coverage file into a CSV
    of source_context, testname
    """
    sm = Smother.load(ctx.obj['report'])
    semantic = ctx.obj['semantic']
    writer = _csv.writer(dst, lineterminator='\n')
    dst.write("source_context, test_context\n")
    writer.writerows(sm.iter_records(semantic=semantic)) 
Example #23
Source File: category_util.py    From object_detector_app with MIT License
def save_categories_to_csv_file(categories, csv_path):
  """Saves categories to a csv file.

  Args:
    categories: A list of dictionaries representing categories to save to file.
                Each category must contain an 'id' and 'name' field.
    csv_path: Path to the csv file to be parsed into categories.
  """
  categories.sort(key=lambda x: x['id'])
  with tf.gfile.Open(csv_path, 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=',', quotechar='"')
    for category in categories:
      writer.writerow([category['id'], category['name']]) 
Example #24
Source File: prepare.py    From DeepLung with GNU General Public License v3.0
def splitvaltestcsv():
    testfiles = []
    for f in os.listdir(config['test_data_path']):
        if f.endswith('.mhd'):
            testfiles.append(f[:-4])
    valcsvlines = []
    testcsvlines = []
    import csv 
    valf = open(config['val_annos_path'], 'r')
    valfcsv = csv.reader(valf)
    for line in valfcsv:
        if line[0] in testfiles:
            testcsvlines.append(line)
        else:
            valcsvlines.append(line)
    valf.close()
    testf = open(config['test_annos_path']+'annotations.csv', 'w')
    testfcsv = csv.writer(testf)
    for line in testcsvlines:
        testfcsv.writerow(line)
    testf.close()
    valf = open(config['val_annos_path'], 'w')
    valfcsv = csv.writer(valf)
    for line in valcsvlines:
        valfcsv.writerow(line)
    valf.close() 
Example #25
Source File: csvTools.py    From DeepLung with GNU General Public License v3.0
def writeCSV(filename, lines):
    with open(filename, "wb") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerows(lines) 
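writeCSV opens its file in binary mode ("wb"), the Python 2 idiom; under Python 3, csv.writer needs a text-mode file and writing rows to a binary file raises a TypeError. An assumed Python 3 equivalent:

import csv

def writeCSV(filename, lines):
    # Python 3: text mode plus newline='' replaces 'wb'.
    with open(filename, "w", newline="") as f:
        csv.writer(f).writerows(lines)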
Example #26
Source File: parser.py    From cronosparser with MIT License
def parse(db_folder, out_folder):
    """
    Parse a cronos database.

    Convert the database located in ``db_folder`` into CSV files in the
    directory ``out_folder``.
    """
    # The database structure, containing table and column definitions as
    # well as other data.
    stru_dat = get_file(db_folder, 'CroStru.dat')
    # Index file for the database, which contains offsets for each record.
    data_tad = get_file(db_folder, 'CroBank.tad')
    # Actual data records, can only be decoded using CroBank.tad.
    data_dat = get_file(db_folder, 'CroBank.dat')
    if None in [stru_dat, data_tad, data_dat]:
        raise CronosException("Not all database files are present.")

    meta, tables = parse_structure(stru_dat)

    for table in tables:
        # TODO: do we want to export the "FL" table?
        if table['abbr'] == 'FL' and table['name'] == 'Files':
            continue
        fh = open(make_csv_file_name(meta, table, out_folder), 'w')
        columns = table.get('columns')
        writer = csv.writer(fh)
        writer.writerow([encode_cell(c['name']) for c in columns])
        for row in parse_data(data_tad, data_dat, table.get('id'), columns):
            writer.writerow([encode_cell(c) for c in row])
        fh.close() 
Example #27
Source File: frocwrtdetpepchluna16.py    From DeepLung with GNU General Public License v3.0
def getcsv(detp, eps):
    for ep in eps:
        bboxpath = results_path + str(ep) + '/'
        for detpthresh in detp:
            print('ep', ep, 'detp', detpthresh)
            f = open(bboxpath + 'predanno' + str(detpthresh) + 'd3.csv', 'w')
            fwriter = csv.writer(f)
            fwriter.writerow(firstline)
            fnamelist = []
            for fname in os.listdir(bboxpath):
                if fname.endswith('_pbb.npy'):
                    fnamelist.append(fname)
            print(len(fnamelist))
            # Apply convertcsv to every detection file using the pool `p`
            # defined elsewhere in the module.
            predannolist = p.map(functools.partial(convertcsv, bboxpath=bboxpath, detp=detpthresh), fnamelist)
            for predanno in predannolist:
                for row in predanno:
                    fwriter.writerow(row)
            f.close()
# getcsv(detp, eps)
Example #28
Source File: azure_bdist_wheel.py    From botbuilder-python with MIT License
def write_record(self, bdist_dir, distinfo_dir):
        from wheel.util import urlsafe_b64encode

        record_path = os.path.join(distinfo_dir, "RECORD")
        record_relpath = os.path.relpath(record_path, bdist_dir)

        def walk():
            for dir, dirs, files in os.walk(bdist_dir):
                dirs.sort()
                for f in sorted(files):
                    yield os.path.join(dir, f)

        def skip(path):
            """Wheel hashes every possible file."""
            return path == record_relpath

        with open_for_csv(record_path, "w+") as record_file:
            writer = csv.writer(record_file)
            for path in walk():
                relpath = os.path.relpath(path, bdist_dir)
                if skip(relpath):
                    hash = ""
                    size = ""
                else:
                    with open(path, "rb") as f:
                        data = f.read()
                    digest = hashlib.sha256(data).digest()
                    hash = "sha256=" + native(urlsafe_b64encode(digest))
                    size = len(data)
                record_path = os.path.relpath(path, bdist_dir).replace(os.path.sep, "/")
                writer.writerow((record_path, hash, size))


Example #29
Source File: ls.py    From vergeml with MIT License
def _output_table(output, theader, tdata, left_align):

    if not tdata:
        print("No matching trained models found.", file=sys.stderr)

    if output == 'table':
        if not tdata:
            return
        tdata.insert(0, theader)
        print(DISPLAY.table(tdata, left_align=left_align).getvalue(fit=True))

    elif output == 'json':
        res = []
        for row in tdata:
            res.append(dict(zip(theader, row)))
        print(json.dumps(res))

    elif output == 'csv':
        buffer = io.StringIO()

        writer = csv.writer(buffer)
        writer.writerow(theader)
        for row in tdata:
            writer.writerow(row)
        val = buffer.getvalue()
        val = val.replace('\r', '')
        print(val.strip()) 
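Example #29 strips the '\r' characters afterwards because csv.writer's default lineterminator is '\r\n'. Passing lineterminator='\n' up front, as Example #22 does, makes that post-processing unnecessary; a minimal sketch:

import csv
import io

buffer = io.StringIO()
writer = csv.writer(buffer, lineterminator='\n')
writer.writerow(['col_a', 'col_b'])
writer.writerow([1, 2])
print(buffer.getvalue().strip())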
Example #30
Source File: drive.py    From SDRC with GNU General Public License v3.0
def __init__(self, fileName='data/steering.csv'):
        file = open(fileName, 'a')
        self.writer = csv.writer(file)