Python csv.QUOTE_NONNUMERIC Examples
The following are 30 code examples of the constant csv.QUOTE_NONNUMERIC.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the csv module, or try the search function.
Example #1
Source File: models.py From comport with BSD 3-Clause "New" or "Revised" License | 9 votes |
def get_denominator_csv(self):
    """Return a CSV string of officers-out-on-service counts.

    One row per (year, month) record, sorted chronologically, with all
    non-numeric fields quoted.
    """
    buffer = io.StringIO()
    writer = csv.writer(buffer, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow(["year", "month", "officers out on service"])
    # Chronological order: sort by year first, then month.
    ordered = sorted(self.denominator_values, key=lambda v: (v.year, v.month))
    for record in ordered:
        writer.writerow([record.year, record.month, record.officers_out_on_service])
    return buffer.getvalue()
Example #2
Source File: api.py From dribdat with MIT License | 6 votes |
def gen_csv(csvdata):
    """Render a list of dicts as CSV text.

    The header comes from the first row's keys. None becomes an empty
    cell; numbers and datetimes are passed through so QUOTE_NONNUMERIC
    leaves them unquoted; everything else is UTF-8 encoded.
    """
    headerline = csvdata[0].keys()
    if PY3:
        output = io.StringIO()
    else:
        # Python 2: csv wants bytes, so encode the header up front.
        output = io.BytesIO()
        headerline = [name.encode('utf-8') for name in headerline]
    writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow(headerline)
    for record in csvdata:
        cells = []
        for value in record.values():
            if value is None:
                cells.append("")
            elif isinstance(value, (int, float, datetime)):
                cells.append(value)
            else:
                cells.append(value.encode('utf-8'))
        writer.writerow(cells)
    return output.getvalue()

# ------ EVENT INFORMATION ---------
# API: Outputs JSON about the current event
Example #3
Source File: common.py From elasticintel with GNU General Public License v3.0 | 6 votes |
def writerow(self, row):
    """Write one row through the wrapped csv writer, then move the
    buffered UTF-8 bytes to the target stream in the target encoding.
    """
    def _keep_raw(cell):
        # Strings pass straight through; numbers also pass when
        # QUOTE_NONNUMERIC is active so they are written unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    prepared = []
    for cell in row:
        prepared.append(cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8"))
    self.writer.writerow(prepared)

    # Drain the UTF-8 output queued by the csv writer ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... and re-encode it for the destination stream.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue for the next row.
    self.queue.truncate(0)
Example #4
Source File: common.py From vnpy_crypto with MIT License | 6 votes |
def writerow(self, row):
    """Write one row through the wrapped csv writer, re-encoding the
    buffered UTF-8 output into the target encoding.
    """
    def _keep_raw(cell):
        # Leave str cells alone; leave numbers alone under
        # QUOTE_NONNUMERIC so the csv module emits them unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    prepared = [cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8")
                for cell in row]
    self.writer.writerow(prepared)

    # Fetch the UTF-8 bytes the csv writer queued ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode them for the destination stream and write.
    self.stream.write(self.encoder.encode(raw))
    # Empty the intermediate queue.
    self.queue.truncate(0)
Example #5
Source File: common.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def writerows(self, rows):
    """Write multiple rows through the wrapped csv writer, then move
    the buffered UTF-8 bytes to the target stream in the target
    encoding.
    """
    def _keep_raw(cell):
        # Strings pass straight through; numbers also pass when
        # QUOTE_NONNUMERIC is active so they are written unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    # Convert in place, mirroring the original's mutation of *rows*.
    for idx, current in enumerate(rows):
        rows[idx] = [cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8")
                     for cell in current]
    self.writer.writerows([list(r) for r in rows])

    # Drain the UTF-8 output queued by the csv writer ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... and re-encode it for the destination stream.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue for the next batch.
    self.queue.truncate(0)
Example #6
Source File: common.py From recruit with Apache License 2.0 | 6 votes |
def writerow(self, row):
    """Write one row via the underlying writer, then re-encode the
    buffered UTF-8 output into the target encoding and flush it to the
    destination stream.
    """
    def _keep_raw(cell):
        # str cells need no conversion; numeric cells stay raw under
        # QUOTE_NONNUMERIC so they come out unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    prepared = []
    for cell in row:
        if _keep_raw(cell):
            prepared.append(cell)
        else:
            prepared.append(pprint_thing(cell).encode("utf-8"))
    self.writer.writerow(prepared)

    # Pull the queued UTF-8 bytes ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode and forward to the target stream.
    self.stream.write(self.encoder.encode(raw))
    # Clear the queue for the next row.
    self.queue.truncate(0)
Example #7
Source File: common.py From vnpy_crypto with MIT License | 6 votes |
def writerows(self, rows):
    """Write several rows via the underlying writer, re-encoding the
    buffered UTF-8 output into the target encoding.
    """
    def _keep_raw(cell):
        # Leave str cells alone; leave numbers alone under
        # QUOTE_NONNUMERIC so they are written unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    # Mutate *rows* in place, as the original implementation does.
    for idx, current in enumerate(rows):
        rows[idx] = [cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8")
                     for cell in current]
    self.writer.writerows([list(r) for r in rows])

    # Fetch the UTF-8 bytes queued by the csv writer ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode and write them to the destination stream.
    self.stream.write(self.encoder.encode(raw))
    # Empty the intermediate queue.
    self.queue.truncate(0)
Example #8
Source File: common.py From recruit with Apache License 2.0 | 6 votes |
def writerows(self, rows):
    """Write multiple rows, then transfer the buffered UTF-8 output to
    the target stream in the target encoding.
    """
    def _keep_raw(cell):
        # Strings and (under QUOTE_NONNUMERIC) numbers are written
        # as-is; everything else is pretty-printed and UTF-8 encoded.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    # In-place conversion keeps the original's side effect on *rows*.
    for idx, current in enumerate(rows):
        rows[idx] = [cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8")
                     for cell in current]
    self.writer.writerows([list(r) for r in rows])

    # Drain the queued UTF-8 bytes ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode into the target encoding and flush.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue.
    self.queue.truncate(0)
Example #9
Source File: common.py From elasticintel with GNU General Public License v3.0 | 6 votes |
def writerows(self, rows):
    """Write multiple rows through the wrapped csv writer, then move
    the buffered UTF-8 bytes to the target stream in the target
    encoding.
    """
    def _keep_raw(cell):
        # Strings pass straight through; numbers also pass when
        # QUOTE_NONNUMERIC is active so they are written unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    # Convert in place, mirroring the original's mutation of *rows*.
    for idx, current in enumerate(rows):
        rows[idx] = [cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8")
                     for cell in current]
    self.writer.writerows([list(r) for r in rows])

    # Drain the UTF-8 output queued by the csv writer ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... and re-encode it for the destination stream.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue for the next batch.
    self.queue.truncate(0)
Example #10
Source File: common.py From Splunking-Crime with GNU Affero General Public License v3.0 | 6 votes |
def writerow(self, row):
    """Write one row through the wrapped csv writer, then move the
    buffered UTF-8 bytes to the target stream in the target encoding.
    """
    def _keep_raw(cell):
        # Strings pass straight through; numbers also pass when
        # QUOTE_NONNUMERIC is active so they are written unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    prepared = []
    for cell in row:
        prepared.append(cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8"))
    self.writer.writerow(prepared)

    # Drain the UTF-8 output queued by the csv writer ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... and re-encode it for the destination stream.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue for the next row.
    self.queue.truncate(0)
Example #11
Source File: report.py From bobby_boy with MIT License | 6 votes |
def clear(filename):
    """Rewrite *filename* in place, re-quoting every row with
    QUOTE_NONNUMERIC and stripping padding spaces from the category
    column (index 2) of non-comment rows.

    Rows whose first cell starts with "#" are copied through unchanged.
    """
    # Read everything first so the same file can safely be reopened for
    # writing.
    # BUG FIX: the csv module requires text-mode files opened with
    # newline='' on Python 3; the original used binary 'rb'/'wb'.
    with open(filename, newline='') as csvfile:
        rows = list(csv.reader(csvfile, delimiter=';'))
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=';', quotechar='"',
                            skipinitialspace=True,
                            quoting=csv.QUOTE_NONNUMERIC)  # QUOTE_MINIMAL vs QUOTE_NONNUMERIC
        for row in rows:
            if not row[0].startswith("#"):
                # Strip stray spaces from the category-name column.
                # NOTE(review): the original comment mentions
                # non-breaking spaces, but strip(' ') removes only ASCII
                # spaces — confirm which character is intended.
                row[2] = row[2].strip(' ')
            writer.writerow(row)
Example #12
Source File: common.py From Computable with MIT License | 6 votes |
def writerows(self, rows):
    """Write several rows via the underlying writer, re-encoding the
    buffered UTF-8 output into the target encoding.
    """
    def _keep_raw(cell):
        # str cells need no conversion; numeric cells stay raw under
        # QUOTE_NONNUMERIC so they come out unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    # Mutate *rows* in place, as the original implementation does.
    for idx, current in enumerate(rows):
        rows[idx] = [cell if _keep_raw(cell) else pprint_thing(cell).encode('utf-8')
                     for cell in current]
    self.writer.writerows([list(r) for r in rows])

    # Fetch the queued UTF-8 bytes ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode and forward to the target stream.
    self.stream.write(self.encoder.encode(raw))
    # Clear the queue for the next batch.
    self.queue.truncate(0)
Example #13
Source File: py2.py From pyRevit with GNU General Public License v3.0 | 6 votes |
def __init__(self, f, dialect=None, encoding='utf-8', errors='strict', **kwds):
    """Wrap csv.reader with encoding-aware decoding (Python 2 shim).

    When no dialect is given and none of the individual formatting
    parameters appear in **kwds, fall back to the standard 'excel'
    dialect.
    """
    _FORMAT_PARAMS = ('delimiter', 'doublequote', 'escapechar',
                      'lineterminator', 'quotechar', 'quoting',
                      'skipinitialspace')
    if dialect is None and not any(name in _FORMAT_PARAMS for name in kwds):
        dialect = csv.excel
    self.reader = csv.reader(f, dialect, **kwds)
    self.encoding = encoding
    self.encoding_errors = errors
    # NOTE(review): self.dialect is presumably provided elsewhere on the
    # class (e.g. a property proxying self.reader.dialect) — confirm.
    # Also note this is a bitwise test: QUOTE_NONE (3) would match too.
    self._parse_numerics = bool(
        self.dialect.quoting & csv.QUOTE_NONNUMERIC)
Example #14
Source File: etl.py From open-context-py with GNU General Public License v3.0 | 6 votes |
def update_subjects_context_open_context_db(
    project_uuid=PROJECT_UUID,
    source_prefix=SOURCE_ID_PREFIX,
    load_files=DESTINATION_PATH,
    all_contexts_file=FILENAME_ALL_CONTEXTS,
    loaded_contexts_file=FILENAME_LOADED_CONTEXTS,
):
    """Load subjects/context items and containment relations, then
    write the loaded contexts back out as a CSV for later ETL steps.
    """
    contexts_df = pd.read_csv(load_files + all_contexts_file)
    updated_df = update_contexts_subjects(
        project_uuid,
        (source_prefix + all_contexts_file),
        contexts_df,
    )
    # Persist the newly loaded contexts; quote all non-numeric fields
    # so identifiers survive round-tripping.
    updated_df.to_csv(
        (load_files + loaded_contexts_file),
        index=False,
        quoting=csv.QUOTE_NONNUMERIC,
    )
Example #15
Source File: common.py From Computable with MIT License | 6 votes |
def writerow(self, row):
    """Write one row via the underlying writer, then re-encode the
    buffered UTF-8 output into the target encoding.
    """
    def _keep_raw(cell):
        # Strings and (under QUOTE_NONNUMERIC) numbers are written
        # as-is; everything else is pretty-printed and UTF-8 encoded.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    prepared = [cell if _keep_raw(cell) else pprint_thing(cell).encode('utf-8')
                for cell in row]
    self.writer.writerow(prepared)

    # Drain the queued UTF-8 bytes ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode into the target encoding and flush.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue.
    self.queue.truncate(0)
Example #16
Source File: policygradient-car-10.1.1.py From Advanced-Deep-Learning-with-Keras with MIT License | 6 votes |
def setup_writer(fileid, postfix):
    """Prepare a CSV file and writer for episode data logging.

    Arguments:
        fileid (string): unique file identifier (becomes '<fileid>.csv')
        postfix (string): directory path for the output file

    Returns:
        (file, csv.writer): the open, line-buffered file object and a
        writer that quotes every non-numeric field.
    """
    # we dump episode num, step, total reward, and
    # number of episodes solved in a csv file for analysis
    csvfilename = "%s.csv" % fileid
    csvfilename = os.path.join(postfix, csvfilename)
    # Line-buffered (buffering=1) so each episode row hits disk promptly.
    # BUG FIX: newline='' stops the text layer translating the csv
    # module's own \r\n terminators (doubled \r on Windows otherwise).
    csvfile = open(csvfilename, 'w', 1, newline='')
    writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow(['Episode', 'Step', 'Total Reward', 'Number of Episodes Solved'])
    return csvfile, writer
Example #17
Source File: emote_trainer.py From emoter with MIT License | 6 votes |
def openPDF(path, data):
    """Stub: PDF ingestion is not implemented; always returns None."""
    # NOTE: a commented-out openCSV() prototype (reading a CSV with
    # csv.QUOTE_NONNUMERIC into a list) previously followed this stub;
    # it was dead code and is omitted here.
    return None
Example #18
Source File: table.py From cfmmc with MIT License | 6 votes |
def dump(self):
    """Append self.data to the CSV file at self.csvFilePath.

    When the file does not exist yet, a header row (self.fields) is
    written first. Each record contributes one row via its values().
    """
    filePath = self.csvFilePath
    isCsvExists = os.path.exists(filePath)
    data = self.data
    fields = self.fields
    # BUG FIX: newline='' lets the csv module control row terminators
    # (prevents blank lines between rows on Windows).
    with open(filePath, 'a', newline='') as f:
        wr = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
        if not isCsvExists:
            # New file: write the header row first.
            wr.writerow(fields)
        for d in data:
            # NOTE(review): relies on each record's values() order
            # matching self.fields — confirm upstream guarantees this.
            wr.writerow(d.values())
Example #19
Source File: __main__.py From BIGSI with MIT License | 6 votes |
def d_to_csv(d, with_header=True, carriage_return=True):
    """Serialize a query-result dict to CSV text.

    d has a "query" string and a "results" list of dicts; columns are
    the sorted keys of the first result, prefixed by a "query" column.
    When carriage_return is False the final character (the trailing
    newline) is dropped.
    """
    table = []
    results = d["results"]
    if results:
        header = sorted(results[0].keys())
        if with_header:
            table.append(["query"] + header)
        for res in results:
            table.append([d["query"]] + [res[key] for key in header])
    buffer = io.StringIO()
    writer = csv.writer(buffer, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerows(table)
    text = buffer.getvalue()
    if carriage_return:
        return text
    return text[:-1]
Example #20
Source File: csvdump.py From integration with Apache License 2.0 | 6 votes |
def save_csv(prefix='data'):
    """Dump the global ChangeSets and FileTypes tables to CSV files.

    Writes '<prefix>-changesets.csv' and '<prefix>-filetypes.csv'; each
    file is only created when its table is non-empty.
    """
    # Dump the ChangeSets
    if len(ChangeSets) > 0:
        # BUG FIX: use a context manager so the file is flushed and
        # closed (the original leaked the descriptor); newline='' lets
        # the csv module control row terminators.
        with open('%s-changesets.csv' % prefix, 'w', newline='') as fd:
            writer = csv.writer(fd, quoting=csv.QUOTE_NONNUMERIC)
            # NOTE(review): 'Affliation' typo kept for output
            # compatibility with downstream consumers.
            writer.writerow(['Commit', 'Date', 'Domain', 'Email', 'Name',
                             'Affliation', 'Added', 'Removed'])
            for commit in ChangeSets:
                writer.writerow(commit)
    # Dump the file types
    if len(FileTypes) > 0:
        with open('%s-filetypes.csv' % prefix, 'w', newline='') as fd:
            writer = csv.writer(fd, quoting=csv.QUOTE_NONNUMERIC)
            writer.writerow(['Commit', 'Type', 'Added', 'Removed'])
            for commit in FileTypes:
                writer.writerow(commit)
Example #21
Source File: common.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 6 votes |
def writerows(self, rows):
    """Write multiple rows, then transfer the buffered UTF-8 output to
    the target stream in the target encoding.
    """
    def _keep_raw(cell):
        # Strings pass straight through; numbers also pass when
        # QUOTE_NONNUMERIC is active so they are written unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    # In-place conversion keeps the original's side effect on *rows*.
    for idx, current in enumerate(rows):
        rows[idx] = [cell if _keep_raw(cell) else pprint_thing(cell).encode("utf-8")
                     for cell in current]
    self.writer.writerows([list(r) for r in rows])

    # Drain the queued UTF-8 bytes ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode into the target encoding and flush.
    self.stream.write(self.encoder.encode(raw))
    # Reset the queue.
    self.queue.truncate(0)
Example #22
Source File: common.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 6 votes |
def writerow(self, row):
    """Write one row through the wrapped csv writer, re-encoding the
    buffered UTF-8 output into the target encoding.
    """
    def _keep_raw(cell):
        # str cells need no conversion; numeric cells stay raw under
        # QUOTE_NONNUMERIC so they come out unquoted.
        return isinstance(cell, str) or (
            self.quoting == csv.QUOTE_NONNUMERIC and is_number(cell))

    prepared = []
    for cell in row:
        if _keep_raw(cell):
            prepared.append(cell)
        else:
            prepared.append(pprint_thing(cell).encode("utf-8"))
    self.writer.writerow(prepared)

    # Fetch the queued UTF-8 bytes ...
    raw = self.queue.getvalue().decode("utf-8")
    # ... re-encode and forward to the target stream.
    self.stream.write(self.encoder.encode(raw))
    # Clear the queue for the next row.
    self.queue.truncate(0)
Example #23
Source File: stats_utils.py From catalyst with Apache License 2.0 | 6 votes |
def get_csv_stats(stats, recorded_cols=None):
    """Serialize the stats DataFrame to CSV bytes.

    Parameters
    ----------
    stats: list[Object]
    recorded_cols: list[str]

    Returns
    -------
    bytes
        UTF-8 encoded CSV with all non-numeric fields quoted.
    """
    df, columns = prepare_stats(stats, recorded_cols=recorded_cols)
    csv_text = df.to_csv(
        None,
        columns=columns,
        quoting=csv.QUOTE_NONNUMERIC,
    )
    return csv_text.encode()
Example #24
Source File: saver.py From IRCLogParser with GNU General Public License v3.0 | 6 votes |
def save_csv(matrix, output_directory, output_file_name): """ Saves the input matrix as a CSV File Args: matrix(list): an array containing data to be saved output_drectory(str): location to save graph output_file_name(str): name of the csv file to be saved Returns: null """ if config.DEBUGGER: print "Generating", (output_file_name + ".csv") check_if_dir_exists(output_directory) #create output directory if doesn't exist output_file = output_directory + "/" + output_file_name +".csv" with open(output_file, 'wb') as myfile: wr = csv.writer(myfile, quoting=csv.QUOTE_NONNUMERIC) if matrix is not None: for col in matrix: wr.writerow(col)
Example #25
Source File: etl.py From open-context-py with GNU General Public License v3.0 | 6 votes |
def update_link_rel_open_context_db(
    project_uuid=PROJECT_UUID,
    source_prefix=SOURCE_ID_PREFIX,
    load_files=DESTINATION_PATH,
    link_sources=LINK_RELATIONS_SOURCES,
    loaded_link_file_prefix='loaded--',
):
    """Load linking relationships into the database, writing each
    processed source back out with a 'loaded--' filename prefix.
    """
    for source_id, filename in link_sources:
        source_df = pd.read_csv((load_files + filename))
        source_df = load_link_relations_df_into_oc(
            project_uuid,
            source_id,
            source_df,
        )
        # Save the post-load dataframe; quote all non-numeric fields.
        out_path = load_files + loaded_link_file_prefix + filename
        source_df.to_csv(out_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
Example #26
Source File: pheno2sql.py From ukbrest with MIT License | 5 votes |
def _save_column_range(self, csv_file, csv_file_idx, column_names_idx, column_names):
    """Extract one range of columns from csv_file into a per-table CSV.

    column_names is a sequence of (source_name, new_name) pairs. The
    source is streamed in chunks (self.loading_chunksize) to bound
    memory use; the first chunk opens the output file, later chunks
    append.

    Returns (table_name, output_csv_filename).
    """
    table_name = self._get_table_name(column_names_idx, csv_file_idx)
    output_csv_filename = os.path.join(get_tmpdir(self.tmpdir), table_name + '.csv')

    source_columns = ['eid'] + [pair[0] for pair in column_names]
    renamed_columns = [pair[1] for pair in column_names]

    # Everything is read as str, decoded with the detected encoding.
    data_reader = pd.read_csv(
        csv_file,
        index_col=0,
        header=0,
        usecols=source_columns,
        chunksize=self.loading_chunksize,
        dtype=str,
        encoding=self._get_file_encoding(csv_file),
    )

    logger.debug('{}'.format(output_csv_filename))

    # SQLite loads omit the header row; other backends keep it.
    write_headers = self.db_type != 'sqlite'

    for chunk_idx, chunk in enumerate(data_reader):
        chunk = chunk.rename(columns=self._rename_columns)
        # chunk = self._replace_null_str(chunk)
        first_chunk = chunk_idx == 0
        chunk.loc[:, renamed_columns].to_csv(
            output_csv_filename,
            quoting=csv.QUOTE_NONNUMERIC,
            na_rep=np.nan,
            header=write_headers if first_chunk else False,
            mode='w' if first_chunk else 'a',
        )

    return table_name, output_csv_filename
Example #27
Source File: etl.py From open-context-py with GNU General Public License v3.0 | 5 votes |
def write_grid_problem_csv(df, destination_path, filename):
    """Export rows flagged with grid-coordinate problems, if any.

    Writes 'bad-grid--<filename>' under destination_path; returns None
    whether or not a report was produced.
    """
    if GRID_PROBLEM_COL not in df.columns:
        # No grid problems in this DF.
        return None
    problem_mask = df[GRID_PROBLEM_COL].notnull()
    if df[problem_mask].empty:
        # No problem grid coordinates found.
        return None
    report_df = df[problem_mask].copy()
    # Flatten tuple column labels into 'first second' strings.
    flat_names = [(c[0] + ' ' + c[1]) for c in report_df.columns
                  if isinstance(c, tuple)]
    x_tuple_cols = [name for name in flat_names if 'Grid X' in name]
    y_tuple_cols = [name for name in flat_names if 'Grid Y' in name]
    tuple_renames = {
        c: (c[0] + ' ' + c[1])
        for c in report_df.columns if isinstance(c, tuple)
    }
    x_cols = [x for x, _ in X_Y_GRID_COLS if x in report_df.columns]
    y_cols = [y for _, y in X_Y_GRID_COLS if y in report_df.columns]
    report_df.rename(columns=tuple_renames, inplace=True)
    keep_cols = (GRID_PROBLEM_EXP_COLS + x_cols + y_cols
                 + x_tuple_cols + y_tuple_cols)
    report_df = report_df[keep_cols]
    report_df.sort_values(by=GRID_GROUPBY_COLS, inplace=True)
    report_path = destination_path + 'bad-grid--' + filename
    report_df.to_csv(
        report_path,
        index=False,
        quoting=csv.QUOTE_NONNUMERIC,
    )
Example #28
Source File: csvdump.py From integration with Apache License 2.0 | 5 votes |
def OutputCSV(file):
    """Write the per-author period statistics in PeriodCommitHash to
    *file* as CSV. Does nothing when file is None.
    """
    if file is None:
        return
    writer = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC)
    # NOTE(review): 'Affliation' typo preserved for output compatibility.
    writer.writerow(['Name', 'Email', 'Affliation', 'Date', 'Added',
                     'Removed', 'Changesets'])
    for date, stat in PeriodCommitHash.items():
        # Sanitise names: " is common and \" sometimes too — replace
        # both quotes and backslashes so the CSV stays parseable.
        clean_employer = stat.employer.name.replace('"', '.').replace('\\', '.')
        clean_author = stat.name.replace('"', '.').replace('\\', '.')
        writer.writerow([clean_author, stat.email, clean_employer,
                         stat.date, stat.added, stat.removed,
                         stat.changesets])
Example #29
Source File: test_csv.py From medicare-demo with Apache License 2.0 | 5 votes |
def test_read_quoting(self):
    """Exercise csv.reader quotechar/quoting/escapechar handling."""
    self._read_test(['1,",3,",5'], [['1', ',3,', '5']])
    # With quotechar=None, quote characters are ordinary data.
    self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
                    quotechar=None, escapechar='\\')
    self._read_test(['1,",3,",5'], [['1', '"', '3', '"', '5']],
                    quoting=csv.QUOTE_NONE, escapechar='\\')
    # will this fail where locale uses comma for decimals?
    # Under QUOTE_NONNUMERIC, unquoted fields parse as floats/ints.
    self._read_test([',3,"5",7.3, 9'], [['', 3, '5', 7.3, 9]],
                    quoting=csv.QUOTE_NONNUMERIC)
    # Unquoted non-numeric input must raise ValueError.
    self.assertRaises(ValueError, self._read_test,
                      ['abc,3'], [[]],
                      quoting=csv.QUOTE_NONNUMERIC)
Example #30
Source File: test_to_csv.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def test_to_csv_unicodewriter_quoting(self): df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']}) buf = StringIO() df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8') result = buf.getvalue() expected = ('"A","B"\n' '1,"foo"\n' '2,"bar"\n' '3,"baz"\n') assert result == expected