Python csv.Sniffer() Examples
The following are 30 code examples of csv.Sniffer().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module csv, or try the search function.
Example #1
Source File: google_analytics.py From orchestra with Apache License 2.0 | 7 votes |
def _modify_column_headers(tmp_file_location, custom_dimension_header_mapping):
    """Rewrite the header row of the CSV at *tmp_file_location* in place.

    Each header is optionally renamed via *custom_dimension_header_mapping*
    and then prefixed with 'ga:' (Google Analytics column naming).
    Raises NameError when the sniffer decides the file has no header row.
    """
    logger.info('Modifying column headers to be compatible for data upload')
    # First pass: sniff only (first 1 KiB) to decide whether a header exists.
    with open(tmp_file_location, 'r') as check_header_file:
        has_header = csv.Sniffer().has_header(check_header_file.read(1024))
    if has_header:
        with open(tmp_file_location, 'r') as read_file:
            reader = csv.reader(read_file)
            # Consume exactly the header row through the csv reader...
            headers = next(reader)
            new_headers = []
            for header in headers:
                if header in custom_dimension_header_mapping:
                    header = custom_dimension_header_mapping.get(header)
                new_header = 'ga:' + header
                new_headers.append(new_header)
            # ...then grab the remaining data rows as raw lines.
            # NOTE(review): this relies on csv.reader consuming the underlying
            # file strictly line-by-line (no read-ahead) — confirm on the
            # target Python version before changing.
            all_data = read_file.readlines()
            final_headers = ','.join(new_headers) + '\n'
            all_data.insert(0, final_headers)
            # Overwrite the original file with the rewritten header + old data.
            with open(tmp_file_location, 'w') as write_file:
                write_file.writelines(all_data)
    else:
        raise NameError('CSV does not contain headers, please add them '
                        'to use the modify column headers functionality')
Example #2
Source File: kifield.py From KiField with MIT License | 6 votes |
def csvfile_to_wb(csv_filename):
    '''Open a CSV file and return an openpyxl workbook.

    Returns a (workbook, dialect) tuple so the caller can round-trip the
    sheet back to CSV with the same dialect. Empty/None cells are skipped.
    '''
    logger.log(
        DEBUG_DETAILED,
        'Converting CSV file {} into an XLSX workbook.'.format(csv_filename))
    with open(csv_filename) as csv_file:
        # Sniff the dialect from the whole file contents.
        dialect = csv.Sniffer().sniff(csv_file.read())
        if USING_PYTHON2:
            # Python 2's csv module wants byte-string dialect attributes,
            # so coerce any unicode attribute values to bytes.
            for attr in dir(dialect):
                a = getattr(dialect, attr)
                if type(a) == unicode:
                    setattr(dialect, attr, bytes(a))
        csv_file.seek(0)
        reader = csv.reader(csv_file, dialect)
        wb = pyxl.Workbook()
        ws = wb.active
        # Copy every non-empty cell; openpyxl rows/columns are 1-based.
        for row_index, row in enumerate(reader, 1):
            for column_index, cell in enumerate(row, 1):
                if cell not in ('', None):
                    ws.cell(row=row_index, column=column_index).value = cell
    return (wb, dialect)
Example #3
Source File: test_csv.py From ironpython3 with Apache License 2.0 | 6 votes |
def test_delimiters(self):
    # Exercise Sniffer.sniff() delimiter (and quotechar) detection on the
    # test class's sample CSV snippets, with and without an explicit
    # candidate-delimiter restriction.
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(self.sample3)
    # given that all three lines in sample3 are equal,
    # I think that any character could have been 'guessed' as the
    # delimiter, depending on dictionary order
    self.assertIn(dialect.delimiter, self.sample3)
    dialect = sniffer.sniff(self.sample3, delimiters="?,")
    self.assertEqual(dialect.delimiter, "?")
    dialect = sniffer.sniff(self.sample3, delimiters="/,")
    self.assertEqual(dialect.delimiter, "/")
    dialect = sniffer.sniff(self.sample4)
    self.assertEqual(dialect.delimiter, ";")
    dialect = sniffer.sniff(self.sample5)
    self.assertEqual(dialect.delimiter, "\t")
    dialect = sniffer.sniff(self.sample6)
    self.assertEqual(dialect.delimiter, "|")
    dialect = sniffer.sniff(self.sample7)
    self.assertEqual(dialect.delimiter, "|")
    self.assertEqual(dialect.quotechar, "'")
    dialect = sniffer.sniff(self.sample8)
    self.assertEqual(dialect.delimiter, '+')
    dialect = sniffer.sniff(self.sample9)
    self.assertEqual(dialect.delimiter, '+')
    self.assertEqual(dialect.quotechar, "'")
Example #4
Source File: initialize_db.py From ontask_b with MIT License | 6 votes |
def get_column_value_list(filenames, column_name, debug=False):
    """Get the values of the given column from all filenames.

    Function that given a set of filenames returns the list of values
    concatenating all the columns with name "column_name"

    :param filenames: List of filenames
    :param column_name: Column name to search.
    :param debug: Boolean controlling the log messages.
    :return: List of values.
    """
    to_return = []
    for file_name in filenames:
        # Open the file for reading; the context manager guarantees the
        # handle is closed even if sniffing or parsing raises (the
        # original version leaked the handle).
        with codecs.open(file_name, 'rU') as file_in:
            # Sniff the dialect from the first 1 KiB, then rewind.
            dialect = csv.Sniffer().sniff(file_in.read(1024))
            file_in.seek(0)
            data_in = csv.reader(file_in, dialect=dialect, delimiter=str(','))
            if debug:
                print('Parsing file ' + file_name)
            # Consume while the file is still open.
            to_return += process_csv_file(data_in, column_name)
    return to_return
Example #5
Source File: metadata.py From velocyto.py with BSD 2-Clause "Simplified" License | 6 votes |
def load(self, filename: str) -> None:
    """Parse *filename* as CSV and append one Metadata entry per data row.

    The first non-empty row supplies the column keys. When every cell of
    that row has the form "key:type", the key and type halves are split
    out (the types are kept but no longer used); otherwise the row is
    taken as plain keys with a "None" placeholder type per column.
    """
    keys = None    # column names, set from the first non-empty row
    types = None   # per-column type tags (legacy, unused downstream)
    with open(filename, newline='') as csvfile:
        # Sniff the dialect from the whole file, then rewind for parsing.
        dialect = csv.Sniffer().sniff(csvfile.read())
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect)
        for row in reader:
            if len(row) == 0:
                continue
            if keys is None:
                # Header row: "key:type" cells vs. plain key cells.
                if len(row[0].split(":")) == 2:
                    keys = [r.split(':')[0] for r in row]
                    types = [r.split(':')[1] for r in row]  # NOTE: I don't use type anymore
                else:
                    keys = row
                    types = ["None" for r in row]
            else:
                self.items.append(Metadata(keys, row, types))
Example #6
Source File: merritt.py From open-context-py with GNU General Public License v3.0 | 6 votes |
def load_csv_file(self, act_dir, filename):
    """Load and parse a CSV file.

    Returns the rows as a list of lists of cell strings, or False when
    the file does not exist (a message is printed in that case).
    """
    tab_obj = False
    dir_file = self.set_check_directory(act_dir) + filename
    if os.path.exists(dir_file):
        with open(dir_file, encoding='utf-8', errors='replace') as csvfile:
            csv_obj = csv.reader(csvfile)
            # list(row) replaces the original cell-by-cell append loop.
            tab_obj = [list(row) for row in csv_obj]
    else:
        print('Cannot find: ' + dir_file)
    return tab_obj
Example #7
Source File: prettytable.py From vulscan with MIT License | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #8
Source File: encoder.py From sagemaker-xgboost-container with Apache License 2.0 | 6 votes |
def csv_to_dmatrix(string_like, dtype=None):  # type: (str) -> xgb.DMatrix
    """Convert a CSV object to a DMatrix object.
    Args:
        string_like (str): CSV string. Assumes the string has been stripped of leading or trailing newline chars.
        dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
                                 contents of each column, individually. This argument can only be used to
                                 'upcast' the array. For downcasting, use the .astype(t) method.
    Returns:
        (xgb.DMatrix): XGBoost DataMatrix
    """
    # Sniff the delimiter from (at most) the first 512 chars of the first line.
    sniff_delimiter = csv.Sniffer().sniff(string_like.split('\n')[0][:512]).delimiter
    # An alphanumeric "delimiter" is a mis-sniff; fall back to a comma.
    delimiter = ',' if sniff_delimiter.isalnum() else sniff_delimiter
    logging.info("Determined delimiter of CSV input is \'{}\'".format(delimiter))
    # Clean each line with the sniffed delimiter, then upcast as requested.
    np_payload = np.array(list(map(lambda x: _clean_csv_string(x, delimiter), string_like.split('\n')))).astype(dtype)
    return xgb.DMatrix(np_payload)
Example #9
Source File: migrate.py From open-context-py with GNU General Public License v3.0 | 6 votes |
def load_csv_file(self, act_dir, filename):
    """Load and parse a CSV file.

    Returns the rows as a list of lists of cell strings, or False when
    the file does not exist (a message is printed in that case).
    """
    tab_obj = False
    dir_file = self.set_check_directory(act_dir) + filename
    if os.path.exists(dir_file):
        with open(dir_file, encoding='utf-8', errors='replace') as csvfile:
            csv_obj = csv.reader(csvfile)
            # list(row) replaces the original cell-by-cell append loop.
            tab_obj = [list(row) for row in csv_obj]
    else:
        print('Cannot find: ' + dir_file)
    return tab_obj
Example #10
Source File: prettytable.py From Hyperflex-Hypercheck with MIT License | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #11
Source File: prettytable.py From service-manager with Apache License 2.0 | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #12
Source File: prettytable.py From ipc_benchmark with MIT License | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #13
Source File: util.py From cooltools with MIT License | 6 votes |
def sniff_for_header(file_path, sep="\t", comment="#"): """ Warning: reads the entire file into a StringIO buffer! """ with open(file_path, "r") as f: buf = io.StringIO(f.read()) sample_lines = [] for line in buf: if not line.startswith(comment): sample_lines.append(line) break for _ in range(10): sample_lines.append(buf.readline()) buf.seek(0) has_header = csv.Sniffer().has_header("\n".join(sample_lines)) if has_header: names = sample_lines[0].strip().split(sep) else: names = None return buf, names
Example #14
Source File: test_csv.py From ironpython2 with Apache License 2.0 | 6 votes |
def test_delimiters(self):
    # Exercise Sniffer.sniff() delimiter (and quotechar) detection on the
    # test class's sample CSV snippets, with and without an explicit
    # candidate-delimiter restriction.
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(self.sample3)
    # given that all three lines in sample3 are equal,
    # I think that any character could have been 'guessed' as the
    # delimiter, depending on dictionary order
    self.assertIn(dialect.delimiter, self.sample3)
    dialect = sniffer.sniff(self.sample3, delimiters="?,")
    self.assertEqual(dialect.delimiter, "?")
    dialect = sniffer.sniff(self.sample3, delimiters="/,")
    self.assertEqual(dialect.delimiter, "/")
    dialect = sniffer.sniff(self.sample4)
    self.assertEqual(dialect.delimiter, ";")
    dialect = sniffer.sniff(self.sample5)
    self.assertEqual(dialect.delimiter, "\t")
    dialect = sniffer.sniff(self.sample6)
    self.assertEqual(dialect.delimiter, "|")
    dialect = sniffer.sniff(self.sample7)
    self.assertEqual(dialect.delimiter, "|")
    self.assertEqual(dialect.quotechar, "'")
    dialect = sniffer.sniff(self.sample8)
    self.assertEqual(dialect.delimiter, '+')
    dialect = sniffer.sniff(self.sample9)
    self.assertEqual(dialect.delimiter, '+')
    self.assertEqual(dialect.quotechar, "'")
Example #15
Source File: csvcorpus.py From topical_word_embeddings with MIT License | 6 votes |
def __init__(self, fname, labels):
    """
    Initialize the corpus from a file.

    `labels` = are class labels present in the input file? => skip the first column
    """
    logger.info("loading corpus from %s" % fname)
    self.fname = fname
    self.length = None  # number of documents; computed lazily elsewhere
    self.labels = labels

    # load the first few lines, to guess the CSV dialect
    # (use a context manager: the original leaked the open file handle)
    with open(self.fname) as fin:
        head = ''.join(itertools.islice(fin, 5))
    sniffer = csv.Sniffer()
    self.headers = sniffer.has_header(head)
    self.dialect = sniffer.sniff(head)
    logger.info("sniffed CSV delimiter=%r, headers=%s" % (self.dialect.delimiter, self.headers))
Example #16
Source File: csvcorpus.py From topical_word_embeddings with MIT License | 6 votes |
def __init__(self, fname, labels):
    """
    Initialize the corpus from a file.

    `labels` = are class labels present in the input file? => skip the first column
    """
    logger.info("loading corpus from %s" % fname)
    self.fname = fname
    self.length = None  # number of documents; computed lazily elsewhere
    self.labels = labels

    # load the first few lines, to guess the CSV dialect
    # (use a context manager: the original leaked the open file handle)
    with open(self.fname) as fin:
        head = ''.join(itertools.islice(fin, 5))
    sniffer = csv.Sniffer()
    self.headers = sniffer.has_header(head)
    self.dialect = sniffer.sniff(head)
    logger.info("sniffed CSV delimiter=%r, headers=%s" % (self.dialect.delimiter, self.headers))
Example #17
Source File: test_csv.py From BinderFilter with MIT License | 6 votes |
def test_delimiters(self):
    # Exercise Sniffer.sniff() delimiter (and quotechar) detection on the
    # test class's sample CSV snippets, with and without an explicit
    # candidate-delimiter restriction.
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(self.sample3)
    # given that all three lines in sample3 are equal,
    # I think that any character could have been 'guessed' as the
    # delimiter, depending on dictionary order
    self.assertIn(dialect.delimiter, self.sample3)
    dialect = sniffer.sniff(self.sample3, delimiters="?,")
    self.assertEqual(dialect.delimiter, "?")
    dialect = sniffer.sniff(self.sample3, delimiters="/,")
    self.assertEqual(dialect.delimiter, "/")
    dialect = sniffer.sniff(self.sample4)
    self.assertEqual(dialect.delimiter, ";")
    dialect = sniffer.sniff(self.sample5)
    self.assertEqual(dialect.delimiter, "\t")
    dialect = sniffer.sniff(self.sample6)
    self.assertEqual(dialect.delimiter, "|")
    dialect = sniffer.sniff(self.sample7)
    self.assertEqual(dialect.delimiter, "|")
    self.assertEqual(dialect.quotechar, "'")
Example #18
Source File: utils.py From django-djangui with GNU General Public License v3.0 | 6 votes |
def test_delimited(filepath):
    """Check whether *filepath* parses as comma- or tab-delimited text.

    Returns (True, rows) with up to the first five parsed rows on
    success, or (False, None) when sniffing or parsing fails.
    """
    import csv
    # Python 3 wants text mode with universal newlines disabled for csv;
    # Python 2's csv module wants a binary handle.
    if six.PY3:
        handle = open(filepath, 'r', newline='')
    else:
        handle = open(filepath, 'rb')
    with handle as csv_file:
        try:
            dialect = csv.Sniffer().sniff(csv_file.read(1024 * 16), delimiters=',\t')
        except Exception:
            return False, None
        csv_file.seek(0)
        reader = csv.reader(csv_file, dialect)
        rows = []
        try:
            for entry in reader:
                if len(rows) == 5:
                    break
                rows.append(entry)
        except Exception:
            return False, None
        return True, rows
Example #19
Source File: prettytable.py From paper.io.sessdsa with GNU General Public License v3.0 | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #20
Source File: test_csv.py From Fluid-Designer with GNU General Public License v3.0 | 6 votes |
def test_delimiters(self):
    # Exercise Sniffer.sniff() delimiter (and quotechar) detection on the
    # test class's sample CSV snippets, with and without an explicit
    # candidate-delimiter restriction.
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(self.sample3)
    # given that all three lines in sample3 are equal,
    # I think that any character could have been 'guessed' as the
    # delimiter, depending on dictionary order
    self.assertIn(dialect.delimiter, self.sample3)
    dialect = sniffer.sniff(self.sample3, delimiters="?,")
    self.assertEqual(dialect.delimiter, "?")
    dialect = sniffer.sniff(self.sample3, delimiters="/,")
    self.assertEqual(dialect.delimiter, "/")
    dialect = sniffer.sniff(self.sample4)
    self.assertEqual(dialect.delimiter, ";")
    dialect = sniffer.sniff(self.sample5)
    self.assertEqual(dialect.delimiter, "\t")
    dialect = sniffer.sniff(self.sample6)
    self.assertEqual(dialect.delimiter, "|")
    dialect = sniffer.sniff(self.sample7)
    self.assertEqual(dialect.delimiter, "|")
    self.assertEqual(dialect.quotechar, "'")
    dialect = sniffer.sniff(self.sample8)
    self.assertEqual(dialect.delimiter, '+')
    dialect = sniffer.sniff(self.sample9)
    self.assertEqual(dialect.delimiter, '+')
    self.assertEqual(dialect.quotechar, "'")
Example #21
Source File: test_csv.py From oss-ftp with MIT License | 6 votes |
def test_delimiters(self):
    # Exercise Sniffer.sniff() delimiter (and quotechar) detection on the
    # test class's sample CSV snippets, with and without an explicit
    # candidate-delimiter restriction.
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(self.sample3)
    # given that all three lines in sample3 are equal,
    # I think that any character could have been 'guessed' as the
    # delimiter, depending on dictionary order
    self.assertIn(dialect.delimiter, self.sample3)
    dialect = sniffer.sniff(self.sample3, delimiters="?,")
    self.assertEqual(dialect.delimiter, "?")
    dialect = sniffer.sniff(self.sample3, delimiters="/,")
    self.assertEqual(dialect.delimiter, "/")
    dialect = sniffer.sniff(self.sample4)
    self.assertEqual(dialect.delimiter, ";")
    dialect = sniffer.sniff(self.sample5)
    self.assertEqual(dialect.delimiter, "\t")
    dialect = sniffer.sniff(self.sample6)
    self.assertEqual(dialect.delimiter, "|")
    dialect = sniffer.sniff(self.sample7)
    self.assertEqual(dialect.delimiter, "|")
    self.assertEqual(dialect.quotechar, "'")
    dialect = sniffer.sniff(self.sample8)
    self.assertEqual(dialect.delimiter, '+')
    dialect = sniffer.sniff(self.sample9)
    self.assertEqual(dialect.delimiter, '+')
    self.assertEqual(dialect.quotechar, "'")
Example #22
Source File: csvreader.py From clickhouse-mysql-data-reader with MIT License | 6 votes |
def __init__(self, csv_file_path, converter=None, callbacks=None):
    """Open *csv_file_path*, sniff its dialect and header, and build a DictReader.

    :param csv_file_path: path of the CSV file to read
    :param converter: forwarded to the base reader
    :param callbacks: forwarded to the base reader; defaults to no callbacks
        (the original used a mutable default `{}`, shared across all calls)
    """
    super().__init__(converter=converter,
                     callbacks={} if callbacks is None else callbacks)
    self.csv_file_path = csv_file_path
    self.csvfile = open(self.csv_file_path)
    self.sniffer = csv.Sniffer()
    # Sniff dialect and header presence from the first 1 KiB,
    # rewinding the file between the two passes and before reading.
    self.dialect = self.sniffer.sniff(self.csvfile.read(1024))
    self.csvfile.seek(0)
    self.has_header = self.sniffer.has_header(self.csvfile.read(1024))
    self.csvfile.seek(0)
    self.reader = csv.DictReader(self.csvfile, dialect=self.dialect)
    if self.has_header:
        print('=======')
        print(self.reader.fieldnames)
        print('=======')
    else:
        # should raise error?
        pass
Example #23
Source File: prettytable.py From Hatkey with GNU General Public License v3.0 | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #24
Source File: spike_trains_file.py From sonata with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, path, sep=' ', **kwargs):
    """Load a spike-trains CSV into a pandas DataFrame.

    When the sniffer cannot decide (or the file is unreadable for
    sniffing), the file is assumed to have a header row. Files without a
    header get column names assigned from the module-level csv_headers.
    """
    self._node_ids = None
    self._min_time = None
    self._max_time = None
    # self._dt = None
    try:
        # check to see if file contains headers
        with open(path, 'r') as csvfile:
            sniffer = csv.Sniffer()
            has_headers = sniffer.has_header(csvfile.read(1024))
    except Exception:
        # Sniffing failed; assume a header row is present.
        has_headers = True
    self._spikes_df = pd.read_csv(path, sep=sep, header=0 if has_headers else None)
    if not has_headers:
        # Assign default names, truncated to the actual column count.
        self._spikes_df.columns = csv_headers[:self._spikes_df.shape[1]]
    if col_population not in self._spikes_df.columns:
        # Population column missing: fill from kwargs or the N/A default.
        pop_name = kwargs.get(col_population, pop_na)
        self._spikes_df[col_population] = pop_name
Example #25
Source File: csv_adaptors.py From sonata with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, path, sep=' ', **kwargs):
    """Load a spikes CSV into a pandas DataFrame with canonical columns.

    When the sniffer cannot decide (or the file is unreadable for
    sniffing), the file is assumed to have a header row.
    """
    self._n_spikes = None
    self._populations = None
    try:
        # check to see if file contains headers
        with open(path, 'r') as csvfile:
            sniffer = csv.Sniffer()
            has_headers = sniffer.has_header(csvfile.read(1024))
    except Exception:
        # Sniffing failed; assume a header row is present.
        has_headers = True
    self._spikes_df = pd.read_csv(path, sep=sep, header=0 if has_headers else None)
    if not has_headers:
        # Headerless files get every other entry of the default header list.
        self._spikes_df.columns = csv_headers[0::2]
    if col_population not in self._spikes_df.columns:
        # Population column missing: fill from kwargs or the N/A default.
        pop_name = kwargs.get(col_population, pop_na)
        self._spikes_df[col_population] = pop_name
    # TODO: Check all the necessary columns exits
    self._spikes_df = self._spikes_df[csv_headers]
Example #26
Source File: prettytable.py From smod-1 with GNU General Public License v2.0 | 6 votes |
def from_csv(fp, field_names = None, **kwargs):
    """Build a PrettyTable from the open CSV file object *fp*.

    fp          - readable, seekable file-like object containing CSV text
    field_names - optional explicit column names; when omitted the first
                  CSV row is used as the header
    kwargs      - forwarded to the PrettyTable constructor
    """
    # Sniff the dialect from the first 1 KiB, then rewind for the real parse.
    dialect = csv.Sniffer().sniff(fp.read(1024))
    fp.seek(0)
    reader = csv.reader(fp, dialect)
    table = PrettyTable(**kwargs)
    if field_names:
        table.field_names = field_names
    else:
        # Module-level py3k flag selects the iterator-protocol spelling.
        if py3k:
            table.field_names = [x.strip() for x in next(reader)]
        else:
            table.field_names = [x.strip() for x in reader.next()]
    for row in reader:
        table.add_row([x.strip() for x in row])
    return table
Example #27
Source File: upload.py From DIVE-backend with GNU General Public License v3.0 | 6 votes |
def get_dialect(file_obj, sample_size=1024 * 1024):
    """Sniff the CSV dialect of *file_obj* and return its properties as a dict.

    A sample of at most *sample_size* characters is inspected; the stream
    is rewound to the start afterwards. May raise csv.Error when no
    dialect can be determined.
    """
    try:
        sample = file_obj.read(sample_size)
    except StopIteration:
        # Some file-like wrappers signal exhaustion this way; fall back
        # to a single line.
        sample = file_obj.readline()
    file_obj.seek(0)
    dialect = csv.Sniffer().sniff(sample)
    return {
        attr: getattr(dialect, attr)
        for attr in ('delimiter', 'doublequote', 'escapechar',
                     'lineterminator', 'quotechar')
    }
Example #28
Source File: csvcorpus.py From topical_word_embeddings with MIT License | 6 votes |
def __init__(self, fname, labels):
    """
    Initialize the corpus from a file.

    `labels` = are class labels present in the input file? => skip the first column
    """
    logger.info("loading corpus from %s" % fname)
    self.fname = fname
    self.length = None  # number of documents; computed lazily elsewhere
    self.labels = labels

    # load the first few lines, to guess the CSV dialect
    # (use a context manager: the original leaked the open file handle)
    with open(self.fname) as fin:
        head = ''.join(itertools.islice(fin, 5))
    sniffer = csv.Sniffer()
    self.headers = sniffer.has_header(head)
    self.dialect = sniffer.sniff(head)
    logger.info("sniffed CSV delimiter=%r, headers=%s" % (self.dialect.delimiter, self.headers))
Example #29
Source File: test_csv.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_sniff(self):
    # Sniffer.sniff() should recover delimiter, quotechar and
    # skipinitialspace from two representative sample snippets.
    sniffer = csv.Sniffer()
    dialect = sniffer.sniff(self.sample1)
    self.assertEqual(dialect.delimiter, ",")
    self.assertEqual(dialect.quotechar, '"')
    self.assertEqual(dialect.skipinitialspace, True)
    dialect = sniffer.sniff(self.sample2)
    self.assertEqual(dialect.delimiter, ":")
    self.assertEqual(dialect.quotechar, "'")
    self.assertEqual(dialect.skipinitialspace, False)
Example #30
Source File: csv.py From pass-import with GNU General Public License v3.0 | 5 votes |
def is_format(self):
    """Return True if the file is a CSV file.

    Sniffs a dialect (restricted to the expected delimiter) from the
    first 4 KiB, rejects a mismatched quotechar, and on success rewinds
    the file and installs a DictReader on self.reader.
    """
    sniffer = csv.Sniffer()
    try:
        dialect = sniffer.sniff(self.file.read(4096),
                                delimiters=self.delimiter)
    except (csv.Error, UnicodeDecodeError):
        return False
    if dialect.quotechar != self.quotechar:  # pragma: no cover
        return False
    self.file.seek(0)
    self.reader = csv.DictReader(self.file, dialect=dialect)
    return True