Java Code Examples for org.supercsv.prefs.CsvPreference#TAB_PREFERENCE
The following examples show how to use org.supercsv.prefs.CsvPreference#TAB_PREFERENCE. They are taken from open-source projects; each example notes its source file, originating project, and license.
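TAB_PREFERENCE is Super CSV's built-in preference for tab-delimited (TSV) input and output, equivalent to new CsvPreference.Builder('"', '\t', "\n").build(). Before the project examples below, here is a minimal, self-contained sketch of writing and reading a TSV file with it; the file name and row values are illustrative only:

import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.supercsv.io.CsvListReader;
import org.supercsv.io.CsvListWriter;
import org.supercsv.io.ICsvListReader;
import org.supercsv.io.ICsvListWriter;
import org.supercsv.prefs.CsvPreference;

public class TabPreferenceDemo {

  public static void main(String[] args) throws IOException {
    // Write a small tab-separated file.
    try (ICsvListWriter writer =
        new CsvListWriter(new FileWriter("demo.tsv"), CsvPreference.TAB_PREFERENCE)) {
      writer.writeHeader("id", "name");
      writer.write(Arrays.asList("1", "first row"));
      writer.write(Arrays.asList("2", "second row"));
    }

    // Read it back with the same preference.
    try (ICsvListReader reader =
        new CsvListReader(new FileReader("demo.tsv"), CsvPreference.TAB_PREFERENCE)) {
      reader.getHeader(true); // skip the header row
      List<String> row;
      while ((row = reader.read()) != null) {
        System.out.println(row);
      }
    }
  }
}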
Example 1
Source File: CitationsFileWriter.java From occurrence with Apache License 2.0
/**
 * Creates the dataset citation file using the search query response.
 *
 * @param datasetUsages record count per dataset
 * @param citationFileName output file name
 * @param occDownloadService occurrence download service
 * @param downloadKey download key
 */
public static void createCitationFile(Map<UUID, Long> datasetUsages, String citationFileName,
                                      OccurrenceDownloadService occDownloadService, String downloadKey) {
  if (datasetUsages != null && !datasetUsages.isEmpty()) {
    try (ICsvBeanWriter beanWriter = new CsvBeanWriter(
        new FileWriterWithEncoding(citationFileName, Charsets.UTF_8), CsvPreference.TAB_PREFERENCE)) {
      for (Entry<UUID, Long> entry : datasetUsages.entrySet()) {
        if (entry.getKey() != null) {
          beanWriter.write(new Facet.Count(entry.getKey().toString(), entry.getValue()), HEADER, PROCESSORS);
        }
      }
      beanWriter.flush();
      persistUsages(occDownloadService, downloadKey, datasetUsages);
    } catch (IOException e) {
      LOG.error("Error creating citations file", e);
      throw Throwables.propagate(e);
    }
  }
}
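The HEADER and PROCESSORS constants are defined elsewhere in CitationsFileWriter and are not part of this excerpt. For a name/count bean such as Facet.Count they would typically pair bean property names with Super CSV cell processors. The following is a hypothetical sketch, not the project's actual definitions; the property names are assumptions based on the Facet.Count constructor arguments:

import org.supercsv.cellprocessor.constraint.NotNull;
import org.supercsv.cellprocessor.ift.CellProcessor;

final class CitationColumns {
  // Hypothetical stand-ins: the names must match the bean's getters.
  static final String[] HEADER = {"name", "count"};
  static final CellProcessor[] PROCESSORS = {
      new NotNull(),  // dataset key, already converted to a String
      new NotNull()   // record count
  };
}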
Example 2
Source File: SimpleCsvDownloadActor.java From occurrence with Apache License 2.0
/**
 * Executes the job.query and creates a data file that contains the records from job.from to job.to positions.
 */
private void doWork(DownloadFileWork work) throws IOException {
  final DatasetUsagesCollector datasetUsagesCollector = new DatasetUsagesCollector();
  try (ICsvMapWriter csvMapWriter = new CsvMapWriter(
      new FileWriterWithEncoding(work.getJobDataFileName(), StandardCharsets.UTF_8),
      CsvPreference.TAB_PREFERENCE)) {
    SearchQueryProcessor.processQuery(work, occurrence -> {
      try {
        Map<String, String> occurrenceRecordMap =
            buildInterpretedOccurrenceMap(occurrence, DownloadTerms.SIMPLE_DOWNLOAD_TERMS);
        populateVerbatimCsvFields(occurrenceRecordMap, occurrence);
        // collect usages
        datasetUsagesCollector.collectDatasetUsage(
            occurrenceRecordMap.get(GbifTerm.datasetKey.simpleName()),
            occurrenceRecordMap.get(DcTerm.license.simpleName()));
        // write results
        csvMapWriter.write(occurrenceRecordMap, COLUMNS);
      } catch (Exception e) {
        throw Throwables.propagate(e);
      }
    });
  } finally {
    // Release the lock
    work.getLock().unlock();
    LOG.info("Lock released, job detail: {} ", work);
  }
  getSender().tell(
      new Result(work, datasetUsagesCollector.getDatasetUsages(), datasetUsagesCollector.getDatasetLicenses()),
      getSelf());
}
Example 3
Source File: DownloadDwcaActor.java From occurrence with Apache License 2.0
/**
 * Executes the job.query and creates a data file that contains the records from job.from to job.to positions.
 */
public void doWork(DownloadFileWork work) throws IOException {
  DatasetUsagesCollector datasetUsagesCollector = new DatasetUsagesCollector();
  try (
      ICsvMapWriter intCsvWriter = new CsvMapWriter(
          new FileWriterWithEncoding(work.getJobDataFileName() + TableSuffixes.INTERPRETED_SUFFIX, Charsets.UTF_8),
          CsvPreference.TAB_PREFERENCE);
      ICsvMapWriter verbCsvWriter = new CsvMapWriter(
          new FileWriterWithEncoding(work.getJobDataFileName() + TableSuffixes.VERBATIM_SUFFIX, Charsets.UTF_8),
          CsvPreference.TAB_PREFERENCE);
      ICsvBeanWriter multimediaCsvWriter = new CsvBeanWriter(
          new FileWriterWithEncoding(work.getJobDataFileName() + TableSuffixes.MULTIMEDIA_SUFFIX, Charsets.UTF_8),
          CsvPreference.TAB_PREFERENCE)) {
    SearchQueryProcessor.processQuery(work, occurrence -> {
      try {
        // Writes the occurrence record obtained from Elasticsearch as Map<String,Object>.
        if (occurrence != null) {
          datasetUsagesCollector.incrementDatasetUsage(occurrence.getDatasetKey().toString());
          intCsvWriter.write(OccurrenceMapReader.buildInterpretedOccurrenceMap(occurrence), INT_COLUMNS);
          verbCsvWriter.write(OccurrenceMapReader.buildVerbatimOccurrenceMap(occurrence), VERB_COLUMNS);
          writeMediaObjects(multimediaCsvWriter, occurrence);
        }
      } catch (Exception e) {
        throw Throwables.propagate(e);
      }
    });
  } finally {
    // Unlock the assigned lock.
    work.getLock().unlock();
    LOG.info("Lock released, job detail: {} ", work);
  }
  getSender().tell(new Result(work, datasetUsagesCollector.getDatasetUsages()), getSelf());
}
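Examples 2 and 3 both reduce to the same core Super CSV pattern: a CsvMapWriter configured with TAB_PREFERENCE writes one Map per record, with a shared column array controlling which keys are written and in what order. A stripped-down sketch of that pattern, independent of the GBIF classes; the file name, columns, and values are illustrative:

import java.io.FileWriter;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.supercsv.io.CsvMapWriter;
import org.supercsv.io.ICsvMapWriter;
import org.supercsv.prefs.CsvPreference;

public class MapWriterDemo {

  public static void main(String[] args) throws IOException {
    // The column array controls both the order and which map keys get written.
    String[] columns = {"datasetKey", "license"};
    try (ICsvMapWriter writer =
        new CsvMapWriter(new FileWriter("records.tsv"), CsvPreference.TAB_PREFERENCE)) {
      writer.writeHeader(columns);
      Map<String, String> record = new HashMap<>();
      record.put("datasetKey", "00000000-0000-0000-0000-000000000000"); // dummy key
      record.put("license", "CC_BY_4_0");
      writer.write(record, columns);
    }
  }
}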
Example 4
Source File: TSVExporter.java From robe with GNU Lesser General Public License v3.0
public TSVExporter(Class dataClass) {
  super(dataClass, CsvPreference.TAB_PREFERENCE);
}
Example 5
Source File: TSVImporter.java From robe with GNU Lesser General Public License v3.0
public TSVImporter(Class dataClass) {
  super(dataClass, CsvPreference.TAB_PREFERENCE);
}
Example 6
Source File: Serialization.java From joinery with GNU General Public License v3.0
public static DataFrame<Object> readCsv(final InputStream input, String separator,
    NumberDefault numDefault, String naString, boolean hasHeader) throws IOException {
  CsvPreference csvPreference;
  switch (separator) {
    case "\\t":
      csvPreference = CsvPreference.TAB_PREFERENCE;
      break;
    case ",":
      csvPreference = CsvPreference.STANDARD_PREFERENCE;
      break;
    case ";":
      csvPreference = CsvPreference.EXCEL_NORTH_EUROPE_PREFERENCE;
      break;
    case "|":
      csvPreference = new CsvPreference.Builder('"', '|', "\n").build();
      break;
    default:
      throw new IllegalArgumentException("Separator: " + separator + " is not currently supported");
  }
  try (CsvListReader reader = new CsvListReader(new InputStreamReader(input), csvPreference)) {
    final List<String> header;
    final DataFrame<Object> df;
    final CellProcessor[] procs;
    if (hasHeader) {
      header = Arrays.asList(reader.getHeader(true));
      procs = new CellProcessor[header.size()];
      df = new DataFrame<>(header);
    } else {
      // Read the first row to figure out how many columns we have
      reader.read();
      header = new ArrayList<String>();
      for (int i = 0; i < reader.length(); i++) {
        header.add("V" + i);
      }
      procs = new CellProcessor[header.size()];
      df = new DataFrame<>(header);
      // The following line executes the procs on the previously read row again
      df.append(reader.executeProcessors(procs));
    }
    for (List<Object> row = reader.read(procs); row != null; row = reader.read(procs)) {
      df.append(new ArrayList<>(row));
    }
    return df.convert(numDefault, naString);
  }
}
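A sketch of how readCsv might be invoked for a tab-separated stream. Note that the separator argument is the two-character literal "\\t" (a backslash followed by 't'), which is what the switch statement matches for TAB_PREFERENCE, not an actual tab character. The file name is illustrative, and this assumes joinery's NumberDefault enum exposes a LONG_DEFAULT constant and that the Serialization class shown above is on the classpath:

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

import joinery.DataFrame;
import joinery.DataFrame.NumberDefault;

public class ReadCsvDemo {

  public static void main(String[] args) throws IOException {
    try (InputStream in = new FileInputStream("data.tsv")) {
      // "\\t" selects CsvPreference.TAB_PREFERENCE in the switch above.
      DataFrame<Object> df =
          Serialization.readCsv(in, "\\t", NumberDefault.LONG_DEFAULT, "NA", true);
      System.out.println(df.length() + " rows read");
    }
  }
}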