Java Code Examples for com.opencsv.CSVWriter#writeNext()
The following examples show how to use com.opencsv.CSVWriter#writeNext().
Each example notes its original project and source file.
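As a quick orientation before the examples: writeNext() takes a String[] and writes it as one CSV record. Below is a minimal, hedged sketch of the call sequence shared by all the examples; the class name, file name, and row values are placeholders, not taken from any project below.

import com.opencsv.CSVWriter;
import java.io.FileWriter;
import java.io.IOException;

public class WriteNextSketch {
    public static void main(String[] args) throws IOException {
        // try-with-resources flushes and closes the underlying FileWriter
        try (CSVWriter writer = new CSVWriter(new FileWriter("example.csv"))) {
            writer.writeNext(new String[]{"id", "name"});         // header record
            writer.writeNext(new String[]{"1", "Ada Lovelace"});  // data record
        }
    }
}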
Example 1
Source File: MatchServiceTest.java From fuzzy-matcher with Apache License 2.0
public static void writeOutput(Set<Set<Match<Document>>> result) throws IOException {
    CSVWriter writer = new CSVWriter(new FileWriter("src/test/resources/output.csv"));
    writer.writeNext(new String[]{"Key", "Matched Key", "Score", "Name", "Address", "Email", "Phone"});
    result.forEach(matches -> {
        String[] arr = {"Group"};
        writer.writeNext(arr);
        matches.stream().forEach(match -> {
            Document md = match.getMatchedWith();
            String[] matchArrs = Stream.concat(
                    Stream.of("", md.getKey(), Double.toString(match.getResult())),
                    getOrderedElements(md.getElements()).map(e -> e.getValue()))
                .toArray(String[]::new);
            writer.writeNext(matchArrs);
        });
    });
    writer.close();
}
Example 2
Source File: LoadIO.java From hbase-tools with Apache License 2.0
private void write(CSVWriter writer, Entry entry, Level level, LoadRecord loadRecord) {
    String[] record = new String[RegionLoadAdapter.loadEntries.length + 2];
    if (entry == Entry.header) {
        record[0] = load.getLevelClass().getLevelTypeString();
        record[1] = HEADER_TIMESTAMP;
    } else {
        record[0] = level.toString();
        record[1] = String.valueOf(load.getTimestampIteration());
    }
    for (LoadEntry loadEntry : RegionLoadAdapter.loadEntries) {
        if (entry == Entry.header) {
            record[RegionLoadAdapter.loadEntryOrdinal(loadEntry) + 2] = loadEntry.name();
        } else {
            Number number = loadRecord.get(loadEntry);
            record[RegionLoadAdapter.loadEntryOrdinal(loadEntry) + 2] = number == null ? "" : number.toString();
        }
    }
    writer.writeNext(record);
}
Example 3
Source File: Csv.java From http-builder-ng with Apache License 2.0
/**
 * Used to encode the request content using the OpenCsv writer.
 *
 * @param config the configuration
 * @param ts the server request content accessor
 */
public static void encode(final ChainedHttpConfig config, final ToServer ts) {
    if (handleRawUpload(config, ts)) {
        return;
    }
    final ChainedHttpConfig.ChainedRequest request = config.getChainedRequest();
    final Csv.Context ctx = (Csv.Context) config.actualContext(request.actualContentType(), Csv.Context.ID);
    final Object body = checkNull(request.actualBody());
    checkTypes(body, new Class[]{Iterable.class});
    final StringWriter writer = new StringWriter();
    // the original snippet passed ctx.makeWriter(new StringWriter()) here, which would
    // discard the CSV output; wrapping 'writer' (read back below) is the intended behavior
    final CSVWriter csvWriter = ctx.makeWriter(writer);
    Iterable<?> iterable = (Iterable<?>) body;
    for (Object o : iterable) {
        csvWriter.writeNext((String[]) o);
    }
    ts.toServer(stringToStream(writer.toString(), request.actualCharset()));
}
Example 4
Source File: GtfsTools.java From pt2matsim with GNU General Public License v2.0
/**
 * Experimental method to write transfers.txt (i.e. after creating additional walk transfers)
 */
public static void writeTransfers(Collection<Transfer> transfers, String path) throws IOException {
    CSVWriter transfersWriter = new CSVWriter(new FileWriter(path + GtfsDefinitions.Files.TRANSFERS.fileName), ',');
    String[] columns = GtfsDefinitions.Files.TRANSFERS.columns;
    String[] optionalColumns = GtfsDefinitions.Files.TRANSFERS.optionalColumns;
    String[] header = Stream.concat(Arrays.stream(columns), Arrays.stream(optionalColumns)).toArray(String[]::new);
    transfersWriter.writeNext(header, true);
    for (Transfer transfer : transfers) {
        // FROM_STOP_ID, TO_STOP_ID, TRANSFER_TYPE, (MIN_TRANSFER_TIME)
        String[] line = new String[header.length];
        line[0] = transfer.getFromStopId();
        line[1] = transfer.getToStopId();
        line[2] = String.valueOf(transfer.getTransferType().index);
        String minTransferTime = (transfer.getMinTransferTime() != null ? transfer.getMinTransferTime().toString() : "");
        line[3] = minTransferTime;
        transfersWriter.writeNext(line);
    }
    transfersWriter.close();
}
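This example uses the two-argument overload writeNext(String[] nextLine, boolean applyQuotesToAll). To the best of my knowledge, passing true quotes every element (matching the one-argument form's default), while false quotes only elements that need it, such as those containing the separator. A small hedged sketch with made-up values:

import com.opencsv.CSVWriter;
import java.io.IOException;
import java.io.StringWriter;

public class QuoteModeSketch {
    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        try (CSVWriter writer = new CSVWriter(out)) {
            writer.writeNext(new String[]{"a", "b,c"}, true);  // "a","b,c"  (all fields quoted)
            writer.writeNext(new String[]{"a", "b,c"}, false); // a,"b,c"    (only fields that need it)
        }
        System.out.print(out);
    }
}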
Example 5
Source File: GtfsTools.java From pt2matsim with GNU General Public License v2.0
/**
 * Experimental method to write stop_times.txt from a (filtered) collection of trips. stop_times.txt is
 * usually the largest file.
 */
public static void writeStopTimes(Collection<Trip> trips, String folder) throws IOException {
    CSVWriter stopTimesWriter = new CSVWriter(new FileWriter(folder + GtfsDefinitions.Files.STOP_TIMES.fileName), ',');
    String[] header = GtfsDefinitions.Files.STOP_TIMES.columns;
    stopTimesWriter.writeNext(header, true);
    for (Trip trip : trips) {
        for (StopTime stopTime : trip.getStopTimes()) {
            // TRIP_ID, STOP_SEQUENCE, ARRIVAL_TIME, DEPARTURE_TIME, STOP_ID
            String[] line = new String[header.length];
            line[0] = stopTime.getTrip().getId();
            line[1] = String.valueOf(stopTime.getSequencePosition());
            line[2] = Time.writeTime(stopTime.getArrivalTime());
            line[3] = Time.writeTime(stopTime.getDepartureTime());
            line[4] = stopTime.getStop().getId();
            stopTimesWriter.writeNext(line);
        }
    }
    stopTimesWriter.close();
}
Example 6
Source File: AddressServiceImpl.java From axelor-open-suite with GNU Affero General Public License v3.0
@Override
public int export(String path) throws IOException {
    List<Address> addresses = addressRepo.all().filter("self.certifiedOk IS FALSE").fetch();
    // "|".charAt(0) is simply the separator character '|'; quoting is disabled
    CSVWriter csv = new CSVWriter(new java.io.FileWriter(path), "|".charAt(0), CSVWriter.NO_QUOTE_CHARACTER);
    List<String> header = new ArrayList<>();
    header.add("Id");
    header.add("AddressL1");
    header.add("AddressL2");
    header.add("AddressL3");
    header.add("AddressL4");
    header.add("AddressL5");
    header.add("AddressL6");
    header.add("CodeINSEE");
    csv.writeNext(header.toArray(new String[header.size()]));
    List<String> items = new ArrayList<>();
    for (Address a : addresses) {
        items.add(a.getId() != null ? a.getId().toString() : "");
        // note: no AddressL1 value is added, so each data row has one column
        // fewer than the header and the remaining columns shift left
        items.add(a.getAddressL2() != null ? a.getAddressL2() : "");
        items.add(a.getAddressL3() != null ? a.getAddressL3() : "");
        items.add(a.getAddressL4() != null ? a.getAddressL4() : "");
        items.add(a.getAddressL5() != null ? a.getAddressL5() : "");
        items.add(a.getAddressL6() != null ? a.getAddressL6() : "");
        items.add(a.getInseeCode() != null ? a.getInseeCode() : "");
        csv.writeNext(items.toArray(new String[items.size()]));
        items.clear();
    }
    csv.close();
    LOG.info("{} exported", path);
    return addresses.size();
}
Example 7
Source File: TraitFileClean.java From systemsgenetics with GNU General Public License v3.0
// MAKE TRAIT FILE
static void trait(List<String> iids, File traitFile) throws FileNotFoundException, IOException {
    // tab-separated output, no quoting or escaping ('\0'), Unix line endings
    CSVWriter writer = new CSVWriter(new FileWriter(traitFile), '\t', '\0', '\0', "\n");
    String[] outLine = new String[iids.size() + 1];
    int c = 0;
    outLine[c++] = "PHENO";
    for (String iid : iids) {
        outLine[c++] = iid;
    }
    writer.writeNext(outLine);
    Random randomno1 = new Random();
    for (int j = 1; j < 1001; ++j) {
        c = 0;
        outLine[c++] = "PH" + j;
        for (int i = 0; i < iids.size(); ++i) {
            outLine[c++] = String.valueOf(randomno1.nextGaussian());
        }
        writer.writeNext(outLine);
    }
    writer.close();
}
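This example, like several others from systemsgenetics further down, builds tab-separated output through the five-argument constructor. As a hedged reading of those arguments: separator, quote character, escape character, and line terminator, where '\0' (the value of CSVWriter.NO_QUOTE_CHARACTER and NO_ESCAPE_CHARACTER) disables quoting and escaping. A minimal sketch with placeholder values:

import com.opencsv.CSVWriter;
import java.io.IOException;
import java.io.StringWriter;

public class TsvWriterSketch {
    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        // tab separator, no quote character, no escape character, Unix line endings
        try (CSVWriter writer = new CSVWriter(out, '\t',
                CSVWriter.NO_QUOTE_CHARACTER, CSVWriter.NO_ESCAPE_CHARACTER, "\n")) {
            writer.writeNext(new String[]{"PHENO", "sample1", "sample2"});
        }
        System.out.print(out);  // PHENO<TAB>sample1<TAB>sample2
    }
}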
Example 8
Source File: RecordingRESTDataProvider.java From quandl4j with Apache License 2.0
private void initWriter(final File rootPath) {
    _rootPath = rootPath;
    try {
        File file = new File(_rootPath, INDEX_FILE_NAME);
        _writer = new CSVWriter(new FileWriter(file));
        _writer.writeNext(new String[] {
            RESTReponseIndexColumns.URI.getColumnLabel(),
            RESTReponseIndexColumns.FILE.getColumnLabel(),
            RESTReponseIndexColumns.EXCEPTION_CLASS.getColumnLabel(),
            RESTReponseIndexColumns.EXCEPTION_MESSAGE.getColumnLabel()
        });
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
}
Example 9
Source File: FileUtils.java From tutorials with MIT License
public void writeLine(Line line) {
    try {
        // CSVWriter here is a field of this class (it shadows the class name),
        // lazily initialized by initWriter()
        if (CSVWriter == null)
            initWriter();
        String[] lineStr = new String[2];
        lineStr[0] = line.getName();
        lineStr[1] = line.getAge().toString();
        CSVWriter.writeNext(lineStr);
    } catch (Exception e) {
        logger.error("Error while writing line in file: " + this.fileName);
    }
}
Example 10
Source File: CsvWriterExamples.java From tutorials with MIT License
public static String csvWriterOneByOne(List<String[]> stringArray, Path path) {
    try {
        CSVWriter writer = new CSVWriter(new FileWriter(path.toString()));
        for (String[] array : stringArray) {
            writer.writeNext(array);
        }
        writer.close();
    } catch (Exception ex) {
        Helpers.err(ex);
    }
    return Helpers.readFile(path);
}
Example 11
Source File: PermissionAssistantService.java From axelor-open-suite with GNU Affero General Public License v3.0
private void writeGroup(CSVWriter csvWriter, PermissionAssistant assistant) {
    String[] groupRow = null;
    Integer count = header.size();
    ResourceBundle bundle = I18n.getBundle(new Locale(assistant.getLanguage()));
    List<String> headerRow = new ArrayList<String>();
    headerRow.addAll(getTranslatedStrings(header, bundle));
    if (assistant.getTypeSelect() == PermissionAssistantRepository.TYPE_GROUPS) {
        groupRow = new String[header.size() + (assistant.getGroupSet().size() * groupHeader.size())];
        for (Group group : assistant.getGroupSet()) {
            groupRow[count + 1] = group.getCode();
            headerRow.addAll(getTranslatedStrings(groupHeader, bundle));
            count += groupHeader.size();
        }
    } else if (assistant.getTypeSelect() == PermissionAssistantRepository.TYPE_ROLES) {
        groupRow = new String[header.size() + (assistant.getRoleSet().size() * groupHeader.size())];
        for (Role role : assistant.getRoleSet()) {
            groupRow[count + 1] = role.getName();
            headerRow.addAll(getTranslatedStrings(groupHeader, bundle));
            count += groupHeader.size();
        }
    }
    LOG.debug("Header row created: {}", headerRow);
    csvWriter.writeNext(groupRow);
    // toArray(groupRow) reuses groupRow as the target array, since by construction
    // headerRow has exactly groupRow.length elements at this point
    csvWriter.writeNext(headerRow.toArray(groupRow));
    writeObject(csvWriter, assistant, groupRow.length, bundle);
}
Example 12
Source File: HostDirMapDao.java From burp_data_collector with Apache License 2.0
public void exportDir(String dirName, int dirCount) throws SQLException, IOException {
    String sql = "SELECT stat.dir, sum(dirCount) AS allCount\n" +
            "FROM ((SELECT hdm.dir, count(*) AS dirCount FROM host_dir_map hdm GROUP BY hdm.dir)\n" +
            " UNION ALL\n" +
            " (SELECT dir, count AS dirCount FROM dir)) stat\n" +
            "GROUP BY stat.dir\n" +
            "HAVING allCount >= ?\n" +
            "ORDER BY allCount DESC";
    PreparedStatement preparedStatement = getPreparedStatement(sql);
    preparedStatement.setInt(1, dirCount);
    ResultSet resultSet = preparedStatement.executeQuery();
    File dirFile = new File(dirName + DIR_FILE);
    File dirImportFile = new File(dirName + DIR_IMPORT_FILE);
    FileOutputStream dirFileOutputStream = new FileOutputStream(dirFile);
    FileWriter fileWriter = new FileWriter(dirImportFile);
    CSVWriter csvWriter = new CSVWriter(fileWriter);
    String[] fileHead = new String[]{"dir", "count"};
    csvWriter.writeNext(fileHead);
    while (resultSet.next()) {
        String dir = resultSet.getString(1);
        String row = dir + "\n";
        int count = resultSet.getInt(2);
        dirFileOutputStream.write(row.getBytes());
        csvWriter.writeNext(new String[]{dir, String.valueOf(count)}, true);
    }
    dirFileOutputStream.close();
    csvWriter.close();
}
Example 13
Source File: HostPathMapDao.java From burp_data_collector with Apache License 2.0
public void exportPath(String dirName, int pathCount) throws SQLException, IOException {
    String sql = "SELECT stat.path, sum(pathCount) AS allCount\n" +
            "FROM ((SELECT hpm.path, count(*) AS pathCount FROM host_path_map hpm GROUP BY hpm.path)\n" +
            " UNION ALL\n" +
            " (SELECT path, count AS pathCount FROM path)) stat\n" +
            "GROUP BY stat.path\n" +
            "HAVING allCount >= ?\n" +
            "ORDER BY allCount DESC";
    PreparedStatement preparedStatement = getPreparedStatement(sql);
    preparedStatement.setInt(1, pathCount);
    ResultSet resultSet = preparedStatement.executeQuery();
    File pathFile = new File(dirName + PATH_FILE);
    File pathImportFile = new File(dirName + PATH_IMPORT_FILE);
    FileOutputStream pathOutputStream = new FileOutputStream(pathFile);
    FileWriter fileWriter = new FileWriter(pathImportFile);
    CSVWriter csvWriter = new CSVWriter(fileWriter);
    String[] fileHead = {"path", "count"};
    csvWriter.writeNext(fileHead);
    while (resultSet.next()) {
        String path = resultSet.getString(1);
        String row = path + "\n";
        int count = resultSet.getInt(2);
        pathOutputStream.write(row.getBytes());
        csvWriter.writeNext(new String[]{path, String.valueOf(count)}, true);
    }
    pathOutputStream.close();
    csvWriter.close();
}
Example 14
Source File: TraitFileClean.java From systemsgenetics with GNU General Public License v3.0
static void coupling(List<String> iids, File couplingFile) throws IOException {
    CSVWriter writer2 = new CSVWriter(new FileWriter(couplingFile), '\t', '\0', '\0', "\n");
    String[] output2Line = new String[2];
    for (String iid : iids) {
        int c = 0;
        output2Line[c++] = iid;
        output2Line[c++] = iid;
        writer2.writeNext(output2Line);
    }
    writer2.close();
}
Example 15
Source File: HostParameterMapDao.java From burp_data_collector with Apache License 2.0
public void exportParameter(String dirName, int parameterCount) throws SQLException, IOException {
    String sql = "SELECT stat.parameter, sum(parameterPathCount) AS allCount\n" +
            "FROM ((SELECT hpm.parameter, count(*) AS parameterPathCount FROM host_parameter_map hpm GROUP BY hpm.parameter)\n" +
            " UNION ALL\n" +
            " (SELECT parameter, count AS parameterPathCount FROM parameter)) stat\n" +
            "GROUP BY stat.parameter\n" +
            "HAVING allCount >= ?\n" +
            "ORDER BY allCount DESC";
    PreparedStatement preparedStatement = getPreparedStatement(sql);
    preparedStatement.setInt(1, parameterCount);
    ResultSet resultSet = preparedStatement.executeQuery();
    File parameterFile = new File(dirName + PARAMETER_FILE);
    File parameterImportFile = new File(dirName + PARAMETER_IMPORT_FILE);
    FileOutputStream parameterOutputStream = new FileOutputStream(parameterFile);
    FileWriter fileWriter = new FileWriter(parameterImportFile);
    CSVWriter csvWriter = new CSVWriter(fileWriter);
    String[] fileHead = new String[]{"parameter", "count"};
    csvWriter.writeNext(fileHead);
    while (resultSet.next()) {
        String parameter = resultSet.getString(1);
        String row = parameter + "\n";
        int count = resultSet.getInt(2);
        parameterOutputStream.write(row.getBytes());
        csvWriter.writeNext(new String[]{parameter, String.valueOf(count)}, true);
    }
    parameterOutputStream.close();
    csvWriter.close();
}
Example 16
Source File: ImproveHpoPredictionBasedOnChildTerms.java From systemsgenetics with GNU General Public License v3.0
/**
 * @param args the command line arguments
 * @throws java.lang.Exception
 */
public static void main(String[] args) throws Exception {
    final File predictionMatrixFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions.txt.gz");
    final File annotationMatrixFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\PathwayMatrix\\ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.txt_matrix.txt.gz");
    final File predictedHpoTermFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_auc_bonferroni.txt");
//  final File predictionMatrixFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_testSet.txt");
//  final File annotationMatrixFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\PathwayMatrix\\hpo_annotation_testSet.txt");
//  final File predictedHpoTermFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_testSet_auc_bonferroni.txt");
    final File hpoOboFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\HPO\\135\\hp.obo");
    final File outputLogFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_improved.log");
    final File updatedPredictionMatrixFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_improved.txt.gz");

    LinkedHashSet<String> predictedHpoTerms = readPredictedHpoTermFile(predictedHpoTermFile);

    DoubleMatrixDataset<String, String> predictionMatrixFull = DoubleMatrixDataset.loadDoubleData(predictionMatrixFile.getAbsolutePath());
    DoubleMatrixDataset<String, String> annotationMatrixFull = DoubleMatrixDataset.loadDoubleData(annotationMatrixFile.getAbsolutePath());

    DoubleMatrixDataset<String, String> predictionMatrixPredicted = predictionMatrixFull.viewColSelection(predictedHpoTerms);
    DoubleMatrixDataset<String, String> annotationMatrixPredicted = annotationMatrixFull.viewColSelection(predictedHpoTerms);

    Ontology hpoOntology = HpoFinder.loadHpoOntology(hpoOboFile);

    ImproveHpoPredictionBasedOnChildTerms improver = new ImproveHpoPredictionBasedOnChildTerms(predictionMatrixPredicted, annotationMatrixPredicted, hpoOntology);
    HashMap<String, UpdatedPredictionInfo> checkedHpoInfo = improver.run();

    System.out.println("Done with improving");

    CSVWriter writer = new CSVWriter(new FileWriter(outputLogFile), '\t', '\0', '\0', "\n");
    String[] outputLine = new String[11];
    int c = 0;
    outputLine[c++] = "HPO";
    outputLine[c++] = "Gene_count";
    outputLine[c++] = "Original_AUC";
    outputLine[c++] = "Original_Pvalue";
    outputLine[c++] = "Updated_AUC";
    outputLine[c++] = "Updated_Pvalue";
    outputLine[c++] = "Is_significant";
    outputLine[c++] = "Distance_to_top";
    outputLine[c++] = "Number_of_child_terms";
    outputLine[c++] = "Number_of_child_terms_used";
    outputLine[c++] = "Child_terms_used";
    writer.writeNext(outputLine);

    for (UpdatedPredictionInfo pi : checkedHpoInfo.values()) {
        c = 0;
        outputLine[c++] = pi.getHpo();
        outputLine[c++] = String.valueOf(pi.getGeneCount());
        outputLine[c++] = String.valueOf(pi.getOriginalAuc());
        outputLine[c++] = String.valueOf(pi.getOriginalPvalue());
        outputLine[c++] = String.valueOf(pi.getUpdatedAuc());
        outputLine[c++] = String.valueOf(pi.getUpdatedPvalue());
        outputLine[c++] = String.valueOf(pi.isIsSignificant());
        outputLine[c++] = "-";
        outputLine[c++] = String.valueOf(pi.getChildTermCount());
        outputLine[c++] = String.valueOf(pi.getUsedChildTerms().size());
        outputLine[c++] = String.join(";", pi.getUsedChildTerms());
        writer.writeNext(outputLine);
    }
    writer.close();
    improver.writeUpdatedMatrix(updatedPredictionMatrixFile);
}
Example 17
Source File: HpoFinder.java From systemsgenetics with GNU General Public License v3.0
/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws IOException, ParseException {
    final File hpoOboFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\HPO\\135\\hp.obo");
    final File hpoPredictionInfoFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\Data31995Genes05-12-2017\\PCA_01_02_2018\\predictions\\hpo_predictions_auc_bonferroni.txt");
    final File queryFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\originalHpo.txt");
    final File outputFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\selectedHpo.txt");
    final double correctedPCutoff = 0.05;

    Map<String, PredictionInfo> predictionInfo = HpoFinder.loadPredictionInfo(hpoPredictionInfoFile);
    Ontology hpoOntology = HpoFinder.loadHpoOntology(hpoOboFile);
    HpoFinder hpoFinder = new HpoFinder(hpoOntology, predictionInfo);

    CSVWriter writer = new CSVWriter(new FileWriter(outputFile), '\t', '\0', '\0', "\n");
    int c = 0;
    String[] outputLine = new String[7];
    outputLine[c++] = "originalHPO";
    outputLine[c++] = "originalDescription";
    outputLine[c++] = "matchHPO";
    outputLine[c++] = "matchDescription";
    outputLine[c++] = "matchPvalue";
    outputLine[c++] = "matchauc";
    outputLine[c++] = "multiMatch";
    writer.writeNext(outputLine);

    BufferedReader queryReader = new BufferedReader(new FileReader(queryFile));
    String queryHpo;
    while ((queryHpo = queryReader.readLine()) != null) {
        if (hpoOntology.containsTerm(queryHpo)) {
            Term queryHpoTerm = hpoOntology.getTerm(queryHpo);
            List<Term> alternativeTerms = hpoFinder.getPredictableTerms(queryHpoTerm, correctedPCutoff);
            for (Term alternativeTerm : alternativeTerms) {
                PredictionInfo info = predictionInfo.get(alternativeTerm.getName());
                c = 0;
                outputLine[c++] = queryHpo;
                outputLine[c++] = queryHpoTerm.getDescription();
                outputLine[c++] = alternativeTerm.getName();
                outputLine[c++] = alternativeTerm.getDescription();
                outputLine[c++] = String.valueOf(info.getpValue());
                outputLine[c++] = String.valueOf(info.getAuc());
                outputLine[c++] = alternativeTerms.size() > 1 ? "x" : "-";
                writer.writeNext(outputLine);
                //System.out.println(alternativeTerm.getName() + " P-value: " + info.getpValue() + " AUC: " + info.getAuc() + " " + alternativeTerm.getDescription());
            }
            if (alternativeTerms.isEmpty()) {
                c = 0;
                outputLine[c++] = queryHpo;
                outputLine[c++] = queryHpoTerm.getDescription();
                outputLine[c++] = "NA";
                outputLine[c++] = "NA";
                outputLine[c++] = "NA";
                outputLine[c++] = "NA";
                outputLine[c++] = "NA";
                writer.writeNext(outputLine);
            }
        } else {
            c = 0;
            outputLine[c++] = queryHpo;
            outputLine[c++] = "NA";
            outputLine[c++] = "NA";
            outputLine[c++] = "NA";
            outputLine[c++] = "NA";
            outputLine[c++] = "NA";
            outputLine[c++] = "NA";
            writer.writeNext(outputLine);
        }
    }
    writer.close();
}
Example 18
Source File: FilterPrioBasedOnMutatedGenes2.java From systemsgenetics with GNU General Public License v3.0
/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws FileNotFoundException, IOException {
//  final File sampleFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\Prioritisations\\samplesWithGeno.txt");
//  final File genoFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\Prioritisations\\gavinRes\\");
//  final File prioFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\Prioritisations");
//  final File resultFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\Prioritisations\\rankingCandidateGenes");
//  final File sampleFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\PrioritizeRequests\\Prioritisations\\samples.txt");
//  final File genoFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\PrioritizeRequests\\CandidateGenes\\");
//  final File prioFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\PrioritizeRequests\\Prioritisations");
//  final File resultFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\PrioritizeRequests\\rankingCandidateGenes");
    final File sampleFile = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\New5gpm\\hpo5gpm.txt");
    final File genoFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\New5gpm\\Genes\\");
    final File prioFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\New5gpm\\Prioritisations\\");
    final File resultFolder = new File("C:\\UMCG\\Genetica\\Projects\\GeneNetwork\\BenchmarkSamples\\New5gpm\\RankingCandidateGenes\\");

    final CSVParser parser = new CSVParserBuilder().withSeparator('\t').withIgnoreQuotations(true).build();
    final CSVReader sampleFileReader = new CSVReaderBuilder(new BufferedReader(new FileReader(sampleFile))).withSkipLines(0).withCSVParser(parser).build();

    String[] nextLine;
    while ((nextLine = sampleFileReader.readNext()) != null) {
        String sample = nextLine[0];
        String genoSampleName = sample + ".txt";

        File genoFile = new File(genoFolder, genoSampleName);
        File prioFile = new File(prioFolder, sample + ".txt");
        File rankingFile = new File(resultFolder, sample + ".txt");

        System.out.println("------------------------------------------------------------------");
        System.out.println("Sample: " + sample);
        System.out.println("Geno: " + genoFile.getAbsolutePath());
        System.out.println("Prio: " + prioFile.getAbsolutePath());
        System.out.println("Ranking: " + rankingFile.getAbsolutePath());

        HashSet<String> genesWithMutation = getMutatedGenes(genoFile, 0, 0);

        final CSVReader prioFileReader = new CSVReaderBuilder(new BufferedReader(new FileReader(prioFile))).withSkipLines(0).withCSVParser(parser).build();
        CSVWriter writer = new CSVWriter(new FileWriter(rankingFile), '\t', '\0', '\0', "\n");

        String[] outputLine = prioFileReader.readNext();
        writer.writeNext(outputLine);

        while ((outputLine = prioFileReader.readNext()) != null) {
            if (genesWithMutation.contains(outputLine[1])) {
                writer.writeNext(outputLine);
            }
        }
        writer.close();
        prioFileReader.close();
    }
}
Example 19
Source File: DataBackupCreateService.java From axelor-open-suite with GNU Affero General Public License v3.0
private CSVInput writeCSVData(
        MetaModel metaModel,
        CSVWriter csvWriter,
        DataBackup dataBackup,
        long totalRecord,
        List<String> subClasses,
        String dirPath) {
    CSVInput csvInput = new CSVInput();
    boolean headerFlag = true;
    List<String> dataArr = null;
    List<String> headerArr = new ArrayList<>();
    List<Model> dataList = null;
    try {
        Mapper metaModelMapper = Mapper.of(Class.forName(metaModel.getFullName()));
        Property[] pro = metaModelMapper.getProperties();
        Integer fetchLimit = dataBackup.getFetchLimit();
        boolean isRelativeDate = dataBackup.getIsRelativeDate();
        boolean updateImportId = dataBackup.getUpdateImportId();

        csvInput.setFileName(metaModel.getName() + ".csv");
        csvInput.setTypeName(metaModel.getFullName());
        csvInput.setBindings(new ArrayList<>());

        for (int i = 0; i < totalRecord; i = i + fetchLimit) {
            dataList = getMetaModelDataList(metaModel, i, fetchLimit, subClasses);
            if (dataList != null && dataList.size() > 0) {
                for (Object dataObject : dataList) {
                    dataArr = new ArrayList<>();
                    for (Property property : pro) {
                        if (isPropertyExportable(property)) {
                            if (headerFlag) {
                                String headerStr = getMetaModelHeader(dataObject, property, csvInput, isRelativeDate);
                                headerArr.add(headerStr);
                            }
                            dataArr.add(
                                getMetaModelData(
                                    metaModel.getName(),
                                    metaModelMapper,
                                    property,
                                    dataObject,
                                    dirPath,
                                    isRelativeDate,
                                    updateImportId));
                        }
                    }
                    if (headerFlag) {
                        if (byteArrFieldFlag) {
                            csvInput.setCallable(
                                "com.axelor.apps.base.service.app.DataBackupRestoreService:importObjectWithByteArray");
                            byteArrFieldFlag = false;
                        }
                        csvWriter.writeNext(headerArr.toArray(new String[headerArr.size()]), true);
                        headerFlag = false;
                    }
                    csvWriter.writeNext(dataArr.toArray(new String[dataArr.size()]), true);
                }
            }
        }
        if (AutoImportModelMap.containsKey(csvInput.getTypeName())) {
            csvInput.setSearch(AutoImportModelMap.get(csvInput.getTypeName()).toString());
        } else if (Class.forName(metaModel.getFullName()).getSuperclass() == App.class) {
            csvInput.setSearch("self.code = :code");
        }
    } catch (ClassNotFoundException e) {
        // swallowed in the original source
    }
    return csvInput;
}