java.io.FileFilter Scala Examples
The following examples show how to use java.io.FileFilter.
The original project, source file, and license are noted above each example.
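As a quick orientation before the project examples, here is a minimal sketch (not taken from any of the projects below; the object and value names are illustrative). java.io.FileFilter has a single method, accept(File), so in Scala it is usually implemented as an anonymous class or, on Scala 2.12+, as a lambda via SAM conversion, and then passed to java.io.File.listFiles:

import java.io.{File, FileFilter}

object FileFilterSketch {
  // Anonymous-class form, as used in most of the examples below.
  val scalaFilter: FileFilter = new FileFilter {
    override def accept(pathname: File): Boolean = pathname.getName.endsWith(".scala")
  }

  // On Scala 2.12+ a lambda can stand in for the single-method interface (SAM conversion).
  val dirFilter: FileFilter = (pathname: File) => pathname.isDirectory

  // FileFilter is consumed by java.io.File.listFiles(FileFilter); the result is the
  // matching entries, or null if the path is not a readable directory.
  def scalaSourcesIn(dir: File): Array[File] = dir.listFiles(scalaFilter)
}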
Example 1
Source File: StandardizationQueryV1.scala From daf-semantics with Apache License 2.0
package it.almawave.daf.standardization.v1

import com.typesafe.config.Config
import java.nio.file.Paths
import java.nio.file.Files
import it.almawave.linkeddata.kb.catalog.VocabularyBox
import java.io.FileFilter
import java.io.File
import java.nio.file.Path
import org.slf4j.LoggerFactory

// NOTE: this is an excerpt; the enclosing class definition (and the members it references,
// such as detect_ontology, detailsQueryFile, default_query_details and logger) is omitted
// in the original listing.

  def details(voc_box: VocabularyBox, level: Int, uri: String, lang: String) = {

    val onto_id = detect_ontology(voc_box)

    val query_path: Path = detailsQueryFile(onto_id)
      .map(_.toPath())
      .getOrElse(default_query_details)

    // disabled for too many logs!
    logger.debug(s"daf.standardization> try ${voc_box.id} with details query: ${query_path}")

    val query = new String(Files.readAllBytes(query_path))
    query
      .replace("${vocabularyID}", voc_box.id)
      .replace("${level}", level.toString())
      .replace("${uri}", uri)
      .replace("${lang}", lang)
  }

}
Example 2
Source File: File.scala From nescala with GNU General Public License v2.0
package com.owlandrews.nescala.helpers

import com.owlandrews.nescala.Console

object File {
  import java.io.File
  import java.net.URL
  import java.io.{FileFilter, FileInputStream, FileOutputStream, ObjectInputStream, ObjectOutputStream}
  import javax.imageio.ImageIO

  import scala.util.Try
  import scala.xml.XML
  import scala.language.postfixOps

  import sys.process._

  import com.typesafe.config.ConfigFactory

  def Download(url: String, filename: String) = (for {
    url  <- Try(new URL(url))
    conn <- Try(url.openConnection().connect())
    file <- Try(new File(filename))
  } yield Try(url #> file !!)) map { x => new File(filename) }

  def Writer(filename: String)(op: java.io.PrintWriter => Unit) = {
    val p = new java.io.PrintWriter(new File(filename))
    try op(p) finally p.close()
  }

  def Write(filename: String, content: String) = {
    val res = new java.io.PrintWriter(new File(filename))
    res.write(content)
    res.close()
  }

  def Filter = new FileFilter {
    override def accept(pathname: File): Boolean = pathname.getName.toLowerCase.endsWith(".nes")
  }

  def Image(file: Try[File]) = file.map(ImageIO.read)

  def Image(filename: String) = Try(ImageIO.read(resource(filename)))

  def Xml(filename: String) = XML.load(resource("/database.xml"))

  def Config(filename: String) = {
    val file = new File(filename)
    file.exists() match {
      case true  => ConfigFactory.parseFile(file)
      case false => ConfigFactory.empty()
    }
  }

  def SaveState(console: Console) = {
    val fos = new FileOutputStream(s"$ApplicationFolder/${console.cartridge.CRC}.save")
    val oos = new ObjectOutputStream(fos)
    oos.writeObject(console)
    oos.close()
  }

  def LoadState(crc: String): Try[Console] = Try {
    val fis = new FileInputStream(s"$ApplicationFolder/$crc.save")
    val ois = new ObjectInputStreamWithCustomClassLoader(fis)
    val console = ois.readObject.asInstanceOf[Console]
    ois.close()
    console
  }

  // Taken from: https://gist.github.com/ramn/5566596
  private class ObjectInputStreamWithCustomClassLoader(fileInputStream: FileInputStream)
    extends ObjectInputStream(fileInputStream) {
    override def resolveClass(desc: java.io.ObjectStreamClass): Class[_] = {
      try {
        Class.forName(desc.getName, false, getClass.getClassLoader)
      } catch {
        case ex: ClassNotFoundException => super.resolveClass(desc)
      }
    }
  }

  lazy val ApplicationFolder: File = {
    val settingDirectory = System.getProperty("user.home") + "/.nescala"
    val settings = new java.io.File(settingDirectory)
    if (!settings.exists()) settings.mkdir()
    settings
  }

  private def resource(filename: String) = getClass.getResourceAsStream(filename)
}
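For orientation, a hedged usage sketch (not from the nescala project) of the Filter value above: java.io.File.listFiles(FileFilter) applies it so that only .nes files are returned. The ROM directory path is a placeholder.

import com.owlandrews.nescala.helpers.File

// Placeholder path, for illustration only. Since File here is the helper object,
// java.io.File is referenced by its fully qualified name. listFiles returns the
// matching entries, or null if the path is not a readable directory.
val roms: Array[java.io.File] = new java.io.File("/path/to/roms").listFiles(File.Filter)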
Example 3
Source File: History.scala From sbt-flaky with Apache License 2.0
package flaky.history

import java.io.{File, FileFilter, InputStream}
import java.text.SimpleDateFormat
import java.util.Date

import flaky.{Flaky, FlakyTestReport, Io}
import org.apache.commons.vfs2.VFS

import scala.xml.XML

class History(project: String, historyDir: File, flakyReportDir: File, projectDir: File) {

  private val zipFileFilter = new FileFilter {
    override def accept(pathname: File): Boolean = pathname.getName.endsWith(".zip")
  }

  private def runFiles(historyDir: File): List[File] = historyDir.listFiles(zipFileFilter).toList

  def addCurrentToHistory(): Unit = {
    val timestamp = System.currentTimeMillis()
    val date = new SimpleDateFormat(History.dateFormat).format(new Date(timestamp))

    val gitCommit = Git(projectDir).currentId().toOption
    val historyReportDescription = HistoryReportDescription(timestamp, gitCommit)
    HistoryReportDescription.save(historyReportDescription, new File(flakyReportDir, History.descriptorFile))

    Zip.compressFolder(new File(historyDir, s"$date.zip"), flakyReportDir)
  }

  def removeToOldFromHistory(maxToKeep: Int): Unit = {
    runFiles(historyDir)
      .take(Math.max(runFiles(historyDir).size - maxToKeep, 0))
      .foreach(_.delete())
  }

  def createHistoryReport(): HistoryReport = {
    val historicalRuns: List[HistoricalRun] = runFiles(historyDir)
      .map(History.loadHistory)
    val date = new SimpleDateFormat("HH:mm dd-MM-YYYY").format(new Date())

    HistoryReport(project, date, historicalRuns)
  }

  def processHistory(): HistoryReport = {
    historyDir.mkdirs()
    addCurrentToHistory()
    removeToOldFromHistory(20)
    createHistoryReport()
  }
}

case class HistoryReportDescription(timestamp: Long, gitCommitHash: Option[String])

object HistoryReportDescription {

  def load(in: InputStream): HistoryReportDescription = {
    val descriptorXml = XML.load(in)
    val timestamp = (descriptorXml \ "timestamp").text.trim.toLong
    val gitHash = (descriptorXml \ "gitCommitHash").text.trim
    HistoryReportDescription(timestamp, Some(gitHash))
  }

  def save(historyReportDescription: HistoryReportDescription, file: File): Unit = {
    val xml =
      <HistoryReportDescription>
        <timestamp>
          {historyReportDescription.timestamp}
        </timestamp>
        <gitCommitHash>
          {historyReportDescription.gitCommitHash.getOrElse("")}
        </gitCommitHash>
      </HistoryReportDescription>
    val prettyXml = new scala.xml.PrettyPrinter(80, 2).format(xml)
    Io.writeToFile(file, prettyXml)
  }
}

object History {
  val descriptorFile = "descriptor.xml"
  val dateFormat = "yyyyMMdd-HHmmss"

  def loadHistory: (File) => HistoricalRun = { file =>
    {
      val manager = VFS.getManager
      val uri = file.toURI.toString.replace("file:/", "zip:/")
      val fo = manager.resolveFile(uri)
      val report: FlakyTestReport = Flaky.createReportFromHistory(fo)
      val descriptorFile = Option(fo.getChild(History.descriptorFile))
      val dateFromFileName = file.getName.replace(".zip", "")
      val hrd = descriptorFile
        .filter(_.exists())
        .map(f => HistoryReportDescription.load(f.getContent.getInputStream))
        .getOrElse(HistoryReportDescription(new SimpleDateFormat(dateFormat).parse(dateFromFileName).getTime, None))
      HistoricalRun(hrd, report)
    }
  }
}
Example 4
Source File: FileUtils.scala From sctags with Apache License 2.0
package sctags

import java.io.{File, FileFilter}
import java.util.regex.Pattern

import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions

object FileUtils {
  implicit def fun2fileFilter(fun: File => Boolean): FileFilter = new FileFilter {
    def accept(f: File) = fun(f)
  }

  implicit def fileFilter2richFilter(filter: FileFilter): RichFilter = new RichFilter(filter)

  final class RichFilter(val self: FileFilter) extends Proxy {
    def unary_! = new FileFilter { def accept(f: File) = !self.accept(f) }

    def join(other: FileFilter, op: (Boolean, Boolean) => Boolean) = new FileFilter {
      def accept(f: File) = op(self.accept(f), other.accept(f))
    }

    def &&(other: FileFilter) = join(other, _ && _)
    def ||(other: FileFilter) = join(other, _ || _)
  }

  object DirectoryFilter extends FileFilter {
    def accept(f: File) = f.isDirectory
  }

  class NameMatchFilter(val re: Pattern) extends FileFilter {
    def this(re: String) = this(Pattern.compile(re))
    def accept(f: File) = re.matcher(f.getName).matches
  }

  object AcceptAllFilter extends FileFilter {
    def accept(f: File) = true
  }

  def listFilesRecursive(base: File, filter: FileFilter): Seq[File] = {
    val files = new ArrayBuffer[File]
    def processdir(dir: File): Unit = {
      // recurse into subdirectories first, then collect the files matching the filter
      dir.listFiles(DirectoryFilter).foreach(d => processdir(d))
      val matchedFiles = dir.listFiles(filter)
      files ++= matchedFiles
    }
    processdir(base)
    files
  }
}
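As a usage note, here is a hedged sketch of how the implicit conversions above might be exercised; the object name, the base directory argument, and the ".scala" pattern are illustrative, not taken from the sctags project.

import java.io.File
import sctags.FileUtils._

object FileUtilsUsageSketch {
  // A plain File => Boolean function is adapted to a FileFilter
  // (via fun2fileFilter, or SAM conversion on Scala 2.12+).
  def scalaSources(base: File): Seq[File] =
    listFilesRecursive(base, (f: File) => f.getName.endsWith(".scala"))

  // fileFilter2richFilter adds the unary_!, && and || combinators to any FileFilter.
  def scalaFilesOnly(base: File): Seq[File] =
    listFilesRecursive(base, new NameMatchFilter(""".*\.scala""") && !DirectoryFilter)
}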
Example 5
Source File: ImageReader.scala From scala-deeplearn-examples with Apache License 2.0
package io.brunk.examples

import java.io.{File, FileFilter}
import java.lang.Math.toIntExact

import org.datavec.api.io.filters.BalancedPathFilter
import org.datavec.api.io.labels.ParentPathLabelGenerator
import org.datavec.api.split.{FileSplit, InputSplit}
import org.datavec.image.loader.BaseImageLoader
import org.datavec.image.recordreader.ImageRecordReader
import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator
import org.deeplearning4j.datasets.iterator.MultipleEpochsIterator
import org.deeplearning4j.eval.Evaluation
import org.nd4j.linalg.dataset.api.iterator.DataSetIterator
import org.nd4j.linalg.dataset.api.preprocessor.ImagePreProcessingScaler

import scala.collection.JavaConverters._

object ImageReader {
  val channels   = 3
  val height     = 150
  val width      = 150
  val batchSize  = 50
  val numClasses = 2
  val epochs     = 100

  val splitTrainTest = 0.8
  val random = new java.util.Random()

  def createImageIterator(path: String): (MultipleEpochsIterator, DataSetIterator) = {
    val baseDir = new File(path)
    val labelGenerator = new ParentPathLabelGenerator
    val fileSplit = new FileSplit(baseDir, BaseImageLoader.ALLOWED_FORMATS, random)
    val numExamples = toIntExact(fileSplit.length)
    val numLabels = fileSplit.getRootDir.listFiles(new FileFilter {
      override def accept(pathname: File): Boolean = pathname.isDirectory
    }).length
    val pathFilter = new BalancedPathFilter(random, labelGenerator, numExamples, numLabels, batchSize)

    //val inputSplit = fileSplit.sample(pathFilter, splitTrainTest, 1 - splitTrainTest)
    val inputSplit = fileSplit.sample(pathFilter, 70, 30)
    val trainData = inputSplit(0)
    val validationData = inputSplit(1)

    val recordReader = new ImageRecordReader(height, width, channels, labelGenerator)
    val scaler = new ImagePreProcessingScaler(0, 1)

    recordReader.initialize(trainData, null)
    val dataIter = new RecordReaderDataSetIterator(recordReader, batchSize, 1, numClasses)
    scaler.fit(dataIter)
    dataIter.setPreProcessor(scaler)
    val trainIter = new MultipleEpochsIterator(epochs, dataIter)

    val valRecordReader = new ImageRecordReader(height, width, channels, labelGenerator)
    valRecordReader.initialize(validationData, null)
    val validationIter = new RecordReaderDataSetIterator(valRecordReader, batchSize, 1, numClasses)
    scaler.fit(validationIter)
    validationIter.setPreProcessor(scaler)

    (trainIter, validationIter)
  }
}