This article collects typical usage examples of the java.io.BufferedWriter class in Scala. If you are wondering what BufferedWriter is for, how to use it in Scala, or want to see it in real code, the selected class examples below may help.
The following presents 20 code examples of the BufferedWriter class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code samples.
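Before the project examples, here is a minimal sketch of the pattern nearly all of them follow: wrap a FileWriter in a BufferedWriter, and close the writer in a finally block so that buffered data is actually flushed to disk. The file path is a placeholder.

import java.io.{BufferedWriter, FileWriter}

val out = new BufferedWriter(new FileWriter("/tmp/example.txt")) // buffers small writes into fewer syscalls
try {
  out.write("hello")
  out.newLine() // platform-specific line separator
} finally {
  out.close() // close() flushes the buffer before releasing the file handle
}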
Example 1: copyDataStructures

// Package declaration and imported dependencies
package ppl.delite.framework.codegen

import java.io.{BufferedWriter, FileWriter, File}

trait Utils {
  def copyDataStructures(from: String, to: String, dsmap: String => String = s => s): Unit = {
    val dsDir = new File(from)
    if (!dsDir.exists) return
    val outDir = new File(to)
    outDir.mkdirs()
    copyDirectory(dsDir)

    // Walks `dir` recursively; every file is written flat into `to`.
    def copyDirectory(dir: File): Unit = {
      for (f <- dir.listFiles) {
        if (f.isDirectory)
          copyDirectory(f)
        else {
          val outFile = to + File.separator + f.getName
          val out = new BufferedWriter(new FileWriter(outFile))
          val source = io.Source.fromFile(f)
          try {
            for (line <- source.getLines) {
              // Apply the caller-supplied remapping, then rewrite the datastruct package.
              val remapped = dsmap(line).replaceAll("ppl.delite.framework.datastruct", "generated")
              out.write(remapped + System.getProperty("line.separator"))
            }
          } finally {
            source.close() // close input and output even if remapping fails
            out.close()
          }
        }
      }
    }
  }
}

Author: leratojeffrey | Project: OptiSDR-Compiler | Lines: 32 | Source: Utils.scala
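On Scala 2.13+, the close-on-exit guarantee that the try/finally above provides by hand can also be expressed with scala.util.Using; a minimal sketch with placeholder paths:

import java.io.{BufferedWriter, FileWriter}
import scala.io.Source
import scala.util.Using

// Using.Manager closes every registered resource in reverse order, even on exception.
Using.Manager { use =>
  val in  = use(Source.fromFile("/tmp/in.txt"))
  val out = use(new BufferedWriter(new FileWriter("/tmp/out.txt")))
  for (line <- in.getLines()) {
    out.write(line)
    out.newLine()
  }
}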
Example 2: FileReaderTest4SourceAndClosing2

// Package declaration and imported dependencies
import java.io.{BufferedWriter, FileWriter}
import org.scalatest._

import scala.io.Source

class FileReaderTest4SourceAndClosing2 extends FlatSpec with Matchers {
  "Hello" should "have tests" in {
    // Enrich Source with an iterator that closes the underlying file once it is exhausted.
    implicit def toClosingSource(source: Source) = new {
      val lines = source.getLines
      def getLinesAndClose() = new Iterator[String] {
        def hasNext = if (!lines.hasNext) { source.close(); false } else true
        def next = lines.next
      }
    }
    val w = new BufferedWriter(new FileWriter("/tmp/csv13.txt"))
    (9 to 12).map(i => s"""/tmp/csv${i}.txt""").foreach { fn =>
      Source.fromFile(fn).getLinesAndClose().foreach { ln =>
        w.write(ln)
        w.write("\r\n")
      }
    }
    w.close() // flush and close; without this, buffered output can be lost
  }
}

Author: ralreiroe | Project: embarcadero | Lines: 32 | Source: FileReaderTest4SourceAndClosing2.scala
Example 3: HTMLFile

// Package declaration and imported dependencies
package ch.epfl.sbtplugin

import java.io.{BufferedWriter, File, FileWriter}

object HTMLFile {

  val content =
    """
      |<!DOCTYPE html>
      |<html>
      |<meta charset="UTF-8">
      |<title>Scala.js Call Graph Visualization</title>
      |<link rel="stylesheet" type="text/css" href="https://rawgit.com/lionelfleury/scala-js-call-graph/release/style.css">
      |<body>
      |<div id="header"><h1>Scala.js Call Graph Visualization</h1></div>
      |<div id="nav" style="overflow:auto"></div>
      |<div id="main" style="overflow:auto"></div>
      |<script type="text/javascript" src="https://rawgit.com/lionelfleury/scala-js-call-graph/release/scalajs-callgraph-opt.js"></script>
      |<script type="text/javascript" src="https://rawgit.com/lionelfleury/scala-js-call-graph/release/scalajs-callgraph-jsdeps.min.js"></script>
      |<script type="text/javascript" src="https://rawgit.com/lionelfleury/scala-js-call-graph/release/scalajs-callgraph-launcher.js"></script>
      |</body>
      |</html>
    """.stripMargin

  def writeToFile(file: File): Unit = {
    val bw = new BufferedWriter(new FileWriter(file))
    bw.write(content)
    bw.flush()
    bw.close()
  }
}

Author: lionelfleury | Project: scala-js-call-graph | Lines: 33 | Source: HTMLFile.scala
Example 4: CSVWriteBufOwnTest

// Package declaration and imported dependencies
import java.io.{PrintWriter, BufferedWriter, FileWriter}
import org.scalatest._

class CSVWriteBufOwnTest extends FlatSpec with Matchers {
  "Hello" should "have tests" in {
    // Very large buffer (8192 * 256 * 4 chars) so the million-row loop rarely touches the OS.
    val bw = new PrintWriter(new BufferedWriter(new FileWriter("/tmp/csv2.txt"), 8192 * 256 * 4))
    lazy val doit = {
      println("working")
      (1 to 1000000).foreach { _ =>
        bw.print(List(1, 2, 3, 4, 5, 6).mkString(","))
        bw.print("\r\n")
      }
      println("finished")
    }
    def timeSpentDoing(f: => Unit) = {
      val start = System.currentTimeMillis
      println(start)
      f
      println(System.currentTimeMillis - start)
    }
    timeSpentDoing(doit)
    bw.close() // flush the large buffer; without this most rows never reach disk
  }
}

Author: ralreiroe | Project: embarcadero | Lines: 32 | Source: CSVWriteBufOwnTest.scala
Example 5: CSVWriteBufOwnParallelTest

// Package declaration and imported dependencies
import java.io.{BufferedWriter, FileWriter, PrintWriter}
import java.util.concurrent.Executors

import org.scalatest._

import scala.concurrent.{ExecutionContext, Await, Future}
import scala.concurrent.duration.Duration
import scala.util.{Success, Failure}

class CSVWriteBufOwnParallelTest extends FlatSpec with Matchers {
  "Hello" should "have tests" in {
    // Writes ten million identical CSV rows to /tmp/csv<i>.txt through a very large buffer.
    lazy val doit: Int => Unit = i => {
      val bw = new PrintWriter(new BufferedWriter(new FileWriter(s"""/tmp/csv${i}.txt"""), 8192 * 256 * 4))
      (1 to 10000000).foreach { _ => // row index unused; `_` avoids shadowing the file index `i`
        bw.print(List(1, 2, 3, 4, 5, 6).mkString(","))
        bw.print("\r\n")
      }
      bw.close()
    }
    def timeSpentDoing(f: => Unit) = {
      val start = System.currentTimeMillis
      println(start)
      f
      System.currentTimeMillis - start
    }
    implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(5))
    val futures = (9 to 12).map(n => Future { timeSpentDoing(doit(n)) })
    val combined = Future.sequence(futures)
    Await.ready(combined, Duration.Inf)
    combined onComplete {
      case Success(durations) => println(durations)
      case Failure(ex)        => println(ex)
    }
  }
}

Author: ralreiroe | Project: embarcadero | Lines: 46 | Source: CSVWriteBufOwnParallelTest.scala
Example 6: FileReaderTest3Iterator

// Package declaration and imported dependencies
import java.io.{BufferedWriter, BufferedReader, FileReader, FileWriter}
import org.scalatest._

class FileReaderTest3Iterator extends FlatSpec with Matchers {
  "Hello" should "have tests" in {
    // Wraps a BufferedReader in an Iterator[String] that closes the reader at EOF or on error.
    def getContents(fileName: String): Iterator[String] = {
      val fr = new BufferedReader(new FileReader(fileName))
      def iterator = new Iterator[String] {
        def hasNext = line != null
        def next = {
          val retVal = line
          line = getLine
          retVal
        }
        def getLine = {
          var line: String = null
          try {
            line = fr.readLine
            if (line == null) fr.close() // readLine returns null at EOF, so close here as well
          } catch {
            case _: Throwable => line = null; fr.close()
          }
          line
        }
        var line = getLine
      }
      iterator
    }
    val w = new BufferedWriter(new FileWriter("/tmp/csv4.txt"))
    Seq("/tmp/csv.txt", "/tmp/csv2.txt").foreach { fn =>
      getContents(fn).foreach { ln =>
        w.write(ln)
        w.write("\r\n")
      }
    }
    w.close() // flush buffered output before the test ends
  }
}

Author: ralreiroe | Project: embarcadero | Lines: 48 | Source: FileReaderTest3Iterator.scala
Example 7: HanaActivatorTester

// Package declaration and imported dependencies
import java.io.{BufferedWriter, File, FileWriter}
import java.nio.file.{Files, Paths}

object HanaActivatorTester {

  private val extensions = List("xsapp",
    "xsprivileges",
    "hdbstructure",
    "hdbprocedure",
    "xsjslib",
    "html",
    "hdbrole",
    "xsaccess")

  def main(args: Array[String]): Unit = {
    val workPath = Paths.get(".").toAbsolutePath.normalize.toString
    val filename = "dummy"
    // Create one dummy file per extension (append mode, a single space as content).
    extensions.foreach { e =>
      val helloPath = workPath + File.separator + filename + "." + e
      val bw = new BufferedWriter(new FileWriter(new File(helloPath), true))
      bw.write(" ")
      bw.close()
    }
    val workDir = new File(workPath)
    // Exercise HanaExtensionComparator on the generated files (the ordering itself is not used further).
    val orderedExtension = workDir.listFiles.toList.sorted(new HanaExtensionComparator)
    // Clean up the dummy files.
    extensions.foreach { e =>
      val helloPath = workPath + File.separator + filename + "." + e
      Files.deleteIfExists(Paths.get(helloPath))
    }
  }
}

Author: janosbinder | Project: hana-activator | Lines: 33 | Source: HanaActivatorTester.scala
Example 8: LogAnalyzerExportSmallData

// Package declaration and imported dependencies
package com.databricks.apps.logs.chapter3

import java.io.{BufferedWriter, FileWriter, PrintWriter}

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import com.databricks.apps.logs.{ApacheAccessLog, LogAnalyzerRDD}

/**
 * LogAnalyzerExportSmallData shows how to export data of small size to a file.
 *
 * Example command to run:
 * % ${YOUR_SPARK_HOME}/bin/spark-submit
 *   --class "com.databricks.apps.logs.chapter3.LogAnalyzerExportSmallData"
 *   --master spark://YOUR_SPARK_MASTER
 *   target/scala-2.11/spark-logs-analyzer_2.11-2.0.jar
 *   ../../data/apache.access.log output.log
 */
object LogAnalyzerExportSmallData extends App {
  // Initialize SparkSession instance.
  val spark = SparkSession
    .builder()
    .appName("Log Analyzer SQL in Scala")
    .getOrCreate()

  val logFile = args(0)
  val accessLogs: RDD[ApacheAccessLog] = spark
    .sparkContext
    .textFile(logFile)
    .map(ApacheAccessLog.parseLogLine)

  val logAnalyzerRDD = LogAnalyzerRDD(spark)
  val logStatistics = logAnalyzerRDD.processRdd(accessLogs)

  val outputFile = args(1)
  val out = new PrintWriter(new BufferedWriter(new FileWriter(outputFile)))
  val contentSizeStats = logStatistics.contentSizeStats
  // Stats tuple layout: (sum, count, min, max), so the average is _1 / _2.
  out.println("Content Size Avg: %s, Min: %s, Max: %s"
    .format(contentSizeStats._1 / contentSizeStats._2,
      contentSizeStats._3,
      contentSizeStats._4))
  out.println(s"""Response code counts: ${logStatistics.responseCodeToCount.mkString("[", ",", "]")}""")
  out.println(s"""IPAddresses > 10 times: ${logStatistics.ipAddresses.mkString("[", ",", "]")}""")
  out.println(s"""Top Endpoints: ${logStatistics.topEndpoints.mkString("[", ",", "]")}""")
  out.close()
  spark.stop()
}

Author: krish121 | Project: Spark-reference-applications | Lines: 52 | Source: LogAnalyzerExportSmallData.scala
Example 9: VideoManager

// Package declaration and imported dependencies
package fgc.formatter

import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter

import fgc.model.VideoData
import fgc.parser.YouTubeChannelParser
import fgc.normalizer.Normalizer
import fgc.logger.Logger

object VideoManager {
  private val DATA_FILE_PATH = "../data/formatted/video.json"

  // Serializes the videos (newest first) to a JSON array and writes it to disk.
  def toFile(videoDatas: List[VideoData]): String = {
    val sortedVideos = videoDatas
      .sortBy(r => (r.timestamp, r.id)).reverse
      .map(_.json)
    val serialized = "[\n" + sortedVideos.mkString(",\n") + "\n]"
    val file = new File(DATA_FILE_PATH)
    val bw = new BufferedWriter(new FileWriter(file))
    bw.write(serialized)
    bw.close()
    serialized
  }

  def loadVideos(): List[VideoData] =
    YouTubeChannelParser.Parsers
      .flatMap(p => p.loadVideos)
      .map(_.trim.fixCharacters)

  def formatVideos(rawVideos: List[VideoData]): List[VideoData] =
    Normalizer.normalize(rawVideos)
}

object Formatter {
  def run(): Boolean = {
    println("running formatter")
    println("parsing videos")
    val videos = VideoManager.loadVideos()
    println("normalizing videos")
    val formatted = VideoManager.formatVideos(videos)
    VideoManager.toFile(formatted)
    Logger.logParsing()
    videos != formatted // true if normalization changed anything
  }

  def main(args: Array[String]) {
    println(run())
  }
}

Author: mpaulweeks | Project: fgc-video | Lines: 59 | Source: formatter.scala
Example 10: Logger

// Package declaration and imported dependencies
package fgc.logger

import java.io.BufferedWriter
import java.io.File
import java.io.FileWriter

import scala.collection.mutable

object Logger {
  private var parsingLogs = mutable.Map[String, mutable.ListBuffer[String]]()

  // Buffer a failed title under its channel; the buffer is flushed to disk by logParsing().
  def parsingFailure(channelName: String, videoTitle: String): Unit = {
    val channelLog = parsingLogs.getOrElseUpdate(channelName, new mutable.ListBuffer)
    channelLog += videoTitle
  }

  def logParsing(): Unit = {
    parsingLogs.foreach { case (channelName, logs) =>
      val file = new File(s"logs/parse-$channelName.txt")
      val bw = new BufferedWriter(new FileWriter(file))
      logs.foreach { line =>
        bw.write(line + "\n")
      }
      bw.close()
    }
    parsingLogs = mutable.Map() // reset after flushing
  }
}

Author: mpaulweeks | Project: fgc-video | Lines: 28 | Source: logger.scala
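A quick illustrative driver for the logger above; the channel and title strings are placeholders:

Logger.parsingFailure("SomeChannel", "Unparseable Video Title")
Logger.parsingFailure("SomeChannel", "Another Bad Title")
Logger.logParsing() // writes logs/parse-SomeChannel.txt and resets the in-memory buffer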
Example 11: withWriter

// Package declaration and imported dependencies
package tutor.utils

import java.io.{BufferedWriter, File, FileWriter, Writer}

trait WriteSupport {
  // Loan pattern: the caller receives a ready Writer and never has to remember to close it.
  def withWriter(path: String)(f: Writer => Unit): Unit = {
    var writer: Writer = null
    try {
      val file = new File(path)
      if (!file.exists()) file.createNewFile()
      writer = new BufferedWriter(new FileWriter(file))
      f(writer)
      writer.flush()
    } finally {
      if (writer != null) writer.close()
    }
  }
}

Author: notyy | Project: CodeAnalyzerTutorial | Lines: 20 | Source: WriteSupport.scala
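A hypothetical caller of the loan pattern above (the object name and path are illustrative, not part of the project):

object WriteSupportDemo extends WriteSupport {
  def main(args: Array[String]): Unit =
    withWriter("/tmp/report.txt") { w =>
      w.write("one line of output\n") // the trait flushes and closes for us
    }
}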
Example 12: SessionDataFileWriter

// Package declaration and imported dependencies
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.FileWriter

object SessionDataFileWriter {
  val eol = System.getProperty("line.separator")

  def main(args: Array[String]) {
    if (args.length == 0) {
      println("SessionDataFileWriter {numberOfRecords} {outputFile}")
      return
    }
    val writer = new BufferedWriter(new FileWriter(args(1)))
    val loops = args(0).toInt
    for (i <- 1 to loops) {
      writer.write(SessionDataGenerator.getNextEvent + eol)
    }
    writer.close()
  }
}

Author: TedBear42 | Project: spark_training | Lines: 26 | Source: SessionDataFileWriter.scala
Example 13: SessionDataFileHDFSWriter

// Package declaration and imported dependencies
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.BufferedWriter
import java.io.OutputStreamWriter
import java.util.Random

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path

object SessionDataFileHDFSWriter {
  val eol = System.getProperty("line.separator")

  def main(args: Array[String]) {
    if (args.length == 0) {
      println("SessionDataFileWriter {tempDir} {distDir} {numberOfFiles} {numberOfEventsPerFile} {waitBetweenFiles}")
      return
    }
    val conf = new Configuration
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"))
    val fs = FileSystem.get(conf) // use the configuration loaded above, not a fresh default

    val rootTempDir = args(0)
    val rootDistDir = args(1)
    val files = args(2).toInt
    val loops = args(3).toInt
    val waitBetweenFiles = args(4).toInt
    val r = new Random
    for (f <- 1 to files) {
      val rootName = "/weblog." + System.currentTimeMillis()
      // Write into a .tmp file first, then rename, so readers never see partial files.
      val tmpPath = new Path(rootTempDir + rootName + ".tmp")
      val writer = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath)))
      print(f + ": [")
      val randomLoops = loops + r.nextInt(loops)
      for (i <- 1 to randomLoops) {
        writer.write(SessionDataGenerator.getNextEvent + eol)
        if (i % 100 == 0) {
          print(".")
        }
      }
      println("]")
      writer.close()
      val distPath = new Path(rootDistDir + rootName + ".dat")
      fs.rename(tmpPath, distPath)
      Thread.sleep(waitBetweenFiles)
    }
    println("Done")
  }
}

Author: TedBear42 | Project: spark_training | Lines: 58 | Source: SessionDataFileHDFSWriter.scala
Example 14: name

// Package declaration and imported dependencies
package mjis

import java.io.BufferedWriter

trait Phase[+O] {
  def name: String = getClass.getSimpleName.toLowerCase
  protected def getResult(): O
  def dumpResult(writer: BufferedWriter): Unit = {}
  // `result` is lazy, so a phase body runs at most once.
  lazy val result: O = getResult()
  def forceResult(): Unit = result
  def findings: List[Finding] = List()
}

trait AnalysisPhase[O] extends Phase[O] {
  def success: Boolean = findings.forall(_.severity != Severity.ERROR)
}

Author: jspam | Project: mjis | Lines: 18 | Source: Phase.scala
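A hypothetical concrete phase, to show how the trait above is meant to be used; the class and its logic are illustrative, not from the mjis project:

import java.io.BufferedWriter

class LineCountPhase(input: String) extends Phase[Int] {
  override protected def getResult(): Int = input.linesIterator.length
  override def dumpResult(writer: BufferedWriter): Unit = writer.write(s"lines: $result")
}

val phase = new LineCountPhase("a\nb\nc")
println(phase.result) // 3, computed on first access and cached by the lazy val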
Example 15: GccRunner

// Package declaration and imported dependencies
package mjis

import java.io.{InputStreamReader, BufferedReader, BufferedWriter}

import scala.collection.mutable.ListBuffer

class GccRunner(a: Unit, config: Config) extends Phase[Unit] {
  override protected def getResult(): Unit = {
    val gcc = Runtime.getRuntime.exec(s"gcc -m64 -Wl,-e,main -nostdlib -o ${config.outFile} ${config.asmOutFile}")
    val stderr = new BufferedReader(new InputStreamReader(gcc.getErrorStream))
    // Drain stderr before waiting: a full pipe buffer could otherwise block gcc indefinitely.
    val stderrLines = Stream.continually(stderr.readLine()).takeWhile(_ != null).toList
    gcc.waitFor()
    if (gcc.exitValue() != 0 || stderrLines.nonEmpty) {
      _findings += new Finding() {
        override def pos: Position = Position.NoPosition
        override def msg: String = s"GCC returned exit status ${gcc.exitValue}\n${stderrLines.mkString("\n")}"
        override def severity: Severity = Severity.ERROR
      }
    }
  }

  val _findings = ListBuffer[Finding]()

  override def findings: List[Finding] = _findings.toList

  override def dumpResult(writer: BufferedWriter): Unit = {}
}

Author: jspam | Project: mjis | Lines: 28 | Source: GccRunner.scala
Example 16: ResultWriter

// Package declaration and imported dependencies
package de.aquanauten.insights

import java.io.{BufferedWriter, File, FileWriter, Writer}

object ResultWriter {

  def writeFile(fileName: String)(writeFn: Writer => Unit): Unit = {
    val file = new File(fileName)
    file.getParentFile.mkdirs()
    val writer = new BufferedWriter(new FileWriter(file))
    try { writeFn(writer) } finally { writer.close() }
  }

  def writeDependencies(packageFile: String, packageDependencies: Dependencies): Unit = {
    // write dot file
    writeFile(packageFile + ".dot")(_.write(Rendering.render(packageDependencies)(DotRendering.DependencyRendering)))
    // write plantuml file
    writeFile(packageFile + ".puml")(_.write(Rendering.render(packageDependencies)(PlantUMLRendering.DependencyRendering)))
    // write json file
    writeFile(packageFile + ".json")(_.write(Rendering.render(packageDependencies)(JsonRendering.DependencyRendering)))
  }

  def writeClasses(targetDir: String, classFile: String, compileUnit: CompileUnit): Unit = {
    // global view
    writeFile(s"$targetDir/$classFile.json") { _.write(JsonRendering.render(compileUnit)) }
    writeFile(s"$targetDir/$classFile.puml") { _.write(PlantUMLRendering.render(compileUnit)) }
    // per package view
    compileUnit.classes.groupBy(_.packageName).foreach { case (packageName, classes) =>
      writeFile(s"$targetDir/package/puml/$packageName.puml")(_.write(PlantUMLRendering.render(CompileUnit(classes))))
    }
  }
}

Author: aquamatthias | Project: scala-insights | Lines: 34 | Source: ResultWriter.scala
Example 17: LocalDataRepo

// Package declaration and imported dependencies
package util

import java.io.{BufferedWriter, File, FileOutputStream, FileWriter}
import java.nio.file.{Files, Paths}

import com.github.tototoshi.csv.{CSVReader, CSVWriter, DefaultCSVFormat}
import com.google.inject.Singleton

import scala.io.Source

@Singleton
class LocalDataRepo extends BaseDataRepo {

  class CSVFormat(_delimiter: Char) extends DefaultCSVFormat {
    override val delimiter: Char = _delimiter
  }

  override def read(filePath: String): Array[Byte] =
    Files.readAllBytes(Paths.get(filePath))

  override def readCsv(filePath: String, delimiter: Char): Seq[Seq[String]] = {
    val reader = CSVReader.open(new File(filePath))(new CSVFormat(delimiter))
    try reader.all() finally reader.close()
  }

  override def readCsvAsMap(filePath: String, delimiter: Char): Seq[Map[String, String]] = {
    val reader = CSVReader.open(new File(filePath))(new CSVFormat(delimiter))
    try reader.allWithHeaders() finally reader.close()
  }

  override def readLines(filePath: String): Seq[String] = {
    val source = Source.fromFile(filePath)
    try source.getLines().toList finally source.close()
  }

  override def write(data: Array[Byte], filePath: String): Unit = {
    val out = new FileOutputStream(new File(filePath))
    try out.write(data) finally out.close()
  }

  override def writeCsv(data: Seq[Seq[Any]], filePath: String, delimiter: Char): Unit = {
    val writer = CSVWriter.open(new File(filePath))(new CSVFormat(delimiter))
    try writer.writeAll(data) finally writer.close()
  }

  override def writeLines(data: Seq[String], filePath: String): Unit = {
    // Close the writer so the buffered content is actually flushed to the file.
    val writer = new BufferedWriter(new FileWriter(filePath))
    try writer.write(data.mkString(scala.util.Properties.lineSeparator)) finally writer.close()
  }
}

Author: davidheryanto | Project: play-with-guice-in-scala-crude-example | Lines: 47 | Source: LocalDataRepo.scala
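A brief, hypothetical round-trip through the repository above (paths and data are placeholders):

val repo = new LocalDataRepo
repo.writeLines(Seq("a,b", "c,d"), "/tmp/demo.csv")
println(repo.readCsv("/tmp/demo.csv", ',')) // List(List(a, b), List(c, d))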
Example 18: writeJson

// Package declaration and imported dependencies
package io.circe

import java.io.{ BufferedWriter, ByteArrayOutputStream, OutputStreamWriter, StringWriter, Writer }
import java.nio.ByteBuffer

package object jackson extends WithJacksonMapper with JacksonParser with JacksonCompat {

  // Serialize a circe Json value through a Jackson generator into any Writer.
  private[this] def writeJson(w: Writer, j: Json): Unit = {
    val gen = jsonGenerator(w)
    makeWriter(mapper).writeValue(gen, j)
    w.flush()
  }

  final def jacksonPrint(json: Json): String = {
    val sw = new StringWriter
    writeJson(sw, json)
    sw.toString
  }

  private[this] class EnhancedByteArrayOutputStream extends ByteArrayOutputStream {
    // Expose the internal buffer without the defensive copy toByteArray would make.
    def toByteBuffer: ByteBuffer = ByteBuffer.wrap(this.buf, 0, this.size)
  }

  final def jacksonPrintByteBuffer(json: Json): ByteBuffer = {
    val bytes = new EnhancedByteArrayOutputStream
    writeJson(new BufferedWriter(new OutputStreamWriter(bytes, "UTF-8")), json)
    bytes.toByteBuffer
  }
}

Author: circe | Project: circe-jackson | Lines: 31 | Source: package.scala
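Calling the printer above is straightforward; a small usage sketch (the JSON content is illustrative):

import io.circe.Json
import io.circe.jackson.jacksonPrint

val json = Json.obj("name" -> Json.fromString("ada"), "id" -> Json.fromInt(1))
println(jacksonPrint(json)) // compact JSON, e.g. {"name":"ada","id":1}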
Example 19: FileUtiles

// Package declaration and imported dependencies
package utils

import java.io.{BufferedWriter, File, FileWriter}

import scala.io.Source

object FileUtiles {

  def readFile(fullPath: String): String = {
    val source = Source.fromFile(fullPath)
    try {
      source.mkString
    } finally {
      source.close()
    }
  }

  def writeToFile(fullPath: String, writeFunc: BufferedWriter => Unit) = {
    val file = new File(fullPath)
    val bw = new BufferedWriter(new FileWriter(file))
    try writeFunc(bw) finally bw.close()
  }
}

Author: RoyShmuli | Project: Grym-exercise-akka | Lines: 29 | Source: FileUtiles.scala
Example 20: IoHelpers

// Package declaration and imported dependencies
package sutil.std

import scala.util.Try

import java.io.{File, FileWriter, BufferedWriter}

object IoHelpers {

  def outputToFile(base: File, specific: String): Seq[String] => Try[Unit] =
    outputToFile(new File(base, specific))

  // Returns a function that writes each line to `fi`, wrapping any IO failure in a Try.
  def outputToFile(fi: File): Seq[String] => Try[Unit] =
    lines =>
      Try {
        val w = new BufferedWriter(new FileWriter(fi))
        try {
          lines foreach { line =>
            w.write(line)
            w.newLine()
          }
        } finally { w.close() }
      }
}

Author: malcolmgreaves | Project: s-util | Lines: 24 | Source: IoHelpers.scala
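A short usage sketch for the helper above (path and contents are placeholders):

import java.io.File
import sutil.std.IoHelpers.outputToFile

val result = outputToFile(new File("/tmp/lines.txt"))(Seq("first", "second"))
println(result) // Success(()) on success, Failure(exception) if the write failed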
Note: the java.io.BufferedWriter class examples in this article were collected from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.