
Scala OutputStreamWriter Class Code Examples


This article collects typical usage examples of java.io.OutputStreamWriter in Scala. If you are wondering how the OutputStreamWriter class is used in Scala, or are looking for concrete examples, the curated class-level code samples below should help.



Below are 18 code examples of the OutputStreamWriter class, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Scala code samples.

Example 1: HDFS

// Package declaration and imported dependencies
package org.mireynol.util

import java.io.{BufferedInputStream, OutputStreamWriter}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.mutable.ListBuffer
import scala.io.Source

object HDFS {

  def log : Logger = LoggerFactory.getLogger( HDFS.getClass )

  val hadoop : FileSystem = {
    val conf = new Configuration( )
    conf.set( "fs.defaultFS", "hdfs://localhost:9000" )
    FileSystem.get( conf )
  }

  def readAndMap( path : String, mapper : ( String ) => Unit ) = {
    if ( hadoop.exists( new Path( path ) ) ) {
      val is = new BufferedInputStream( hadoop.open( new Path( path ) ) )
      try Source.fromInputStream( is ).getLines( ).foreach( mapper )
      finally is.close( ) // release the HDFS stream even if the mapper throws
    }
    else {
      // TODO - error logic here
    }
  }

  def write( filename : String, content : Iterator[ String ] ) = {
    val path = new Path( filename )
    val out = new OutputStreamWriter( hadoop.create( path, false ) )
    try content.foreach( str => out.write( str + "\n" ) )
    finally {
      out.flush( )
      out.close( )
    }
  }

  def ls( path : String ) : List[ String ] = {
    val files = hadoop.listFiles( new Path( path ), false )
    val filenames = ListBuffer[ String ]( )
    while ( files.hasNext ) filenames += files.next( ).getPath( ).toString( )
    filenames.toList
  }

  def rm( path : String, recursive : Boolean ) : Unit = {
    if ( hadoop.exists( new Path( path ) ) ) {
      println( "deleting file : " + path )
      hadoop.delete( new Path( path ), recursive )
    }
    else {
      println( "File/Directory" + path + " does not exist" )
      log.warn( "File/Directory" + path + " does not exist" )
    }
  }

  def cat( path : String ) = Source.fromInputStream( hadoop.open( new Path( path ) ) ).getLines( ).foreach( println )

} 
Author: reynoldsm88 | Project: spark-drools | Lines: 61 | Source: HDFS.scala
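
A minimal usage sketch (the object name is hypothetical; it assumes a namenode reachable at hdfs://localhost:9000, as configured above):

object HDFSExample extends App {
  HDFS.write( "/tmp/example.txt", Iterator( "line one", "line two" ) )      // create and populate a file
  HDFS.readAndMap( "/tmp/example.txt", line => println( s"read: $line" ) )  // stream it back line by line
  HDFS.ls( "/tmp" ).foreach( println )                                      // list directory contents
  HDFS.rm( "/tmp/example.txt", recursive = false )
}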


Example 2: GZipServletResponseWrapper

// Package declaration and imported dependencies
package co.informatica.mvc.filters

import java.io.{IOException, OutputStreamWriter, PrintWriter}
import javax.servlet.ServletOutputStream
import javax.servlet.http.{HttpServletResponse, HttpServletResponseWrapper}

class GZipServletResponseWrapper(val response: HttpServletResponse) extends HttpServletResponseWrapper(response) {
  private var gzipOutputStream: GZipServletOutputStream = null
  private var printWriter: PrintWriter = null

  def close(): Unit = {
    // PrintWriter.close does not throw exceptions, hence no try-catch block.
    if (this.printWriter != null) this.printWriter.close()
    if (this.gzipOutputStream != null) this.gzipOutputStream.close()
  }

  override def flushBuffer(): Unit = {
    // PrintWriter.flush() does not throw exceptions either.
    if (this.printWriter != null) this.printWriter.flush()
    var exception1: IOException = null
    try {
      if (this.gzipOutputStream != null) this.gzipOutputStream.flush()
    } catch {
      case e: IOException =>
        exception1 = e
    }
    var exception2: IOException = null
    try {
      super.flushBuffer()
    } catch {
      case e: IOException =>
        exception2 = e
    }
    if (exception1 != null) throw exception1
    if (exception2 != null) throw exception2
  }

  override def getOutputStream: ServletOutputStream = {
    if (this.printWriter != null) throw new IllegalStateException("PrintWriter obtained already - cannot get OutputStream")
    if (this.gzipOutputStream == null) this.gzipOutputStream = new GZipServletOutputStream(getResponse.getOutputStream)
    this.gzipOutputStream
  }

  override def getWriter: PrintWriter = {
    if (this.printWriter == null && this.gzipOutputStream != null) throw new IllegalStateException("OutputStream obtained already - cannot get PrintWriter")
    if (this.printWriter == null) {
      this.gzipOutputStream = new GZipServletOutputStream(getResponse.getOutputStream)
      this.printWriter = new PrintWriter(new OutputStreamWriter(this.gzipOutputStream, getResponse.getCharacterEncoding))
    }
    this.printWriter
  }

  override def setContentLength(len: Int): Unit = {
    //ignore, since content length of zipped content
    //does not match content length of unzipped content.
  }
} 
Author: jkevingutierrez | Project: MVC | Lines: 57 | Source: GZipServletResponseWrapper.scala
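
For context, here is a sketch of how the wrapper might be driven from a servlet filter. The filter class is hypothetical, and GZipServletOutputStream is assumed to be defined alongside the wrapper, as in the original project:

import javax.servlet._
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}

class GZipServletFilter extends Filter {
  override def init(config: FilterConfig): Unit = ()
  override def destroy(): Unit = ()

  override def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain): Unit = (req, res) match {
    case (request: HttpServletRequest, response: HttpServletResponse)
        if Option(request.getHeader("Accept-Encoding")).exists(_.contains("gzip")) =>
      // advertise gzip and hand the wrapped response down the chain
      response.setHeader("Content-Encoding", "gzip")
      val wrapped = new GZipServletResponseWrapper(response)
      try chain.doFilter(request, wrapped)
      finally wrapped.close()
    case _ =>
      chain.doFilter(req, res)
  }
}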


Example 3: SessionDataFileHDFSWriter

// Package declaration and imported dependencies
package com.malaska.spark.training.streaming.dstream.sessionization

import java.io.{BufferedWriter, OutputStreamWriter}
import java.util.Random

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object SessionDataFileHDFSWriter {
  
  val eol = System.getProperty("line.separator")
  
  def main(args: Array[String]) {
    if (args.length < 5) {
        println("SessionDataFileWriter {tempDir} {distDir} {numberOfFiles} {numberOfEventsPerFile} {waitBetweenFiles}")
        return
    }
    val conf = new Configuration
    conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/mapred-site.xml"))
    conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"))
    
    val fs = FileSystem.get(conf) // use the configuration assembled above
    val rootTempDir = args(0)
    val rootDistDir = args(1)
    val files = args(2).toInt
    val loops = args(3).toInt
    val waitBetweenFiles = args(4).toInt
    val r = new Random
    for (f <- 1 to files) {
      val rootName = "/weblog." + System.currentTimeMillis()
      val tmpPath = new Path(rootTempDir + rootName + ".tmp")
      val writer = new BufferedWriter(new OutputStreamWriter(fs.create(tmpPath)))
      
      print(f + ": [")
      
      val randomLoops = loops + r.nextInt(loops)
      
      for (i <- 1 to randomLoops) {
        writer.write(SessionDataGenerator.getNextEvent + eol)
        if (i%100 == 0) {
          print(".")
        }
      }
      println("]")
      writer.close()
      
      val distPath = new Path(rootDistDir + rootName + ".dat")
      
      fs.rename(tmpPath, distPath)
      Thread.sleep(waitBetweenFiles)
    }
    println("Done")
  }
} 
Author: TedBear42 | Project: spark_training | Lines: 58 | Source: SessionDataFileHDFSWriter.scala
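
Since the writer is a plain main class, it could be launched roughly like this (the jar name, paths, and counts are illustrative):

java -cp session-writer.jar:$(hadoop classpath) \
  com.malaska.spark.training.streaming.dstream.sessionization.SessionDataFileHDFSWriter \
  /tmp/weblogs /data/weblogs 10 1000 2000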


Example 4: ZipImplicits

// Package declaration and imported dependencies
package anchorman.docx

import anchorman.media.MediaFile

import java.io.OutputStreamWriter
import java.util.zip.{ZipEntry, ZipOutputStream}

import scala.xml.{MinimizeMode, NodeSeq, XML}

object ZipImplicits {
  implicit class ZipOutputStreamOps(out: ZipOutputStream) {
    def writeXmlFile(path: String, nodes: NodeSeq): Unit = {
      out.putNextEntry(new ZipEntry(path))

      val writer = new OutputStreamWriter(out)
      try XML.write(writer, nodes.head, "UTF-8", true, null, MinimizeMode.Always)
      finally writer.flush()
    }

    def writeMediaFile(path: String, file: MediaFile): Unit = {
      out.putNextEntry(new ZipEntry(path))
      try out.write(file.content)
      finally out.flush()
    }
  }
} 
Author: davegurnell | Project: anchorman | Lines: 27 | Source: ZipImplicits.scala
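
A quick sketch of the implicit syntax in action, writing one XML entry into an archive (the file names and XML content are illustrative):

import java.io.FileOutputStream
import java.util.zip.ZipOutputStream

import anchorman.docx.ZipImplicits._

val out = new ZipOutputStream(new FileOutputStream("example.zip"))
try out.writeXmlFile("word/document.xml", <doc>hello</doc>) // ZipOutputStreamOps in action
finally out.close()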


Example 5: writeJson

// Package declaration and imported dependencies
package io.circe

import java.io.{ BufferedWriter, ByteArrayOutputStream, OutputStreamWriter, StringWriter, Writer }
import java.nio.ByteBuffer


package object jackson extends WithJacksonMapper with JacksonParser with JacksonCompat {

  private[this] def writeJson(w: Writer, j: Json): Unit = {
    val gen = jsonGenerator(w)
    makeWriter(mapper).writeValue(gen, j)
    w.flush()
  }

  final def jacksonPrint(json: Json): String = {
    val sw = new StringWriter
    writeJson(sw, json)
    sw.toString
  }

  private[this] class EnhancedByteArrayOutputStream extends ByteArrayOutputStream {
    def toByteBuffer: ByteBuffer = ByteBuffer.wrap(this.buf, 0, this.size)
  }

  final def jacksonPrintByteBuffer(json: Json): ByteBuffer = {
    val bytes = new EnhancedByteArrayOutputStream
    writeJson(new BufferedWriter(new OutputStreamWriter(bytes, "UTF-8")), json)
    bytes.toByteBuffer
  }
} 
Author: circe | Project: circe-jackson | Lines: 31 | Source: package.scala
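
Usage is direct once a Json value is in hand; a minimal sketch:

import io.circe.Json
import io.circe.jackson._

val json = Json.obj("name" -> Json.fromString("circe"), "stars" -> Json.fromInt(100))
println(jacksonPrint(json))               // compact JSON text
val buffer = jacksonPrintByteBuffer(json) // the same document as UTF-8 bytes in a ByteBuffer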


Example 6: TextFile

// Package declaration and imported dependencies
package com.ox.bigdata.util.file

import java.io.{File, FileOutputStream, OutputStreamWriter}

import com.ox.bigdata.util.Using

class TextFile(filename: String) extends Using {

  def write(data: List[List[String]]): Unit = {
    val strData = data.map(x => x.mkString(",") + ",").mkString("\n") + "\n"
    writeToFile(filename, strData)
  }

  def write(data: String): Unit = {
    writeToFile(filename, data)
  }

  def delete(): Unit = {
    new File(filename).delete()
  }

  private def writeToFile(fileName: String, data: String): Unit = {
    using(new OutputStreamWriter(new FileOutputStream(fileName), "UTF-8")) {
      fileWriter => fileWriter.write(data)
    }
  }
} 
Author: black-ox | Project: simple | Lines: 28 | Source: TextFile.scala
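
A short usage sketch (it assumes the using loan-pattern helper from com.ox.bigdata.util.Using, which is not shown here; note that each write call reopens the file and replaces its contents):

val file = new TextFile("report.csv")
file.write(List(List("id", "name"), List("1", "alice"))) // rows joined with commas
file.write("replaced by this plain string\n")
file.delete()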


Example 7: PrometheusService

// Package declaration and imported dependencies
package com.example.akka.http

import java.io.{ OutputStreamWriter, PipedInputStream, PipedOutputStream }

import scala.concurrent.{ ExecutionContext, Future }

import akka.http.scaladsl.model.{ HttpCharsets, HttpEntity, MediaType }
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.scaladsl.StreamConverters
import io.prometheus.client.CollectorRegistry
import io.prometheus.client.exporter.common.TextFormat

object PrometheusService extends Directives {

  lazy val prometheusTextType =
    MediaType.customWithFixedCharset("text", "plain", HttpCharsets.`UTF-8`, params = Map("version" -> "0.0.4"))

  def route(implicit executionContext: ExecutionContext): Route = {
    path("metrics") {
      complete {
        val in = new PipedInputStream
        val out = new OutputStreamWriter(new PipedOutputStream(in), HttpCharsets.`UTF-8`.value)
        val byteSource = StreamConverters.fromInputStream(() => in)
        Future {
          try {
            TextFormat.write004(out, CollectorRegistry.defaultRegistry.metricFamilySamples())
            out.flush()
          } finally {
            out.close()
          }
        }
        HttpEntity(prometheusTextType, byteSource)
      }
    }
  }
} 
Author: pjfanning | Project: prometheus-akka-sample | Lines: 37 | Source: PrometheusService.scala
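
To expose the endpoint, the route can be bound with a standard Akka HTTP bootstrap. This is a sketch against the pre-10.2 bindAndHandle API; the object name and port are illustrative:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer

object MetricsServer extends App {
  implicit val system = ActorSystem("metrics")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher // the ExecutionContext required by PrometheusService.route

  Http().bindAndHandle(PrometheusService.route, "0.0.0.0", 9095)
}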


Example 8: TextFile

// Package declaration and imported dependencies
package com.boost.bigdata.utils.file

import java.io.{File, FileOutputStream, OutputStreamWriter}

import com.boost.bigdata.utils.Using

class TextFile(filename: String) extends Using {

  def write(data: List[List[String]]): Unit = {
    val strData = data.map(x => x.mkString(",") + ",").mkString("\n") + "\n"
    writeToFile(filename, strData)
  }

  def write(data: String): Unit = {
    writeToFile(filename, data)
  }

  def delete(): Unit = {
    new File(filename).delete()
  }

  private def writeToFile(fileName: String, data: String): Unit = {
    using(new OutputStreamWriter(new FileOutputStream(fileName), "UTF-8")) {
      fileWriter => fileWriter.write(data)
    }
  }
} 
Author: steven-prgm | Project: bigdata-scala | Lines: 28 | Source: TextFile.scala


Example 9: SocketClient

// Package declaration and imported dependencies
package socket.benchmark

import java.io.{OutputStreamWriter, PrintWriter}
import java.net.{InetSocketAddress, Socket}

class SocketClient(val serverAddress: InetSocketAddress, msgCount: Int) {
  val serverSocket = {
    val socket = new Socket()
    socket.setSoTimeout(1000)
    socket.connect(serverAddress)
    socket
  }

  def sendAndForgetBlocking(msg: String) = {
    val (elapsed, _) = measure {
      1 to msgCount foreach { i =>
        writeBlockingMsg(s"$i$msg")
      }
    }
    elapsed
  }

  def close() = serverSocket.close()

  private def writeBlockingMsg(msg: String): Unit = {
    val out = new PrintWriter(new OutputStreamWriter(serverSocket.getOutputStream, "utf-8"), true)
    out.println(msg)
    out.flush()
  }

  private def measure[T](callback: => T): (Long, T) = {
    val start = System.currentTimeMillis
    val res = callback
    val elapsed = System.currentTimeMillis - start
    (elapsed, res)
  }

} 
Author: focusj | Project: address-service | Lines: 39 | Source: SocketClient.scala
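
A minimal driver (host, port, and message are illustrative, and a server must be listening or connect will fail):

import java.net.InetSocketAddress

object SocketClientExample extends App {
  val client = new SocketClient(new InetSocketAddress("localhost", 9090), msgCount = 10000)
  try {
    val elapsedMs = client.sendAndForgetBlocking(" hello")
    println(s"sent 10000 messages in $elapsedMs ms")
  } finally client.close()
}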


Example 10: RSAKeyGenerator

// Package declaration and imported dependencies
package allawala.chassis.util

import java.io.{File, FileOutputStream, OutputStreamWriter}
import java.nio.file.Paths
import java.security.{Key, KeyPair, KeyPairGenerator, Security}

import com.typesafe.scalalogging.StrictLogging
import org.bouncycastle.jce.provider.BouncyCastleProvider
import org.bouncycastle.util.io.pem.{PemObject, PemWriter}


object RSAKeyGenerator extends StrictLogging {
  private val KeySize = 2048
  private val homeDir = System.getProperty("user.home")

  private class PemFile(val key: Key, val description: String) {
    private val pemObject = new PemObject(description, key.getEncoded)

    def write(file: File): Unit = {
      val pemWriter = new PemWriter(new OutputStreamWriter(new FileOutputStream(file)))
      try {
        pemWriter.writeObject(this.pemObject)
      }
      finally {
        pemWriter.close()
      }
    }
  }

  private def generateKeyPair(): KeyPair = {
    val generator = KeyPairGenerator.getInstance("RSA", "BC")
    generator.initialize(KeySize)
    val keyPair = generator.generateKeyPair
    keyPair
  }

  private def writePem(key: Key, description: String, filename: String): Unit = {
    val path = Paths.get(homeDir, filename)
    val file = path.toFile
    val pemFile = new PemFile(key, description)
    pemFile.write(file)
    logger.debug(s"Wrote $description to $path")
  }

  def generate(): Unit = {
    Security.addProvider(new BouncyCastleProvider)

    val keyPair = generateKeyPair()
    val privateKey = keyPair.getPrivate
    val publicKey = keyPair.getPublic

    writePem(privateKey, "RSA PRIVATE KEY","id_rsa")
    writePem(publicKey, "RSA PUBLIC KEY", "id_rsa.pub")
  }
} 
Author: allawala | Project: service-chassis | Lines: 56 | Source: RSAKeyGenerator.scala
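
Generating a key pair is then a one-liner; the Bouncy Castle provider (bcprov) must be on the classpath. The wrapper object is hypothetical:

object GenerateKeys extends App {
  RSAKeyGenerator.generate() // writes id_rsa and id_rsa.pub in PEM format under the user's home directory
}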


Example 11: parseRecodingTable

// Package declaration and imported dependencies
package uk.ac.ncl.openlab.intake24.sql.tools.food.localisation

import uk.ac.ncl.openlab.intake24.AssociatedFood
import java.io.OutputStreamWriter
import uk.ac.ncl.openlab.intake24.FoodHeader
import uk.ac.ncl.openlab.intake24.PortionSizeMethod
import java.io.FileWriter
import au.com.bytecode.opencsv.CSVWriter
import uk.ac.ncl.openlab.intake24.NewLocalFoodRecord
import java.io.File
import uk.ac.ncl.openlab.intake24.UserFoodHeader

trait RecodingTableParser {
  def parseRecodingTable(path: String): RecodingTable
}

trait RecodingTableUtil {
  def buildRecodedLocalFoodRecords(logPath: Option[String], englishLocaleName: String, localNutrientTableId: String,
    indexableFoods: Seq[UserFoodHeader], recodingTable: RecodingTable, translatedAssociatedFoods: Map[String, Seq[AssociatedFood]]) = {

    val logWriter = new CSVWriter(logPath.map(logPath => new FileWriter(new File(logPath))).getOrElse(new OutputStreamWriter(System.out)))

    logWriter.writeNext(Array("Intake24 code", "English food description", "Coding decision", s"$englishLocaleName description"))

    val records = indexableFoods.foldLeft(Map[String, NewLocalFoodRecord]()) {
      (result, header) =>

        val logHeaderCols = Array(header.code, header.localDescription)

        val associatedFoods = translatedAssociatedFoods.getOrElse(header.code, Seq())

        val portionSizeMethods = Seq() // Cannot be set here due to circular dependencies

        recodingTable.existingFoodsCoding.get(header.code) match {
          case Some(DoNotUse) => {
            logWriter.writeNext(logHeaderCols ++ Array(s"Not using in $englishLocaleName locale"))
            result + (header.code -> NewLocalFoodRecord(None, true, Map(), Seq(), Seq(), Seq()))
          }
          case Some(UseUKFoodTable(localDescription)) => {
            logWriter.writeNext(logHeaderCols ++ Array("Inheriting UK food composition table code", localDescription))
            result + (header.code -> NewLocalFoodRecord(Some(localDescription), false, Map(), portionSizeMethods, associatedFoods, Seq()))
          }
          case Some(UseLocalFoodTable(localDescription, localTableRecordId)) => {
            logWriter.writeNext(logHeaderCols ++ Array(s"Using $localNutrientTableId food composition table code", localDescription, localTableRecordId))
            result + (header.code -> NewLocalFoodRecord(Some(localDescription), false, Map(localNutrientTableId -> localTableRecordId), portionSizeMethods, associatedFoods, Seq()))
          }
          case None =>
            logWriter.writeNext(logHeaderCols ++ Array(s"Not in $englishLocaleName recoding table!"))
            result
        }
    }

    logWriter.close()

    records
  }
} 
Author: digitalinteraction | Project: intake24 | Lines: 58 | Source: RecodingTableParser.scala


Example 12: GenTestFolder

// Package declaration and imported dependencies
package com.cloudera.sa.cap1.largefileutil

import java.io.{BufferedWriter, OutputStreamWriter}
import java.util.Random

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}


object GenTestFolder {
  def main(args:Array[String]): Unit = {
    val fs = FileSystem.get(new Configuration())

    val r = new Random
    val numOfFiles = args(0).toInt
    val path = args(1)
    var currentPath = path
    for (i <- 0 until numOfFiles) {
      val n = r.nextInt(100)
      if (n % 12 == 0) {
        currentPath = path
      }
      if (n < 30) {
        //Make empty file
        fs.create(new Path(currentPath + "/" + "zeroFile." + i + ".txt")).close()
        println("new zero file:" + new Path(currentPath + "/" + "zeroFile." + i + ".txt"))
      } else if (n < 80) {
        //Make data file
        val stream = fs.create(new Path(currentPath + "/" + "dataFile." + i + ".txt"))
        val writer = new BufferedWriter(new OutputStreamWriter(stream))
        writer.write("Foobar " + n + " " + i)
        writer.close()
        println("new full file:" + new Path(currentPath + "/" + "dataFile." + i + ".txt"))
      } else {
        //make a new folder and descend into it
        val newFolder = new Path(currentPath + "/" + "folder." + i)
        fs.mkdirs(newFolder)
        currentPath = newFolder.toString
        println("new Folder:" + newFolder)
      }
    }

  }
} 
Author: tmalaska | Project: CleanUpEmptyFilesTool | Lines: 44 | Source: GenTestFolder.scala
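
It could be run against a cluster along these lines (the jar name and arguments are illustrative):

hadoop jar cleanup-tool.jar com.cloudera.sa.cap1.largefileutil.GenTestFolder 200 /tmp/gen-test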


Example 13: FileDestination

// Package declaration and imported dependencies
import java.io.{BufferedWriter, FileOutputStream, OutputStreamWriter}
import scala.concurrent.duration.{FiniteDuration, SECONDS}
import com.github.nscala_time.time.Imports._
import org.joda.time.format._
import akka.util.Timeout

package hyperion {

  class FileDestination(id: String, fileName: String, template: String) extends Pipe with Tickable {
    def selfId = id

    val writer = new BufferedWriter(new OutputStreamWriter(
      new FileOutputStream(fileName), "utf-8"))
    var lastMessage = DateTime.now
    implicit val timeout = Timeout(FiniteDuration(1, SECONDS))
    val msgTemplate = if (template == "") new MessageTemplate("<$PRIO> $DATE $HOST $PROGRAM $PID : $MESSAGE \n") else new MessageTemplate(template)
    var processed = 0

    override def preStart() = {
      super.preStart()
      startTicking(FiniteDuration(0, SECONDS), FiniteDuration(1, SECONDS))

    }

    override def postStop() = {
      stopTicking()
      writer.close()
      super.postStop()
    }

    def process = {
      case msg: Message => {
        writer.write(msgTemplate.format(msg))
        processed += 1
        lastMessage = DateTime.now
      }

      case Tick => {
        // flush if no message has arrived within the last second
        if (DateTime.now.getMillis - lastMessage.getMillis > 1000L) {
          writer.flush()
        }
      }

      case StatsRequest => {
        sender ! StatsResponse(Map[String, Int]( "processed" -> processed))
      }
    }
  }

} 
Author: talien | Project: hyperion | Lines: 53 | Source: filedestination.scala


Example 14: HelloLambda

// Package declaration and imported dependencies
package com.virtuslab.lambda.hello

import java.io.{InputStream, OutputStream, OutputStreamWriter}

import com.amazonaws.services.lambda.runtime.{Context, RequestStreamHandler}
import com.virtuslab.lambda.hello.HelloLambda.formats
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization._

object HelloLambda {
  implicit val formats = Serialization.formats(NoTypeHints)
}

class HelloLambda extends RequestStreamHandler {

  override def handleRequest(input: InputStream, output: OutputStream, context: Context): Unit = {
    context.getLogger.log(s"Log: ${context.getRemainingTimeInMillis}")
    println(s"Test ${context.getAwsRequestId}")

    try {
      val writer = new OutputStreamWriter(output, "UTF-8")
      writer.write(write(s"Fetched with ID: ${context.getAwsRequestId}"))
      writer.flush()
      writer.close()
    } catch {
      case e: Throwable => context.getLogger.log(s"exception? -> ${e.getCause}")
    }
  }
} 
Author: pdolega | Project: lambda-samples | Lines: 31 | Source: HelloLambda.scala


Example 15: Response

// Package declaration and imported dependencies
package com.virtuslab.lambda.http

import java.io.{InputStream, OutputStream, OutputStreamWriter}

import com.amazonaws.services.lambda.runtime.{Context, RequestStreamHandler}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization._
import HttpLambda.formats

case class Response(body: Option[String] = None,
                    statusCode: Int = 200,
                    headers: Map[String, Any] = Map.empty[String, Any])



object HttpLambda {
  implicit val formats = Serialization.formats(NoTypeHints)
}

class HttpLambda extends RequestStreamHandler {

  override def handleRequest(input: InputStream, output: OutputStream, context: Context): Unit = {
    context.getLogger.log(s"Log: ${context.getRemainingTimeInMillis}")
    println(s"Test ${context.getAwsRequestId}")
    
    try {
      val response = write(Response(Some(s"Fetched with ID: ${context.getAwsRequestId}")))
      context.getLogger.log(s"Generated response is: ${response}")

      val writer = new OutputStreamWriter(output, "UTF-8")
      writer.write(response)
      writer.flush()
      writer.close()
    } catch {
      case e: Throwable => context.getLogger.log(s"exception? -> ${e}")
    }
  }
} 
Author: pdolega | Project: lambda-samples | Lines: 40 | Source: HttpLambda.scala


Example 16: TransactionLog

// Package declaration and imported dependencies
package com.mchange.sc.v1.sbtethereum.repository

import java.io.{BufferedOutputStream,File,FileOutputStream,OutputStreamWriter,PrintWriter}
import java.util.Date
import java.text.SimpleDateFormat

import com.mchange.sc.v2.lang.borrow

import com.mchange.sc.v2.failable._

import com.mchange.sc.v1.consuela._
import com.mchange.sc.v1.consuela.ethereum.{EthHash,EthTransaction}

import scala.io.Codec

object TransactionLog {
  private val TimestampPattern = "yyyy-MM-dd'T'HH-mm-ssZ"

  lazy val File = Directory.map( dir => new java.io.File(dir, "transaction-log") )

  case class Entry( timestamp : Date, txn : EthTransaction.Signed, transactionHash : EthHash ) {
    override def toString() = {
      val ( ttype, payloadKey, payload ) = txn match {
        case m  : EthTransaction.Signed.Message          => ("Message", "data", m.data)
        case cc : EthTransaction.Signed.ContractCreation => ("ContractCreation", "init", cc.init)
      }
      val df = new SimpleDateFormat(TimestampPattern)
      val ts = df.format( timestamp )
      val first  = s"${ts}:type=${ttype},nonce=${txn.nonce.widen},gasPrice=${txn.gasPrice.widen},gasLimit=${txn.gasLimit.widen},value=${txn.value.widen},"
      val middle = if ( payload.length > 0 ) s"${payloadKey}=${payload.hex}," else ""
      val last   = s"v=${txn.v.widen},r=${txn.r.widen},s=${txn.s.widen},transactionHash=${transactionHash.bytes.hex}"
      first + middle + last
    }
  }

  def logTransaction( transaction : EthTransaction.Signed, transactionHash : EthHash ) : Unit = {
    File.flatMap { file =>
      Failable {
        val entry = TransactionLog.Entry( new Date(), transaction, transactionHash ) 
        borrow( new PrintWriter( new OutputStreamWriter( new BufferedOutputStream( new FileOutputStream( file, true ) ), Codec.UTF8.charSet ) ) )( _.println( entry ) )
      }
    }.get // Unit or vomit Exception
  }
} 
Author: swaldman | Project: sbt-ethereum | Lines: 45 | Source: TransactionLog.scala


Example 17: json4sSerializer

// Package declaration and imported dependencies
package com.ovoenergy.kafka.serialization.json4s

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets

import com.ovoenergy.kafka.serialization.core._
import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer, Serializer => KafkaSerializer}
import org.json4s.Formats
import org.json4s.native.Serialization.{read, write}

import scala.reflect.ClassTag
import scala.reflect.runtime.universe._

trait Json4sSerialization {

  def json4sSerializer[T <: AnyRef](implicit jsonFormats: Formats): KafkaSerializer[T] = serializer { (_, data) =>
    val bout = new ByteArrayOutputStream()
    val writer = new OutputStreamWriter(bout, StandardCharsets.UTF_8)

    // TODO Use scala-arm
    try {
      write(data, writer)
      writer.flush()
    } finally {
      writer.close()
    }
    bout.toByteArray
  }

  def json4sDeserializer[T: TypeTag](implicit jsonFormats: Formats): KafkaDeserializer[T] = deserializer { (_, data) =>
    val tt = implicitly[TypeTag[T]]
    implicit val cl = ClassTag[T](tt.mirror.runtimeClass(tt.tpe))
    read[T](new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))
  }

} 
Author: ovotech | Project: kafka-serialization | Lines: 37 | Source: Json4sSerialization.scala
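
A round-trip sketch using the trait (the codec object, topic name, and Event case class are illustrative):

import org.json4s.{DefaultFormats, Formats}

final case class Event(id: String, value: Int)

object Json4sExample extends App with Json4sSerialization {
  implicit val formats: Formats = DefaultFormats

  val bytes = json4sSerializer[Event].serialize("events", Event("a", 1))
  println(json4sDeserializer[Event].deserialize("events", bytes)) // Event(a,1)
}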


Example 18: spraySerializer

// Package declaration and imported dependencies
package com.ovoenergy.kafka.serialization.spray

import java.io.{ByteArrayOutputStream, OutputStreamWriter}
import java.nio.charset.StandardCharsets

import org.apache.kafka.common.serialization.{Deserializer => KafkaDeserializer, Serializer => KafkaSerializer}
import spray.json._
import com.ovoenergy.kafka.serialization.core._

trait SpraySerialization {

  def spraySerializer[T](implicit format: JsonWriter[T]): KafkaSerializer[T] = serializer { (_, data) =>
    val bout = new ByteArrayOutputStream()
    val osw = new OutputStreamWriter(bout, StandardCharsets.UTF_8)

    // TODO use scala-arm
    try {
      osw.write(data.toJson.compactPrint)
      osw.flush()
    } finally {
      osw.close()
    }
    bout.toByteArray
  }

  def sprayDeserializer[T](implicit format: JsonReader[T]): KafkaDeserializer[T] = deserializer { (_, data) =>
    JsonParser(ParserInput(data)).convertTo[T]
  }

} 
Author: ovotech | Project: kafka-serialization | Lines: 31 | Source: SpraySerialization.scala
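
And the spray-json analogue (the codec object, topic name, and User case class are illustrative):

import spray.json.DefaultJsonProtocol._

final case class User(name: String, age: Int)

object SprayExample extends App with SpraySerialization {
  implicit val userFormat = jsonFormat2(User)

  val bytes = spraySerializer[User].serialize("users", User("ada", 36))
  println(sprayDeserializer[User].deserialize("users", bytes)) // User(ada,36)
}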



Note: The java.io.OutputStreamWriter class examples in this article were collected from open-source projects hosted on GitHub and similar platforms; the code remains the copyright of its original authors. Consult each project's license before using or redistributing the code, and do not republish without permission.

