This article collects typical usage examples of the java.io.InputStream class in Scala. If you have been wondering what the InputStream class is for, how to use it, or what real-world InputStream code looks like, the curated class examples here should help.
Twenty code examples of the InputStream class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
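Before the project code, here is a minimal orientation sketch (not taken from any of the projects below) showing the most common InputStream pattern in Scala: draining a stream into a String. The helper name readAllText and the sample input are purely illustrative.
import java.io.{ByteArrayInputStream, InputStream}
import scala.io.Source
// Minimal sketch: read an InputStream fully into a UTF-8 String, closing the Source afterwards.
def readAllText(in: InputStream): String = {
  val source = Source.fromInputStream(in, "UTF-8")
  try source.mkString finally source.close()
}
// Usage with an in-memory stream:
readAllText(new ByteArrayInputStream("hello".getBytes("UTF-8"))) // returns "hello"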
Example 1: parse
// Package declaration and imports of the required classes
package parsers
import java.io.{InputStream, InputStreamReader}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, Paths}
import javax.script.ScriptEngineManager
import com.google.common.base.Charsets
import com.google.common.io.CharStreams
import org.luaj.vm2.{LuaTable, LuaValue}
import scala.collection.breakOut
import scala.io.Source
trait FactorioParser[T] {
import FactorioParser._
def parse(path: String): Seq[T] = commonParse(readAll(path))
def parse(path: Path): Seq[T] = commonParse(readAll(path))
def parse(is: InputStream): Seq[T] = {
val str = CharStreams.toString(new InputStreamReader(is, Charsets.UTF_8))
commonParse(str)
}
def transport(table: LuaTable): Option[T]
private[this] def commonParse(target: String): Seq[T] = {
val dataLua = Source.fromURL(getClass.getResource("/data.lua")).mkString
val lua = dataLua + target
val engine = manager.getEngineByName("luaj")
engine.eval(lua)
val array: LuaTable = engine.get("array").asInstanceOf[LuaTable]
tableToSeq(array)(_.checktable()).flatMap(transport)
}
}
object FactorioParser {
private val manager = new ScriptEngineManager()
def readAll(path: String): String = readAll(Paths.get(path))
def readAll(path: Path): String =
new String(Files.readAllBytes(path), StandardCharsets.UTF_8)
def tableToSeq[T](table: LuaTable)(f: LuaValue => T): Seq[T] = {
table.keys().map(table.get).map(f)(breakOut)
}
def tableToMap[K, V](table: LuaTable)(f: LuaValue => K)(g: LuaValue => V): Map[K, V] = {
table.keys().map { key =>
f(key) -> g(table.get(key))
}(breakOut)
}
}
Author: ponkotuy, Project: FactorioRecipe, Lines: 57, Source file: FactorioParser.scala
Example 2: StorageLocation
// Package declaration and imports of the required classes
package hu.blackbelt.cd.bintray.deploy
import java.io.InputStream
import java.nio.file.Path
import com.typesafe.scalalogging.LazyLogging
case class StorageLocation(bucket: String, key: String) {
override def toString = s"s3://$bucket/$key"
}
case class Project(location: StorageLocation, name: String, version: String)
class Deploy(project: Project) extends LazyLogging {
logger.info("collecting access properties")
Access.collect
logger.info("access info in possession")
def fetch = S3Get.get(project.location.bucket, project.location.key) _
private def selectArt(art: Art)(selector: Art => Path) = {
val subject = selector(art)
val key = s"${art.groupId.replace('.', '/')}/${art.artifactId}/${art.version}/${subject.getFileName}"
(key, subject)
}
def upload(archive: InputStream, batchSize: Int = 30) = {
val artifacts = TarGzExtract.getArtifacts(archive)
val batches = artifacts.sliding(batchSize, batchSize).map(arts => {
val mapped = arts.flatMap { art =>
val select = selectArt(art) _
List(select(_.artifact), select(_.pomFile))
}
Batch(mapped)
}
).toList
val b = new Btray
val ver = b.version("releases", project.name, project.version)
ver.map(
b.uploadTo(_, batches)
)
}
}
Author: tsechov, Project: s3-bintray-deploy, Lines: 49, Source file: Deploy.scala
Example 3: CommandReader
// Package declaration and imports of the required classes
package pubsub.command
import java.io.BufferedReader
import java.io.InputStreamReader
import java.io.InputStream
import pubsub.Client
class CommandReader(inStream: InputStream, client: Client) {
val inputBuffer = new BufferedReader(new InputStreamReader(inStream))
def fetchCommand(): Command = {
val line = inputBuffer.readLine()
if (line == null || line.startsWith("leave")) {
EndOfClient(client)
}
else {
val quoteIndex = line.indexOf('\'')
val hasPayload = quoteIndex != -1
val parts =
if(!hasPayload) {
line.split(" ").toList
} else {
val (command, payload) = line.splitAt(quoteIndex)
command.split(" ").toList :+ payload
}
parts match {
case "subscribe" :: topic :: Nil => Subscribe(topic, client)
case "unsubscribe" :: topic :: Nil => Unsubscribe(topic, client)
case "rename" :: newName :: Nil => Rename(newName, client)
case "publish" :: topic :: msg :: Nil if hasPayload && msg != "\'" =>
var message = msg
while(!message.endsWith("\'")) {
message += "\n" + inputBuffer.readLine()
}
Publish(topic, message, client)
case _ => MalformedCommand(client)
}
}
}
}
Author: vincenzobaz, Project: Parallelism-and-Concurrency-Assignments, Lines: 46, Source file: CommandReader.scala
Example 4: Codec
// Package declaration and imports of the required classes
package at.hazm.quebic
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, InputStream, OutputStream}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import scala.annotation.tailrec
sealed abstract class Codec(val id:Byte, val name:String) extends Type {
def encode(buffer:Array[Byte]):Array[Byte]
def decode(buffer:Array[Byte]):Array[Byte]
}
object Codec {
val values:Seq[Codec] = Seq(PLAIN, GZIP)
private[this] val valuesMap = values.groupBy(_.id).mapValues(_.head)
def valueOf(id:Byte):Codec = valuesMap(id)
case object PLAIN extends Codec(0, "plain") {
def encode(buffer:Array[Byte]):Array[Byte] = buffer
def decode(buffer:Array[Byte]):Array[Byte] = buffer
}
case object GZIP extends Codec(1, "gzip") {
def encode(buffer:Array[Byte]):Array[Byte] = {
val baos = new ByteArrayOutputStream()
val out = new GZIPOutputStream(baos)
out.write(buffer)
out.finish()
out.close()
baos.toByteArray
}
def decode(buffer:Array[Byte]):Array[Byte] = {
val in = new GZIPInputStream(new ByteArrayInputStream(buffer))
val out = new ByteArrayOutputStream()
_copy(in, out, new Array[Byte](2014))
out.close()
out.toByteArray
}
}
@tailrec
private[Codec] def _copy(in:InputStream, out:OutputStream, buffer:Array[Byte]):Unit = {
val len = in.read(buffer)
if(len > 0) {
out.write(buffer, 0, len)
_copy(in, out, buffer)
}
}
}
Author: torao, Project: quebic, Lines: 55, Source file: Codec.scala
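A quick usage sketch for the Codec above (not part of the original Codec.scala): a GZIP round-trip should return the original bytes.
// Hedged usage sketch for Example 4; the input string is arbitrary.
val raw = "hello quebic".getBytes("UTF-8")
val packed = Codec.GZIP.encode(raw)
val unpacked = Codec.GZIP.decode(packed)
assert(unpacked.sameElements(raw))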
Example 5: readLine
// Package declaration and imports of the required classes
package org.edoardo.parser
import java.io.InputStream
// Note: the enclosing declaration is truncated in this excerpt; an object named after the source file is assumed here so the snippet compiles.
object Parser {
def readLine(implicit in: InputStream): String = {
var out = ""
var b: Int = in.read
while (b != 0xA) {
out += b.toChar
b = in.read
}
out
}
}
Author: EdoDodo, Project: rl-segmentation, Lines: 16, Source file: Parser.scala
Example 6: streamToBytes
// Package declaration and imports of the required classes
package akka.persistence
import java.io.{ ByteArrayOutputStream, InputStream }
package object serialization {
def streamToBytes(inputStream: InputStream): Array[Byte] = {
val len = 16384
val buf = Array.ofDim[Byte](len)
val out = new ByteArrayOutputStream
@scala.annotation.tailrec
def copy(): Array[Byte] = {
val n = inputStream.read(buf, 0, len)
if (n != -1) { out.write(buf, 0, n); copy() } else out.toByteArray
}
copy()
}
}
Author: love1314sea, Project: akka-2.3.16, Lines: 21, Source file: package.scala
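A hedged usage sketch for streamToBytes above (not part of the Akka sources): any InputStream can be drained, here an in-memory one.
// Hedged usage sketch for Example 6; the byte values are arbitrary.
import java.io.ByteArrayInputStream
import akka.persistence.serialization.streamToBytes
val bytes = streamToBytes(new ByteArrayInputStream(Array[Byte](1, 2, 3)))
assert(bytes.sameElements(Array[Byte](1, 2, 3)))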
Example 7: MessageScalaTest
// Package declaration and imports of the required classes
package akka.camel
import java.io.InputStream
import org.apache.camel.NoTypeConversionAvailableException
import akka.camel.TestSupport.{ SharedCamelSystem }
import org.scalatest.FunSuite
import org.scalatest.Matchers
import org.apache.camel.converter.stream.InputStreamCache
class MessageScalaTest extends FunSuite with Matchers with SharedCamelSystem {
implicit def camelContext = camel.context
test("mustConvertDoubleBodyToString") {
CamelMessage(1.4, Map.empty).bodyAs[String] should be("1.4")
}
test("mustThrowExceptionWhenConvertingDoubleBodyToInputStream") {
intercept[NoTypeConversionAvailableException] {
CamelMessage(1.4, Map.empty).bodyAs[InputStream]
}
}
test("mustConvertDoubleHeaderToString") {
val message = CamelMessage("test", Map("test" -> 1.4))
message.headerAs[String]("test").get should be("1.4")
}
test("mustReturnSubsetOfHeaders") {
val message = CamelMessage("test", Map("A" -> "1", "B" -> "2"))
message.headers(Set("B")) should be(Map("B" -> "2"))
}
test("mustTransformBodyAndPreserveHeaders") {
CamelMessage("a", Map("A" -> "1")).mapBody((body: String) ? body + "b") should be(CamelMessage("ab", Map("A" -> "1")))
}
test("mustConvertBodyAndPreserveHeaders") {
CamelMessage(1.4, Map("A" -> "1")).withBodyAs[String] should be(CamelMessage("1.4", Map("A" -> "1")))
}
test("mustSetBodyAndPreserveHeaders") {
CamelMessage("test1", Map("A" -> "1")).copy(body = "test2") should be(
CamelMessage("test2", Map("A" -> "1")))
}
test("mustSetHeadersAndPreserveBody") {
CamelMessage("test1", Map("A" -> "1")).copy(headers = Map("C" -> "3")) should be(
CamelMessage("test1", Map("C" -> "3")))
}
test("mustBeAbleToReReadStreamCacheBody") {
val msg = CamelMessage(new InputStreamCache("test1".getBytes("utf-8")), Map.empty)
msg.bodyAs[String] should be("test1")
// re-read
msg.bodyAs[String] should be("test1")
}
}
Author: love1314sea, Project: akka-2.3.16, Lines: 57, Source file: MessageScalaTest.scala
Example 8: CaseInsensitiveInputStream
// Package declaration and imports of the required classes
package io.github.nawforce.apexlink.utils
import java.io.{InputStream, InputStreamReader, Reader}
import org.antlr.v4.runtime.{ANTLRInputStream, IntStream}
class CaseInsensitiveInputStream(r: Reader, initialSize: Int, readChunkSize: Int)
extends ANTLRInputStream(r, initialSize, readChunkSize) {
// lazy is important here because it needs the initialised data[] array, which is loaded by the superclass constructor
private lazy val lowercaseData: Array[Char] = data.map(_.toLower)
def this(r: Reader) {
this(r, initialSize = 1024, readChunkSize = 1024)
}
def this(input: InputStream) {
this(new InputStreamReader(input), initialSize = 1024, readChunkSize = 1024)
}
override def LA(index: Int): Int = {
var i = index
if (i == 0) {
return 0
}
if (i < 0) {
i += 1
if ((p + i - 1) < 0) {
return IntStream.EOF
}
}
if ((p + i - 1) >= n) {
return IntStream.EOF
}
if (null != lowercaseData) {
lowercaseData(p + i - 1)
} else {
data(p + i - 1).toLower
}
}
def dump(): Unit = {
var i = 0
var value = 0
do {
value = LA(i)
i += 1
print(value.asInstanceOf[Char])
} while (value != IntStream.EOF)
}
}
Author: nawforce, Project: ApexLink, Lines: 55, Source file: CaseInsensitiveInputStream.scala
Example 9: ConcreteFilesystem
// Package declaration and imports of the required classes
package eu.tznvy.jancy.transpiler.helpers
import java.io.InputStream
import java.nio.file.{Files, Path}
import scala.collection.JavaConverters._
import scala.util.Try
class ConcreteFilesystem extends Filesystem {
override def createDirectories(path: Path): Unit =
Files.createDirectories(path)
override def writeFile(path: Path, content: String): Unit = {
Files.createDirectories(path.getParent)
Files.write(path, content.getBytes)
}
override def readFile(path: Path): Option[String] =
Try { Files.readAllLines(path) }
.map(_.asScala.mkString("\n"))
.toOption
override def testPath(path: Path): Boolean =
Files.exists(path)
override def copy(from: InputStream, to: Path): Unit =
Files.copy(from, to)
}
Author: brthanmathwoag, Project: jancy, Lines: 30, Source file: ConcreteFilesystem.scala
Example 10: EntityDataFilter
// Package declaration and imports of the required classes
package org.dele.misc
import java.io.InputStream
import org.apache.commons.io.IOUtils
import org.dele.misc.EntityData.EntDetail
object EntityDataFilter extends App {
import tgz.TgzUtil._
val defaultEncoding = "UTF-8"
def extractOne(in:InputStream):Map[String,EntDetail] = {
val instr = IOUtils.toString(in, defaultEncoding)
val entData = EntityData.Ser.p(instr)
entData.entity_details.entMap.filter(_._2.curated == 1)
}
def extract(path:String):Map[String, EntDetail] = processAllFiles(path, extractOne).reduce(_ ++ _)
private val datePartLength = 10
def processGroupByDate(em:Map[String,EntDetail], days:Int) = {
val dateGroups = em.groupBy(_._2.created.map(_.substring(0,datePartLength)))
val sortedGroups = dateGroups.toIndexedSeq.sortBy(_._1)(Ordering[Option[String]].reverse).take(days)
sortedGroups.foreach{ g =>
println(s"${g._1} (${g._2.size})")
val sorted = sortByCreatedDesc(g._2.values.toSeq)
sorted.foreach(e => println(s"\t$e"))
}
}
def sortByCreatedDesc(seq:Seq[EntDetail]):Seq[EntDetail] = seq.sortBy(_.created)(Ordering[Option[String]].reverse)
def processBatch(em:Map[String,EntDetail], tag:String, latestCount:Int) = {
val checkedEntities = em.toList.filter(_._2.curated == 1).toMap
println("=====================================================\n")
println(s"\n\n================== batch tag $tag ===================\n\n")
println("=====================================================\n")
println(s"Checked entity count: ${checkedEntities.size}")
//val checkedByDate = checkedEntities.sortBy(_._2.created)(Ordering[Option[String]].reverse).take(20)
processGroupByDate(checkedEntities, latestCount)
//val uncheckedByDate = em.toIndexedSeq.sortBy(_._2.created)(Ordering[Option[String]].reverse).take(30)
//println(checkedByDate.map(_._2).mkString("\n"))
println("\n\n=====================================================\n\n")
//println(uncheckedByDate.map(_._2).mkString("\n"))
processGroupByDate(em, latestCount)
}
def checked(path:String) = {
val entMap = extract(path)
println(entMap.keys.mkString("[\"", "\", \"", "\"]"))
}
checked(
"E:\\VMShare\\facility-161129-21.tgz"
)
}
Author: new2scala, Project: text-util, Lines: 60, Source file: EntityDataFilter.scala
Example 11: getFileStream
// Package declaration and imports of the required classes
package com.danylchuk.swiftlearner.hotels
import java.io.{BufferedInputStream, InputStream}
import java.util.zip.GZIPInputStream
import scala.collection.mutable.{Map => MutableMap}
import scala.io.Source
// Note: the enclosing object and some members (e.g. cityIds, destIds) are truncated in this excerpt; an object named after the source file is assumed here.
object SearchData {
private lazy val testDataIdMapped: Vector[SearchRecord] = {
testDataTyped.map { record =>
val userCity = cityIds.getOrElse(record.userCity, 0)
val dest = destIds.getOrElse(record.dest, 0)
SearchRecord(userCity, record.distance, dest)
}.toVector
}
private lazy val trainDataTyped: Iterator[SearchRecord] = readData(trainDataFile)
private lazy val testDataTyped: Iterator[SearchRecord] = readData(testDataFile)
private lazy val trainDataFile = getFileStream("train-data.csv.gz")
private lazy val trainLabelsFile = getFileStream("train-labels.csv.gz")
private lazy val testDataFile = getFileStream("test-data.csv.gz")
private lazy val testLabelsFile = getFileStream("test-labels.csv.gz")
private def getFileStream(name: String): InputStream = {
new BufferedInputStream(new GZIPInputStream(
this.getClass.getClassLoader.getResourceAsStream(name)))
}
private def readLabels(stream: InputStream): Iterator[Int] =
Source.fromInputStream(stream, "UTF8").getLines.map(_.toInt)
private def readData(stream: InputStream): Iterator[SearchRecord] = {
Source.fromInputStream(stream, "UTF8").getLines.map(SearchRecord.fromString)
}
}
case class SearchRecord(userCity: Int, distance: Double, dest: Int)
object SearchRecord {
def fromString(s: String) = {
val fields = s.split(',')
val userCity = fields(0).toInt
val distance = fields(1).toDouble
val dest = fields(2).toInt
SearchRecord(userCity, distance, dest)
}
}
Author: valdanylchuk, Project: swiftlearner, Lines: 51, Source file: SearchData.scala
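A hedged usage sketch for SearchRecord.fromString above (not part of the project): it expects a comma-separated line of userCity, distance and dest.
// Hedged usage sketch for Example 11; the field values are arbitrary.
val record = SearchRecord.fromString("123,4.5,678")
assert(record == SearchRecord(123, 4.5, 678))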
Example 12: createBucket
// Package declaration and imports of the required classes
package akka.persistence.s3
import java.io.InputStream
import com.amazonaws.auth.{ BasicAWSCredentials, DefaultAWSCredentialsProviderChain }
import com.amazonaws.services.s3.{ S3ClientOptions, AmazonS3Client }
import com.amazonaws.services.s3.model._
import scala.concurrent.{ Future, ExecutionContext }
trait S3Client {
val s3ClientConfig: S3ClientConfig
lazy val client: AmazonS3Client = {
val client =
if (s3ClientConfig.awsUseDefaultCredentialsProviderChain)
new AmazonS3Client(new DefaultAWSCredentialsProviderChain).withRegion(s3ClientConfig.region)
else
new AmazonS3Client(new BasicAWSCredentials(s3ClientConfig.awsKey, s3ClientConfig.awsSecret))
s3ClientConfig.endpoint.foreach { endpoint =>
client.withEndpoint(endpoint)
()
}
client.setS3ClientOptions(new S3ClientOptions()
.withPathStyleAccess(s3ClientConfig.options.pathStyleAccess)
.withChunkedEncodingDisabled(s3ClientConfig.options.chunkedEncodingDisabled))
client
}
def createBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Bucket] = Future {
client.createBucket(bucketName)
}
def deleteBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
client.deleteBucket(bucketName)
}
def putObject(bucketName: String, key: String, input: InputStream, metadata: ObjectMetadata)(implicit ec: ExecutionContext): Future[PutObjectResult] = Future {
client.putObject(new PutObjectRequest(bucketName, key, input, metadata))
}
def getObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[S3Object] = Future {
client.getObject(new GetObjectRequest(bucketName, key))
}
def listObjects(request: ListObjectsRequest)(implicit ec: ExecutionContext): Future[ObjectListing] = Future {
client.listObjects(request)
}
def deleteObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
client.deleteObject(bucketName, key)
}
def deleteObjects(request: DeleteObjectsRequest)(implicit ec: ExecutionContext): Future[Unit] = Future {
client.deleteObjects(request)
}
}
Author: TanUkkii007, Project: akka-persistence-s3, Lines: 59, Source file: S3Client.scala
Example 13: Application
// Package declaration and imports of the required classes
package controllers
import java.io.{IOException, InputStream}
import models.TheWord
import play.api._
import play.api.mvc._
import scala.collection.mutable
object Application extends Controller {
implicit val app = Play.current
def index = Action {
val real = getThatText("sample1.txt")
val result = doTheJobFun {
mutable.Seq(real: _*)
}
Ok(views.html.index(real.mkString("\n"), result))
}
def test = Action {
Ok(getThatText("sample1.txt").mkString("\n"))
}
def load(filePath: String): InputStream = {
Play.resourceAsStream("public/inputs/" concat filePath).getOrElse(throw new IOException("file not found: " + filePath))
}
def getThatText(fileName: String) = {
// val source = scala.io.Source.fromFile(app.getFile(fileName))("UTF-8")
val source = scala.io.Source.fromInputStream(load(fileName))("UTF-8")
try source.getLines().toList
catch {
case e: Exception => e.printStackTrace(); null
}
finally source.close()
}
val doTheJobFun = (text: mutable.Seq[String]) => {
text.flatMap(_.split("[.?!:]"))
.map(_.split("\\s+").find(_.nonEmpty).getOrElse(""))
.filter(_.matches("[a-zA-Z].*"))
.filter(!_.equals(""))
.map { p => TheWord(p, p.reverse) }
.toList
}
}
Author: muhrifqii, Project: learn-to-play-scala, Lines: 52, Source file: Application.scala
Example 14: PelagiosRDFCrosswalk
// Package declaration and imports of the required classes
package models.place.crosswalks
import java.io.InputStream
import models.place._
import org.joda.time.{ DateTime, DateTimeZone }
import org.pelagios.Scalagios
import org.pelagios.api.PeriodOfTime
import java.io.File
import java.io.FileInputStream
object PelagiosRDFCrosswalk {
private def convertPeriodOfTime(period: PeriodOfTime): TemporalBounds = {
val startDate = period.start
val endDate = period.end.getOrElse(startDate)
TemporalBounds(
new DateTime(startDate).withZone(DateTimeZone.UTC),
new DateTime(endDate).withZone(DateTimeZone.UTC))
}
def fromRDF(filename: String): InputStream => Seq[GazetteerRecord] = {
val sourceGazetteer = Gazetteer(filename.substring(0, filename.indexOf('.')))
def convertPlace(place: org.pelagios.api.gazetteer.Place): GazetteerRecord =
GazetteerRecord(
GazetteerRecord.normalizeURI(place.uri),
sourceGazetteer,
DateTime.now().withZone(DateTimeZone.UTC),
None,
place.label,
place.descriptions.map(l => Description(l.chars, l.lang)),
place.names.map(l => Name(l.chars, l.lang)),
place.location.map(_.geometry),
place.location.map(_.pointLocation),
place.temporalCoverage.map(convertPeriodOfTime(_)),
place.category.map(category => Seq(category.toString)).getOrElse(Seq.empty[String]),
None, // country code
None, // population
place.closeMatches.map(GazetteerRecord.normalizeURI(_)),
place.exactMatches.map(GazetteerRecord.normalizeURI(_)))
// Return crosswalk function
{ stream: InputStream =>
Scalagios.readPlaces(stream, filename).map(convertPlace).toSeq }
}
def readFile(file: File): Seq[GazetteerRecord] =
fromRDF(file.getName)(new FileInputStream(file))
}
Author: pelagios, Project: recogito2, Lines: 53, Source file: PelagiosRDFCrosswalk.scala
Example 15: DumpImporter
// Package declaration and imports of the required classes
package controllers.admin.gazetteers
import java.io.{ InputStream, File, FileInputStream }
import java.util.zip.GZIPInputStream
import models.place.{ GazetteerRecord, PlaceService }
import play.api.Logger
import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration._
class DumpImporter {
private def getStream(file: File, filename: String) =
if (filename.endsWith(".gz"))
new GZIPInputStream(new FileInputStream(file))
else
new FileInputStream(file)
def importDump(file: File, filename: String, crosswalk: InputStream => Seq[GazetteerRecord])(implicit places: PlaceService, ctx: ExecutionContext) = {
val records = crosswalk(getStream(file, filename))
Logger.info("Importing " + records.size + " records")
Await.result(places.importRecords(records), 60.minute)
}
}
Author: pelagios, Project: recogito2, Lines: 25, Source file: DumpImporter.scala
Example 16: Response
// Package declaration and imports of the required classes
package korolev.server
import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.StandardCharsets
sealed trait Response
object Response {
case class Http(status: Status,
body: Option[InputStream] = None,
headers: Seq[(String, String)] = Seq.empty)
extends Response
object Http {
def apply(status: Status, message: String): Http = {
val bytes = message.getBytes(StandardCharsets.UTF_8)
val body = new ByteArrayInputStream(bytes)
Http(status, Some(body))
}
}
case class WebSocket(publish: String => Unit,
subscribe: (String => Unit) => Unit,
destroyHandler: () => Unit)
extends Response
sealed trait Status {
def code: Int
def phrase: String
}
object Status {
case object Ok extends Status {
val code = 200
val phrase = "OK"
}
case object BadRequest extends Status {
val code = 400
val phrase = "Bad Request"
}
case object Gone extends Status {
val code = 410
val phrase = "Gone"
}
}
}
Author: techyogillc, Project: ServerSideScalaCode, Lines: 49, Source file: Response.scala
Example 17: JavaDecoder
// Package declaration and imports of the required classes
package knot.data.serialization.j
import java.io.{ByteArrayInputStream, InputStream, ObjectInputStream}
import knot.data.serialization.{Decoder, Deserializer, DeserializerFactory}
import scala.reflect.runtime.universe
class JavaDecoder(in: InputStream) extends ObjectInputStream(in) with Decoder{
}
class JavaDeserializerFactory extends DeserializerFactory[JavaDecoder] {
override def get[T: universe.TypeTag](): Deserializer[JavaDecoder, T] = {
new Deserializer[JavaDecoder, T] {
override def deserialize(bytes: Array[Byte]): T = {
val jd = new JavaDecoder(new ByteArrayInputStream(bytes))
try {
deserialize(jd)
} finally {
jd.close()
}
}
override def deserialize(decoder: JavaDecoder): T = {
decoder.readObject().asInstanceOf[T]
}
}
}
override def register[T: universe.TypeTag](): Unit = {
}
override def register[T: universe.TypeTag](deser: Deserializer[JavaDecoder, T]): Unit = {
}
}
Author: defvar, Project: knot, Lines: 37, Source file: JavaDeserializerFactory.scala
Example 18: AmandroidSettings
// Package declaration and imports of the required classes
package org.argus.amandroid.core
import java.io.{File, FileInputStream, InputStream}
import org.ini4j.Wini
import org.argus.jawa.core.util.FileUtil
class AmandroidSettings(amandroid_home: String, iniPathOpt: Option[String]) {
private val amandroid_home_uri = FileUtil.toUri(amandroid_home)
private def defaultLibFiles =
amandroid_home + "/androidSdk/android-25/android.jar" + java.io.File.pathSeparator +
amandroid_home + "/androidSdk/support/v4/android-support-v4.jar" + java.io.File.pathSeparator +
amandroid_home + "/androidSdk/support/v13/android-support-v13.jar" + java.io.File.pathSeparator +
amandroid_home + "/androidSdk/support/v7/android-support-v7-appcompat.jar"
private def defaultThirdPartyLibFile = amandroid_home + "/liblist.txt"
private val iniUri = {
iniPathOpt match {
case Some(path) => FileUtil.toUri(path)
case None => FileUtil.appendFileName(amandroid_home_uri, "config.ini")
}
}
private val ini = new Wini(FileUtil.toFile(iniUri))
def timeout: Int = Option(ini.get("analysis", "timeout", classOf[Int])).getOrElse(5)
def dependence_dir: Option[String] = Option(ini.get("general", "dependence_dir", classOf[String]))
def debug: Boolean = ini.get("general", "debug", classOf[Boolean])
def lib_files: String = Option(ini.get("general", "lib_files", classOf[String])).getOrElse(defaultLibFiles)
def third_party_lib_file: String = Option(ini.get("general", "third_party_lib_file", classOf[String])).getOrElse(defaultThirdPartyLibFile)
def actor_conf_file: InputStream = Option(ini.get("concurrent", "actor_conf_file", classOf[String])) match {
case Some(path) => new FileInputStream(path)
case None => getClass.getResourceAsStream("/application.conf")
}
def static_init: Boolean = ini.get("analysis", "static_init", classOf[Boolean])
def parallel: Boolean = ini.get("analysis", "parallel", classOf[Boolean])
def k_context: Int = ini.get("analysis", "k_context", classOf[Int])
def sas_file: String = Option(ini.get("analysis", "sas_file", classOf[String])).getOrElse(amandroid_home + File.separator + "taintAnalysis" + File.separator + "sourceAndSinks" + File.separator + "TaintSourcesAndSinks.txt")
def injection_sas_file: String = Option(ini.get("analysis", "injection_sas_file", classOf[String])).getOrElse(amandroid_home + File.separator + "taintAnalysis" + File.separator + "sourceAndSinks" + File.separator + "IntentInjectionSourcesAndSinks.txt")
}
Author: arguslab, Project: Argus-SAF, Lines: 39, Source file: AmandroidSettings.scala
Example 19: ZipUtil
// Package declaration and imports of the required classes
package org.argus.jawa.core.util
import java.io.{File, FileOutputStream, InputStream, OutputStream}
import java.util.zip.{ZipEntry, ZipFile}
import scala.collection.JavaConverters._
object ZipUtil {
val BUFSIZE = 4096
val buffer = new Array[Byte](BUFSIZE)
def unZip(source: String, targetFolder: String): Boolean = {
val zipFile = new ZipFile(source)
unzipAllFile(zipFile.entries.asScala.toList, getZipEntryInputStream(zipFile), new File(targetFolder))
}
def getZipEntryInputStream(zipFile: ZipFile)(entry: ZipEntry): InputStream = zipFile.getInputStream(entry)
def unzipAllFile(entryList: List[ZipEntry], inputGetter: (ZipEntry) => InputStream, targetFolder: File): Boolean = {
entryList match {
case entry :: entries =>
if (entry.isDirectory)
new File(targetFolder, entry.getName).mkdirs
else
saveFile(inputGetter(entry), new FileOutputStream(new File(targetFolder, entry.getName)))
unzipAllFile(entries, inputGetter, targetFolder)
case _ =>
true
}
}
def saveFile(fis: InputStream, fos: OutputStream): Unit = {
writeToFile(bufferReader(fis), fos)
fis.close()
fos.close()
}
def bufferReader(fis: InputStream)(buffer: Array[Byte]): (Int, Array[Byte]) = (fis.read(buffer), buffer)
def writeToFile(reader: (Array[Byte]) => ((Int, Array[Byte])), fos: OutputStream): Boolean = {
val (length, data) = reader(buffer)
if (length >= 0) {
fos.write(data, 0, length)
writeToFile(reader, fos)
} else
true
}
}
Author: arguslab, Project: Argus-SAF, Lines: 55, Source file: ZipUtil.scala
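A hedged usage sketch for ZipUtil.unZip above (not part of Argus-SAF); the archive and target directory paths are placeholders.
// Hedged usage sketch for Example 19; both paths are placeholders.
import org.argus.jawa.core.util.ZipUtil
ZipUtil.unZip("/tmp/example.zip", "/tmp/example-extracted")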
Example 20: read
// Package declaration and imports of the required classes
package chapter10
import java.io.{BufferedInputStream, FileInputStream, InputStream}
trait BufferedInput {
this: InputStream =>
val size: Int = 100
val buffer = new BufferedInputStream(this, size)
override def read(): Int = buffer.read
}
object Test08 extends App {
val txtFile = getClass.getResource("/sample.txt")
val file = new FileInputStream(new java.io.File(txtFile.getPath)) with BufferedInput
var byte = file.read()
while (byte != -1){
print(byte.toChar)
byte = file.read()
}
// print(Iterator.continually(file.read).takeWhile(_ != -1).map(_.toChar).mkString)
}
Author: johnnyqian, Project: scala-for-the-impatient, Lines: 25, Source file: 08.scala
Note: The java.io.InputStream class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; please consult the corresponding project's license before redistributing or reusing the code. Do not reproduce this article without permission.