This article collects typical usage examples of the java.util.concurrent.TimeUnit class in Scala. If you are wondering what the TimeUnit class is for, how to call it from Scala, or what real-world usage looks like, the curated examples below should help.
The sections that follow present 20 code examples of the TimeUnit class, sorted by popularity by default.
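Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the object name TimeUnitBasics is invented for illustration) showing the TimeUnit operations that recur throughout them: converting between units, bridging to scala.concurrent.duration.FiniteDuration, and passing a timeout to a blocking call such as CountDownLatch.await.
import java.util.concurrent.{CountDownLatch, TimeUnit}
import scala.concurrent.duration.FiniteDuration
object TimeUnitBasics extends App {
  // Unit conversion: 90 seconds expressed in minutes (truncates to 1)
  println(TimeUnit.SECONDS.toMinutes(90))
  // Generic conversion: 2 hours expressed in milliseconds (7200000)
  println(TimeUnit.MILLISECONDS.convert(2, TimeUnit.HOURS))
  // TimeUnit is the standard bridge to Scala's FiniteDuration
  val twoDays = FiniteDuration(2, TimeUnit.DAYS)
  println(twoDays.toSeconds) // 172800
  // Timed blocking: wait at most one second for a latch that never opens
  val latch = new CountDownLatch(1)
  val opened = latch.await(1, TimeUnit.SECONDS) // returns false on timeout
  println(s"latch opened: $opened")
}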
Example 1: Metrics
// Package declaration and required imports
package com.onur.moviedb.metric
import java.lang.management.ManagementFactory
import java.util.concurrent.TimeUnit
import com.codahale.metrics.health.HealthCheckRegistry
import com.codahale.metrics.{JmxReporter, MetricRegistry, Slf4jReporter}
import com.codahale.metrics.jvm.{BufferPoolMetricSet, GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import org.slf4j.LoggerFactory
object Metrics {
lazy val metricRegistry: MetricRegistry = {
val metricRegistry: MetricRegistry = new MetricRegistry
Slf4jReporter.forRegistry(metricRegistry)
.outputTo(LoggerFactory.getLogger("metrics"))
.convertRatesTo(TimeUnit.SECONDS)
.convertDurationsTo(TimeUnit.MILLISECONDS)
.build
.start(30, TimeUnit.SECONDS)
JmxReporter.forRegistry(metricRegistry).build.start()
metricRegistry.register("jvm-gc", new GarbageCollectorMetricSet)
metricRegistry.register("jvm-buffer", new BufferPoolMetricSet(ManagementFactory.getPlatformMBeanServer))
metricRegistry.register("jvm-memory", new MemoryUsageGaugeSet)
metricRegistry.register("jvm-threads", new ThreadStatesGaugeSet)
metricRegistry
}
lazy val healthCheckRegistry = new HealthCheckRegistry
}
Author: onurzdg, Project: movie-db, Lines: 31, Source: Metrics.scala
Example 2: IdJwtHandler
// Package declaration and required imports
package io.scalac.frees.login.handlers.id
import java.security.{PrivateKey, PublicKey}
import java.util.concurrent.TimeUnit
import cats.Id
import io.scalac.frees.login.algebras.{Claims, JwtService}
import io.scalac.frees.login.types.{JWT, UserId}
import pdi.jwt.{Jwt, JwtAlgorithm, JwtCirce, JwtClaim}
import scala.concurrent.duration.FiniteDuration
import scala.util.Try
class IdJwtHandler(
pubKey: PublicKey,
privKey: PrivateKey
) extends JwtService.Handler[Id] {
val twoDays = FiniteDuration(2, TimeUnit.DAYS).toSeconds
val algo = JwtAlgorithm.ES512
override def issue(id: UserId): Id[JWT] = {
val claim = JwtClaim()
.about(id.toString)
.issuedNow
.expiresIn(twoDays)
Jwt.encode(claim, privKey, algo)
}
override def validate(jwt: JWT): Id[Option[Claims]] = {
JwtCirce.decode(jwt, pubKey, Seq(algo)).toOption.flatMap { c =>
for {
userId <- c.subject.flatMap(s => Try(s.toLong).toOption)
expiration <- c.expiration.filter(_ > currentTimeSeconds)
issuedAt <- c.issuedAt.filter(_ <= currentTimeSeconds) // issuedAt is in seconds, so compare against seconds
} yield Claims(userId, issuedAt, expiration)
}
}
private def currentTimeSeconds: Long = System.currentTimeMillis() / 1000
}
Author: LGLO, Project: freestyle-login, Lines: 44, Source: IdJwtHandler.scala
Example 3: PinnedActorSpec
// Package declaration and required imports
package akka.actor.dispatch
import java.util.concurrent.{ CountDownLatch, TimeUnit }
import akka.testkit._
import akka.actor.{ Props, Actor }
import akka.testkit.AkkaSpec
import org.scalatest.BeforeAndAfterEach
import akka.dispatch.{ PinnedDispatcher, Dispatchers }
import scala.concurrent.Await
import akka.pattern.ask
object PinnedActorSpec {
val config = """
pinned-dispatcher {
executor = thread-pool-executor
type = PinnedDispatcher
}
"""
class TestActor extends Actor {
def receive = {
case "Hello" ? sender() ! "World"
case "Failure" ? throw new RuntimeException("Expected exception; to test fault-tolerance")
}
}
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class PinnedActorSpec extends AkkaSpec(PinnedActorSpec.config) with BeforeAndAfterEach with DefaultTimeout {
import PinnedActorSpec._
private val unit = TimeUnit.MILLISECONDS
"A PinnedActor" must {
"support tell" in {
var oneWay = new CountDownLatch(1)
val actor = system.actorOf(Props(new Actor { def receive = { case "OneWay" => oneWay.countDown() } }).withDispatcher("pinned-dispatcher"))
val result = actor ! "OneWay"
assert(oneWay.await(1, TimeUnit.SECONDS))
system.stop(actor)
}
"support ask/reply" in {
val actor = system.actorOf(Props[TestActor].withDispatcher("pinned-dispatcher"))
assert("World" === Await.result(actor ? "Hello", timeout.duration))
system.stop(actor)
}
}
}
Author: love1314sea, Project: akka-2.3.16, Lines: 52, Source: PinnedActorSpec.scala
Example 4: Application
// Package declaration and required imports
import java.text.SimpleDateFormat
import java.util.concurrent.TimeUnit
import java.util.{Date, Properties}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
object Application extends App {
val formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
val simIDs = 10000 to 99999 //99000
val brokers = "192.168.100.211:6667,192.168.100.212:6667,192.168.100.213:6667";
val topic = "newTest";
val props = new Properties
props.put("bootstrap.servers", brokers)
props.put("client.id", "Producer")
props.put("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer")
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
val producer = new KafkaProducer[Integer, String](props)
while (true) {
for (simID <- simIDs) {
val data = Data(
"64846867247",
"?D" + simID,
formatter.format(new Date()),
121.503,
31.3655,
78,
0,
42,
52806.7
)
// println(Data.getString(data))
producer.send(new ProducerRecord[Integer, String](topic, Data.getString(data)))
// TimeUnit.NANOSECONDS.sleep(100)
}
println("-------------------------------"+new Date())
TimeUnit.MINUTES.sleep(18)
}
}
Author: qiuwsh, Project: dataSimulator, Lines: 43, Source: Application.scala
Example 5: TestLatch
// Package declaration and required imports
package akka.testkit
import scala.concurrent.duration.Duration
import akka.actor.ActorSystem
import scala.concurrent.{ Await, CanAwait, Awaitable }
import java.util.concurrent.{ TimeoutException, CountDownLatch, TimeUnit }
import scala.concurrent.duration.FiniteDuration
object TestLatch {
val DefaultTimeout = Duration(5, TimeUnit.SECONDS)
def apply(count: Int = 1)(implicit system: ActorSystem) = new TestLatch(count)
}
class TestLatch(count: Int = 1)(implicit system: ActorSystem) extends Awaitable[Unit] {
private var latch = new CountDownLatch(count)
def countDown() = latch.countDown()
def isOpen: Boolean = latch.getCount == 0
def open() = while (!isOpen) countDown()
def reset() = latch = new CountDownLatch(count)
@throws(classOf[TimeoutException])
def ready(atMost: Duration)(implicit permit: CanAwait) = {
val waitTime = atMost match {
case f: FiniteDuration => f
case _ => throw new IllegalArgumentException("TestLatch does not support waiting for " + atMost)
}
val opened = latch.await(waitTime.dilated.toNanos, TimeUnit.NANOSECONDS)
if (!opened) throw new TimeoutException(
"Timeout of %s with time factor of %s" format (atMost.toString, TestKitExtension(system).TestTimeFactor))
this
}
@throws(classOf[Exception])
def result(atMost: Duration)(implicit permit: CanAwait): Unit = {
ready(atMost)
}
}
Author: love1314sea, Project: akka-2.3.16, Lines: 40, Source: TestLatch.scala
Example 6: ActorPathValidationBenchmark
// Package declaration and required imports
package akka.actor
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.BenchmarkMode
import org.openjdk.jmh.annotations.Fork
import org.openjdk.jmh.annotations.Measurement
import org.openjdk.jmh.annotations.Mode
import org.openjdk.jmh.annotations.OutputTimeUnit
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.annotations.Warmup
@Fork(2)
@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 5)
@Measurement(iterations = 10)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
class ActorPathValidationBenchmark {
final val a = "actor-1"
final val s = "687474703a2f2f74686566727569742e636f6d2f26683d37617165716378357926656e" * 100
final val ElementRegex = """(?:[-\w:@&=+,.!~*'_;]|%\p{XDigit}{2})(?:[-\w:@&=+,.!~*'$_;]|%\p{XDigit}{2})*""".r
// @Benchmark // blows up with stack overflow, we know
def old7000: Option[List[String]] = ElementRegex.unapplySeq(s)
@Benchmark
def handLoop7000: Boolean = ActorPath.isValidPathElement(s)
@Benchmark
def oldActor_1: Option[List[String]] = ElementRegex.unapplySeq(a)
@Benchmark
def handLoopActor_1: Boolean = ActorPath.isValidPathElement(a)
}
Author: love1314sea, Project: akka-2.3.16, Lines: 42, Source: ActorPathValidationBenchmark.scala
Example 7: ActorCreationBenchmark
// Package declaration and required imports
package akka.actor
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations._
@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.SingleShotTime))
@Fork(5)
@Warmup(iterations = 1000)
@Measurement(iterations = 4000)
class ActorCreationBenchmark {
implicit val system: ActorSystem = ActorSystem()
final val props = Props[MyActor]
var i = 1
def name = {
i += 1
"some-rather-long-actor-name-actor-" + i
}
@TearDown(Level.Trial)
def shutdown() {
system.shutdown()
system.awaitTermination()
}
@Benchmark
@OutputTimeUnit(TimeUnit.MICROSECONDS)
def synchronousStarting =
system.actorOf(props, name)
}
class MyActor extends Actor {
override def receive: Receive = {
case _ =>
}
}
Author: love1314sea, Project: akka-2.3.16, Lines: 41, Source: ActorCreationBenchmark.scala
Example 8: CastleConfig
// Package declaration and required imports
package com.box.castle.core.config
import java.util.concurrent.TimeUnit
import CastleConfig.{DefaultCacheSizeInBytes, DefaultBufferSizeInBytes, DefaultBrokerTimeout, DefaultGracefulShutdownTimeout}
import com.box.castle.router.RouterConfig
import com.box.kafka.Broker
import scala.concurrent.duration.FiniteDuration
case class CastleConfig(private val namespaceRaw: String,
brokers: Set[Broker],
leaderConfig: LeaderConfig,
committerConfigs: Iterable[CommitterConfig],
castleZookeeperConfig: CastleZookeeperConfig,
routerConfig: RouterConfig,
brokerTimeout: FiniteDuration = DefaultBrokerTimeout,
bufferSizeInBytes: Int = DefaultBufferSizeInBytes,
cacheSizeInBytes: Long = DefaultCacheSizeInBytes,
gracefulShutdownTimeout: FiniteDuration = DefaultGracefulShutdownTimeout) {
require(bufferSizeInBytes > 0, "bufferSizeInBytes must be positive")
require(brokers.nonEmpty, "must specify at least one broker")
require(committerConfigs.nonEmpty, "Must specify at least one committer config")
require(committerConfigs.map(cfg => cfg.id).toSet.size == committerConfigs.size, "Committer ids must be unique")
val namespace = namespaceRaw.trim()
require(namespace.replaceAll("[^A-Za-z0-9-_]", "_") == namespace, "Castle namespace must consist of alphanumeric characters, dashes (-), and underscores (_)")
}
object CastleConfig {
val DefaultBufferSizeInBytes: Int = (1024 * 1024 * 4) - 128 // 4 MB minus overhead
val DefaultCacheSizeInBytes: Long = 1024 * 1024 * 1024 // 1 GB
val DefaultBrokerTimeout = FiniteDuration(60, TimeUnit.SECONDS)
val DefaultGracefulShutdownTimeout = FiniteDuration(10, TimeUnit.SECONDS)
}
Author: Box-Castle, Project: core, Lines: 38, Source: CastleConfig.scala
Example 9: HermesGameTickerModuleSettings
// Package declaration and required imports
package proton.game.hermes
import java.time.Duration
import java.util.concurrent.TimeUnit
import proton.game.GameTickerModuleSettings
import scala.concurrent.duration.FiniteDuration
class HermesGameTickerModuleSettings(baseSettings: GameTickerModuleSettings,
val port: Int,
val chunkSize: Int,
val gameTimeoutDuration: Duration,
val namesDDataTimeoutDuration: Duration,
val chunkedTimeoutDuration: Duration,
val chunkedAppendTimeoutDuration: Duration,
val chunkedRepositoryTimeoutDuration: Duration)
extends GameTickerModuleSettings(baseSettings) {
val namesDDataTimeout = FiniteDuration(namesDDataTimeoutDuration.toNanos, TimeUnit.NANOSECONDS)
val gameTimeout = FiniteDuration(gameTimeoutDuration.toNanos, TimeUnit.NANOSECONDS)
val chunkedTimeout = FiniteDuration(chunkedTimeoutDuration.toNanos, TimeUnit.NANOSECONDS)
val chunkedAppendTimeout = FiniteDuration(chunkedAppendTimeoutDuration.toNanos, TimeUnit.NANOSECONDS)
val chunkedRepositoryTimeout = FiniteDuration(chunkedRepositoryTimeoutDuration.toNanos, TimeUnit.NANOSECONDS)
}
Author: Morgan-Stanley, Project: proton, Lines: 25, Source: HermesGameTickerModuleSettings.scala
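Example 9 converts a java.time.Duration into a Scala FiniteDuration by going through nanoseconds with TimeUnit.NANOSECONDS. Below is a small standalone sketch of that pattern (the object name DurationBridge is invented for illustration); the scala.jdk.DurationConverters alternative shown at the end only applies if the project targets Scala 2.13 or later.
import java.time.Duration
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.FiniteDuration
object DurationBridge extends App {
  val javaTimeout: Duration = Duration.ofSeconds(30)
  // The pattern used in Example 9: express the Java duration in nanoseconds
  val viaNanos: FiniteDuration = FiniteDuration(javaTimeout.toNanos, TimeUnit.NANOSECONDS)
  // On Scala 2.13+ the standard library provides the same conversion directly
  import scala.jdk.DurationConverters._
  val viaConverter: FiniteDuration = javaTimeout.toScala
  println(viaNanos == viaConverter) // true: both represent 30 seconds
}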
Example 10: LibratoClient
// Package declaration and required imports
package com.ovoenergy.lambda.client
import java.io.Closeable
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import com.librato.metrics._
import com.ovoenergy.lambda.domain.KafkaMetrics
class LibratoClient(email: String, apiToken: String, apiUrl:String, environment: String) extends Closeable {
val poster = new DefaultHttpPoster(apiUrl, email, apiToken)
val batchSize = 300
val timeout = 10L
val timeoutUnit = TimeUnit.SECONDS
val agent = "BENZAITEN"
val sanitizer = Sanitizer.NO_OP
val batch = new LibratoBatch(batchSize, sanitizer, timeout, timeoutUnit, agent, poster)
def addMetrics(metrics: KafkaMetrics) = {
metrics.partitionMetrics.map { metric =>
batch.addCounterMeasurement(s"$environment.kafka.${metrics.consumerGroup}.partition${metric.partition}.lag", metric.lag)
}
}
def submitMetrics() = {
val source = s"$environment-kafka-metrics-lambda"
val result: BatchResult = batch.post(source, System.currentTimeMillis() / 1000)
if (!result.success()) {
result.getFailedPosts.asScala.foreach { failedRes =>
println(s"Failed: $failedRes")
}
}
}
override def close() = {
poster.close()
}
}
Author: ovotech, Project: comms-burrow-polling-lambda, Lines: 41, Source: LibratoClient.scala
Example 11: postStop
// Package declaration and required imports
package com.init6.connection
import java.util.concurrent.TimeUnit
import akka.actor.{ActorRef, Cancellable}
import com.init6.Init6Actor
import com.init6.users.KillConnection
import scala.concurrent.duration.Duration
trait Init6KeepAliveActor extends Init6Actor {
private var pingTask: Cancellable = _
protected var keptAlive = 0
override def postStop(): Unit = {
Option(pingTask).foreach(_.cancel())
super.postStop()
}
def keepAlive(actor: ActorRef, f: () => Unit): Unit = {
keepAlive(actor, f, 25, TimeUnit.SECONDS)
}
def keepAlive(actor: ActorRef, f: () => Unit, delay: Long, unit: TimeUnit): Unit = {
// Build the ping interval from the caller-supplied delay and unit
val pingDuration = Duration(delay, unit)
import context.dispatcher
pingTask = system.scheduler.schedule(
pingDuration,
pingDuration
)({
if (keptAlive < 4) {
keptAlive += 1
f()
} else {
actor ! KillConnection
}
})
}
}
Author: fjaros, Project: init6, Lines: 44, Source: Init6KeepAliveActor.scala
Example 12: DataProducer
// Package declaration and required imports
package org.hpi.esb.datasender
import java.util.concurrent.{ScheduledFuture, ScheduledThreadPoolExecutor, TimeUnit}
import org.apache.kafka.clients.producer.KafkaProducer
import org.hpi.esb.commons.util.Logging
import org.hpi.esb.datasender.config.Configurable
import org.hpi.esb.datasender.output.writers.DatasenderRunResultWriter
import org.hpi.esb.util.OffsetManagement
class DataProducer(resultHandler: DatasenderRunResultWriter, kafkaProducer: KafkaProducer[String, String],
dataReader: DataReader, topics: List[String], numberOfThreads: Int,
sendingInterval: Int, sendingIntervalTimeUnit: TimeUnit,
duration: Long, durationTimeUnit: TimeUnit, singleColumnMode: Boolean) extends Logging with Configurable {
val executor: ScheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(numberOfThreads)
val producerThread = new DataProducerThread(this, kafkaProducer, dataReader, topics,
singleColumnMode, duration, durationTimeUnit)
val topicOffsets = getTopicOffsets()
var t: ScheduledFuture[_] = _
def shutDown(): Unit = {
t.cancel(false)
dataReader.close()
kafkaProducer.close()
executor.shutdown()
logger.info("Shut data producer down.")
val expectedRecordNumber = producerThread.numberOfRecords
resultHandler.outputResults(topicOffsets, expectedRecordNumber)
}
def execute(): Unit = {
val initialDelay = 0
t = executor.scheduleAtFixedRate(producerThread, initialDelay, sendingInterval, sendingIntervalTimeUnit)
val allTopics = topics.mkString(" ")
logger.info(s"Sending records to following topics: $allTopics")
}
def getTopicOffsets(): Map[String, Long] = {
topics.map(topic => {
val currentOffset = OffsetManagement.getNumberOfMessages(topic, partition = 0)
topic -> currentOffset
}).toMap[String, Long]
}
}
Author: BenReissaus, Project: EnterpriseStreamingBenchmark, Lines: 50, Source: DataProducer.scala
Example 13: BufferReaderReadIntBenchmarks
// Package declaration and required imports
package roc
package postgresql
package transport
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations._
@Fork(2)
@State(Scope.Thread)
class BufferReaderReadIntBenchmarks extends BufferTestData {
var br = BufferReader(Array.empty[Byte])
@Setup(Level.Invocation)
def allocateBuffer(): Unit = {
val bw = BufferWriter(new Array[Byte](4))
bw.writeInt(TestInt)
br = BufferReader(bw.toBytes)
()
}
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.MICROSECONDS)
def measureBufferReaderReadInt(): Int = {
br.readInt
}
}
Author: finagle, Project: roc, Lines: 29, Source: BufferReaderBenchmarks.scala
Example 14: BufIntReaderBenchmark
// Package declaration and required imports
package roc
package postgresql
package transport
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations._
@Fork(1)
@State(Scope.Thread)
class BufIntReaderBenchmark extends BufferTestData {
var buf = Buf(0)
@Setup(Level.Invocation)
def instantiateBuf(): Unit = {
buf = Buf(4)
buf.writeInt(TestInt)
()
}
@TearDown(Level.Invocation)
def releaseBuf(): Unit = buf.release()
@Benchmark
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.MICROSECONDS)
def measureBufReadInt(): Int = {
buf.readInt
}
}
Author: finagle, Project: roc, Lines: 32, Source: BufReaderBenchmarks.scala
Example 15: ConfigParserTest
// Package declaration and required imports
package cz.alenkacz.db.postgresscala
import java.util.concurrent.TimeUnit
import com.github.mauricio.async.db.Configuration
import com.github.mauricio.async.db.pool.PoolConfiguration
import com.typesafe.config.ConfigFactory
import cz.alenkacz.db.postgresscala.ConfigParser.PostgresConfiguration
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.duration._
import scala.concurrent.duration.Duration
@RunWith(classOf[JUnitRunner])
class ConfigParserTest extends FlatSpec with Matchers {
private val config = ConfigFactory.load("parser-test.conf")
"Postgres configuration" should "be loaded from connection string" in {
val actual = ConfigParser.parse(config.getConfig("connection-string"))
val expected = PostgresConfiguration(Configuration("admin", "localhost", 5432, Some("admin"), Some("test_db")), Duration(5, TimeUnit.SECONDS), None)
actual should be(expected)
}
it should "throw exception when connection string is missing" in {
assertThrows[InvalidConfigurationException](ConfigParser.parse(config.getConfig("missing-connection-string")))
}
it should "parse full config" in {
val actual = ConfigParser.parse(config.getConfig("full-config"))
val expected = PostgresConfiguration(Configuration("admin", "localhost", 5432, Some("admin"), Some("test_db"), maximumMessageSize = 167, connectTimeout = 888 seconds, testTimeout = 777 seconds, queryTimeout = Some(666 seconds)), Duration(5, TimeUnit.SECONDS), Some(PoolConfiguration(3, 5000, 10)))
actual should be(expected)
}
it should "not return pooled connection by default" in {
val actual = ConfigParser.parse(config.getConfig("without-pooling"))
actual.poolConfiguration should be(None)
}
}
Author: alenkacz, Project: postgres-scala, Lines: 44, Source: ConfigParserTest.scala
Example 16: MsgPackToArrayBench
// Package declaration and required imports
package knot.msgpack
import java.util.concurrent.TimeUnit
import org.msgpack.core.MessagePack
import org.openjdk.jmh.annotations.{Benchmark, Measurement, Scope, State}
@State(Scope.Benchmark)
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
class MsgPackToArrayBench {
val encoder = MsgPackEncoder()
val packer = MessagePack.newDefaultBufferPacker()
@Benchmark
def toArray_knot() = {
go_encoder()
val r = encoder.toArray()
require(r.size > 0)
encoder.clear()
}
@Benchmark
def toArray_org() = {
go_packer()
val r = packer.toByteArray()
require(r.size > 0)
packer.clear()
}
private def go_encoder() = {
encoder
.put(false)
.put(true)
.put(Int.MinValue)
.put(Int.MaxValue)
.put(Short.MinValue)
.put(Short.MaxValue)
.put(Long.MinValue)
.put(Long.MaxValue)
.put(Float.MinValue)
.put(Float.MaxValue)
.flush()
}
private def go_packer() = {
packer
.packBoolean(false)
.packBoolean(true)
.packInt(Int.MinValue)
.packInt(Int.MaxValue)
.packShort(Short.MinValue)
.packShort(Short.MaxValue)
.packLong(Long.MinValue)
.packLong(Long.MaxValue)
.packFloat(Float.MinValue)
.packFloat(Float.MaxValue)
.flush()
}
}
Author: defvar, Project: knot, Lines: 61, Source: MsgPackToArrayBench.scala
Example 17: MsgPackBufferBench
// Package declaration and required imports
package knot.msgpack
import java.nio.ByteOrder
import java.util.concurrent.TimeUnit
import knot.data.buffers.CommonBuffer
import org.msgpack.core.buffer.MessageBuffer
import org.openjdk.jmh.annotations._
@State(Scope.Benchmark)
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
class MsgPackBufferBench {
val org = MessageBuffer.allocate(8192)
val knot = CommonBuffer.allocate(8192, ByteOrder.nativeOrder())
@Benchmark
def knot_1_1000() = {
for (i <- (0 until 1000)) {
knot.putInt(i, i)
}
}
@Benchmark
def org_1000() = {
for (i <- (0 until 1000)) {
org.putInt(i, i)
}
}
}
Author: defvar, Project: knot, Lines: 32, Source: MsgPackBufferBench.scala
Example 18: MsgPackMapBench
// Package declaration and required imports
package knot.msgpack
import java.util.concurrent.TimeUnit
import org.msgpack.core.MessagePack
import org.msgpack.value.ValueFactory.{newInteger, newMap}
import org.openjdk.jmh.annotations.{Benchmark, Measurement, Scope, State}
@State(Scope.Benchmark)
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
class MsgPackMapBench {
val msgbuffer = MsgPackEncoder()
val packer = MessagePack.newDefaultBufferPacker()
val bufferMap = Map((1, 10.toShort), (2, 20.toShort))
val packerMap = newMap(
newInteger(1), newInteger(10.toShort),
newInteger(2), newInteger(20.toShort))
@Benchmark
def map_knot() = {
msgbuffer.put(bufferMap).flush()
msgbuffer.clear()
}
@Benchmark
def map_org() = {
packer.packValue(packerMap).flush()
packer.clear()
}
}
Author: defvar, Project: knot, Lines: 32, Source: MsgPackMapBench.scala
Example 19: MsgPackSerializeBench
// Package declaration and required imports
package knot.msgpack
import java.util.concurrent.TimeUnit
import com.fasterxml.jackson.core.JsonFactory
import com.fasterxml.jackson.databind.ObjectMapper
import knot.msgpack.testValues.{JavaMedia, Media}
import org.msgpack.jackson.dataformat.MessagePackFactory
import org.openjdk.jmh.annotations._
@State(Scope.Benchmark)
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
class MsgPackSerializeBench {
val knotSer = new MsgPackSerializerFactory().get[Media]()
val jacksonFactory = new MessagePackFactory()
jacksonFactory.configure(JsonFactory.Feature.USE_THREAD_LOCAL_FOR_BUFFER_RECYCLING, false)
val jacksonMapper = new ObjectMapper(jacksonFactory)
val src = new Media().setup()
val jsrc = JavaMedia.setup()
var knotSize: Int = 0
var jacksonSize: Int = 0
@TearDown
def teardown() = {
println(s"knot:$knotSize")
println(s"jackson:$jacksonSize")
}
@Benchmark
def ser_knot() = {
val ar = knotSer.serialize(src)
knotSize = ar.length
}
@Benchmark
def ser_jackson() = {
val ar = jacksonMapper.writeValueAsBytes(jsrc)
jacksonSize = ar.length
}
}
Author: defvar, Project: knot, Lines: 43, Source: MsgPackSerializeBench.scala
Example 20: SubscribeSignalSpec
// Package declaration and required imports
package knot.core.emitters
import java.util.concurrent.{CountDownLatch, TimeUnit}
import knot.core.Signals.{Start, Subscribe}
import knot.core.Workbench
import knot.core.adapter.LocalAdapter
import knot.core.config.Configs
import knot.core.sources.IteratorSource
import knot.testKit.ThroughParser
import org.scalatest.{BeforeAndAfter, FunSpec}
class SubscribeSignalSpec extends FunSpec with BeforeAndAfter {
implicit val wb: Workbench = Workbench.on("test")
var ctl = new CountDownLatch(1)
private def getTestEmitter: (LocalAdapter, LocalAdapter) = {
val s = PublishEmitter[Int](wb, Configs.defaultPartConfig, IteratorSource(0 until 1))
val p = ProcessEmitter[Int, Int](wb, Configs.defaultPartConfig, ThroughParser[Int](_ => ctl.countDown()))
(LocalAdapter(s), LocalAdapter(p))
}
before {
ctl = new CountDownLatch(1)
}
describe("subscribe signal") {
it("source subscribe -> start") {
val te = getTestEmitter
te._1.send(Subscribe(te._2))
Thread.sleep(100)
te._1.send(Start)
ctl.await(10, TimeUnit.SECONDS)
}
it("source start -> subscribe") {
val te = getTestEmitter
te._1.send(Start)
Thread.sleep(100)
te._1.send(Subscribe(te._2))
ctl.await(10, TimeUnit.SECONDS)
}
it("parser start -> subscribe") {
val te = getTestEmitter
te._1.send(Start)
te._2.send(Start)
Thread.sleep(100)
te._1.send(Subscribe(te._2))
ctl.await(10, TimeUnit.SECONDS)
}
}
}
Author: defvar, Project: knot, Lines: 54, Source: SubscribeSignalSpec.scala
Note: The java.util.concurrent.TimeUnit examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.