本文整理汇总了Scala中org.slf4j.LoggerFactory类的典型用法代码示例。如果您正苦于以下问题:Scala LoggerFactory类的具体用法?Scala LoggerFactory怎么用?Scala LoggerFactory使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LoggerFactory类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Scala代码示例。
示例1: ApiSpec
//设置package包名称以及导入依赖的类
package au.csiro.data61.magda.registry
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.TestProbe
import ch.qos.logback.classic.{Level, Logger}
import org.flywaydb.core.Flyway
import org.scalatest.Matchers
import org.scalatest.fixture.FunSpec
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import scalikejdbc._
// Base test fixture for registry API specs: spins up the HTTP Api with a probe
// standing in for the web-hook actor, and recreates the "test" Postgres schema
// (via Flyway) before every test.
abstract class ApiSpec extends FunSpec with ScalatestRouteTest with Matchers with Protocols with SprayJsonSupport {
  // Each test receives the running Api plus the probe capturing WebHookActor messages.
  case class FixtureParam(api: Api, webHookActorProbe: TestProbe)

  // Database URL is injected via npm config in CI; falls back to a local Postgres.
  val databaseUrl = Option(System.getenv("npm_package_config_databaseUrl")).getOrElse("jdbc:postgresql://localhost:5432/postgres")

  // Stop Flyway from producing so much spam that Travis terminates the process.
  // (Cast is needed because slf4j only exposes the logback Logger as org.slf4j.Logger.)
  LoggerFactory.getLogger("org.flywaydb").asInstanceOf[Logger].setLevel(Level.WARN)

  val flyway = new Flyway()
  flyway.setDataSource(databaseUrl, "postgres", "")
  flyway.setSchemas("test")
  flyway.setLocations("classpath:/sql")

  // Point the app at the throwaway "test" schema and skip auth during tests.
  override def testConfigSource =
    s"""
    |db.default.url = "${databaseUrl}?currentSchema=test"
    |authorization.skip = true
    |akka.loglevel = INFO
    """.stripMargin

  override def withFixture(test: OneArgTest) = {
    val webHookActorProbe = TestProbe()
    val api = new Api(webHookActorProbe.ref, testConfig, system, executor, materializer)
    // NOTE(review): a 1-millisecond expectMsg timeout looks extremely tight and
    // potentially flaky — confirm this is intentional.
    webHookActorProbe.expectMsg(1 millis, WebHookActor.Process)
    // Drop and recreate the schema, then re-apply migrations, so every test
    // starts from a clean database.
    DB localTx { implicit session =>
      sql"DROP SCHEMA IF EXISTS test CASCADE".update.apply()
      sql"CREATE SCHEMA test".update.apply()
    }
    flyway.migrate()
    super.withFixture(test.toNoArgTest(FixtureParam(api, webHookActorProbe)))
  }
}
开发者ID:TerriaJS,项目名称:magda,代码行数:51,代码来源:ApiSpec.scala
示例2: HomeController
//设置package包名称以及导入依赖的类
package controllers
import javax.inject._
import org.slf4j.LoggerFactory
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.{JsObject, JsValue, Json}
import play.api.mvc._
import play.modules.reactivemongo._
import play.modules.reactivemongo.json._
import play.modules.reactivemongo.json.collection.{JSONCollection, _}
import reactivemongo.api.Cursor
import scala.concurrent.Future
// Play controller backing the search front-end: renders the index page and
// executes keyword searches against the "scrawler1" Mongo collection.
@Singleton
class HomeController @Inject() (val reactiveMongoApi: ReactiveMongoApi)
  extends Controller with MongoController with ReactiveMongoComponents {
  val logger = LoggerFactory.getLogger(this.getClass)

  // Resolved per call so the handle follows driver reconnects.
  def collection: JSONCollection = db.collection[JSONCollection]("scrawler1")

  def index = Action {
    Ok(views.html.index(""))
  }

  // Handles a search form POST: splits the submitted query values into
  // keywords and returns documents whose "keywords" field matches any of them.
  def query = Action.async { request =>
    val body = request.body
    // NOTE(review): `.get` assumes the body is form-url-encoded AND contains
    // "query" — a missing key throws NoSuchElementException; consider a 400.
    val query = body.asFormUrlEncoded.get("query")
    val querySet = query.toSet[String]
    // Split each submitted value on spaces to get individual keywords.
    val keywords = querySet.flatMap({ string: String =>
      string.split(" ")
    })
    val searchQuery = Json.obj("keywords" -> Json.obj("$in" -> Json.toJson(keywords)))
    logger.info(s"Internal query from client: $searchQuery")
    val cursor: Cursor[JsObject] = collection.find(searchQuery).cursor[JsObject]
    val result: Future[List[JsObject]] = cursor.collect[List]()
    val resultJson: Future[JsValue] =
      result.map { persons => Json.toJson(persons) }
    resultJson.map { results =>
      // \\ gathers the named field from every result document; the zips pair
      // them positionally for the result view.
      val title = results \\ "title"
      val url = results \\ "url"
      val description = results \\ "body"
      val queryData: Seq[((JsValue, JsValue), JsValue)] = title.zip(url).zip(description)
      Ok(views.html.result(queryData))
    }
  }
}
开发者ID:slideon,项目名称:Scrawler-frontend,代码行数:56,代码来源:HomeController.scala
示例3: AutoFutureRunner
//设置package包名称以及导入依赖的类
package com.oradian.autofuture
import org.slf4j.LoggerFactory
import scalax.file._
// Command-line entry point: recursively rewrites .scala sources under the
// given files/directories with the AutoFuture transformation, in place.
object AutoFutureRunner extends App {
  if (args.isEmpty) {
    System.err.println("Usage: java -jar autofuture.jar [file1] [directory1] ... [directoryN]")
    sys.exit(1)
  }
  val logger = LoggerFactory.getLogger("auto-future")
  args foreach { arg =>
    // Normalize Windows separators so the glob below behaves uniformly.
    val path = Path.apply(arg.replace('\\', '/'), '/').toAbsolute
    logger.info("Gathering sources from path: {}", path.path)
    // Process all .scala files under the path in parallel.
    for (source <- (path ** "*.scala").par if source.isFile) {
      logger.trace("Examining Scala source: {}", source.path)
      AutoFuture(source.string) match {
        case AutoFuture.Result.Success(body) =>
          logger.debug("Auto-Futuring Scala source: {}", source.path)
          // Overwrites the original file with the rewritten source.
          source.write(body)
        case AutoFuture.Result.Error(error) =>
          // Ascriptions avoid slf4j's two-arg vs varargs overload ambiguity.
          logger.warn("Error while parsing Scala source: {}\n{}", source.path: Any, error: Any)
        case AutoFuture.Result.Noop =>
          logger.trace("Noop on: {}", source.path)
      }
    }
  }
  sys.exit(0)
}
开发者ID:oradian,项目名称:sbt-auto-future,代码行数:34,代码来源:AutoFutureRunner.scala
示例4: WarmupZeppelin
//设置package包名称以及导入依赖的类
package biz.meetmatch.modules.util
import biz.meetmatch.modules.Module
import biz.meetmatch.util.Utils
import com.google.gson.Gson
import org.apache.spark.sql.SparkSession
import org.rogach.scallop.Scallop
import org.slf4j.LoggerFactory
import scalaj.http.Http
// Warms up a Zeppelin instance: restarts the Spark interpreter, kicks off the
// Bootstrap notebook, and polls until all of its paragraphs have finished.
object WarmupZeppelin extends Module {
  override def execute(scallopts: Scallop)(implicit sparkSession: SparkSession): Unit = {
    val interpreterId = Utils.getConfig("zeppelin.interpreter.id")
    logger.info(s"Restarting the spark interpreter using url http://localhost:8080/api/interpreter/setting/restart/$interpreterId...")
    val interpreterResponse = Http(s"http://localhost:8080/api/interpreter/setting/restart/$interpreterId")
      .method("put")
      .timeout(connTimeoutMs = 10000, readTimeoutMs = 60000)
      .asParamMap
    logger.info("Response code: " + interpreterResponse.code)
    if (interpreterResponse.code == 200) {
      val bootstrapNotebookId = Utils.getConfig("zeppelin.bootstrapNotebookId.id")
      logger.info(s"Executing the Bootstrap notebook using url http://localhost:8080/api/notebook/job/$bootstrapNotebookId...")
      val notebookResponse = Http(s"http://localhost:8080/api/notebook/job/$bootstrapNotebookId")
        .postForm
        .timeout(connTimeoutMs = 10000, readTimeoutMs = 60000)
        .asParamMap
      logger.info("Response code: " + notebookResponse.code)
      if (notebookResponse.code == 200) {
        // True while at least one paragraph of the Bootstrap notebook has not
        // reached the FINISHED state yet.
        def stillRunningParagraphs(): Boolean = {
          logger.info("Checking if the Bootstrap notebook has finished...")
          // FIX: previously polled the hard-coded notebook id "2BKFC4D5W"
          // instead of the configured bootstrap notebook id.
          val checkNotebookResponse = Http(s"http://localhost:8080/api/notebook/job/$bootstrapNotebookId")
            .timeout(connTimeoutMs = 10000, readTimeoutMs = 120000)
            .asString
          logger.info("Response code: " + checkNotebookResponse.code)
          val notebookJobResponse = new Gson().fromJson(checkNotebookResponse.body, classOf[GetNotebookJobResponse])
          notebookJobResponse.body.exists(_.status != "FINISHED")
        }
        while (stillRunningParagraphs()) {
          logger.info("Keep on polling...")
          Thread.sleep(5000)
        }
        logger.info("The Bootstrap notebook has finished.")
      }
    }
  }

  // Shape of Zeppelin's notebook-job REST response (deserialized with Gson).
  case class GetNotebookJobResponse(status: String, body: Array[ParagraphStatus])
  case class ParagraphStatus(id: String, started: String, finished: String, status: String)

  override protected val logger = LoggerFactory.getLogger(this.getClass)
}
开发者ID:tolomaus,项目名称:languagedetector,代码行数:60,代码来源:WarmupZeppelin.scala
示例5: E2ESpec
//设置package包名称以及导入依赖的类
package com.hivehome.kafka.connect.sqs
import java.time.Instant
import org.scalatest.{FunSuite, Matchers}
import org.slf4j.LoggerFactory
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
// End-to-end test: a message sent to SQS should be routed by the connector
// into the Kafka topic. Requires live SQS, Kafka and a schema registry, so the
// test is ignored by default.
class E2ESpec extends FunSuite with Matchers with SQSSupport {
  val logger = LoggerFactory.getLogger(getClass.getName)
  private val KafkaTopic: String = "connect-test"
  override val queueName = "test-sqs" // kafka connect should be setup with this SQS
  queueUrl = sqs.getQueueUrl(queueName).getQueueUrl
  // Broker and registry endpoints are overridable via environment for CI.
  private val props = Map(
    "bootstrap.servers" -> sys.env.getOrElse("KAFKA", "localhost:9092"),
    "schema.registry.url" -> sys.env.getOrElse("SCHEMA_REGISTRY", "http://localhost:8081"))
  val consumer = KafkaAvroConsumer[String, String](props, topicName = KafkaTopic)

  // Test is ignored because it does not run without dependent services
  ignore("should route message SQS -> Kafka") {
    Future {
      // sleep is required so that the message to SQS
      // is sent after the consumer is listening on the kafka topic
      Thread.sleep(500)
      logger.debug("sending message..")
      sendMessage(Instant.now().toString)
      logger.debug("sent message..")
    }
    // Block until one message arrives on the topic; exactly one is expected.
    val msgs = consumer.poll(1, accept = _ => true)
    msgs should have size 1
  }
}
开发者ID:ConnectedHomes,项目名称:sqs-kafka-connect,代码行数:40,代码来源:E2ESpec.scala
示例6: ExecutionContextBackboneCoordinator
//设置package包名称以及导入依赖的类
package ie.zalando.pipeline.backbone.concurrent
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
import scala.util.control.NonFatal
import org.slf4j.LoggerFactory
import cats.data.Xor
import ie.zalando.pipeline.backbone.Backbone
import ie.zalando.pipeline.backbone.Phases.{ LocalReleasePhase, TransformationPipelineFailure }
// Runs the backbone transformation pipeline for single datums on the supplied
// ExecutionContext, initializing per-call local phases and releasing them
// afterwards.
class ExecutionContextBackboneCoordinator[DA](backbone: Backbone[DA], executionContext: ExecutionContext) {
  import ExecutionContextBackboneCoordinator._

  // Top-level contexts are initialized once and shared by all process() calls.
  val localInitPhases = backbone.initializeTopLevelContexts

  // Transforms one datum asynchronously; the Xor carries either a pipeline
  // failure or the transformed datum.
  def process(datum: DA): Future[Xor[TransformationPipelineFailure, DA]] = {
    Future {
      // -1 appears to be a sentinel partition/slot id for this single-datum
      // context — NOTE(review): confirm its meaning against Backbone.
      val (dataPhases, releasePhases) = backbone.initializeInLocalContext(-1, localInitPhases).unzip
      try {
        backbone.transformDatum(backbone.createStateMonad(dataPhases), datum)
      } finally {
        // Release every local phase; a failed release is logged but never
        // masks the transformation result.
        releasePhases.foreach((phase: LocalReleasePhase) => {
          Try({ phase.releaseLocalResources() }).recover { case NonFatal(ex) => log.warn(s"Release phase $phase failed:", ex) }
        })
      }
    }(executionContext)
  }
}
object ExecutionContextBackboneCoordinator {
  // FIX: the logger was built from classOf[ExecutorServiceBackboneCoordinator],
  // mislabelling every log line from this coordinator; use this class instead.
  val log = LoggerFactory.getLogger(classOf[ExecutionContextBackboneCoordinator[_]])
}
开发者ID:retnuh,项目名称:pipeline-backbone,代码行数:37,代码来源:ExecutionContextBackboneCoordinator.scala
示例7: Word2VecRawTextExample
//设置package包名称以及导入依赖的类
package org.dl4scala.examples.nlp.word2vec
import org.datavec.api.util.ClassPathResource
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.text.sentenceiterator.BasicLineIterator
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory
import org.slf4j.LoggerFactory
// Builds a Word2Vec model from a raw-sentence corpus on the classpath and
// prints the ten words closest to "day".
object Word2VecRawTextExample {
  private val log = LoggerFactory.getLogger(Word2VecRawTextExample.getClass)

  def main(args: Array[String]): Unit = {
    // Resolve the corpus file shipped on the classpath.
    val sentencesPath: String = new ClassPathResource("raw_sentences.txt").getFile.getAbsolutePath
    log.info("Load & Vectorize Sentences....")
    // One sentence per line, whitespace-trimmed by the iterator.
    val lineIterator = new BasicLineIterator(sentencesPath)
    // Tokenize on whitespace and normalize each token.
    val tokenizerFactory = new DefaultTokenizerFactory
    tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor)
    import org.deeplearning4j.models.word2vec.Word2Vec
    log.info("Building model....")
    val word2Vec: Word2Vec = new Word2Vec.Builder()
      .minWordFrequency(5)
      .iterations(1)
      .layerSize(100)
      .seed(42)
      .windowSize(5)
      .iterate(lineIterator)
      .tokenizerFactory(tokenizerFactory)
      .build
    log.info("Fitting Word2Vec model....")
    word2Vec.fit()
    log.info("Writing word vectors to text file....")
    // Persist the learned vectors for later reuse.
    WordVectorSerializer.writeWordVectors(word2Vec, "pathToWriteto.txt")
    // Example of what to do with the trained vectors: nearest-neighbour lookup.
    log.info("Closest Words:")
    val nearestWords = word2Vec.wordsNearest("day", 10)
    System.out.println("10 Words closest to 'day': " + nearestWords)
  }
}
开发者ID:endymecy,项目名称:dl4scala,代码行数:51,代码来源:Word2VecRawTextExample.scala
示例8: ParagraphVectorsInferenceExample
//设置package包名称以及导入依赖的类
package org.dl4scala.examples.nlp.paragraphvectors
import org.datavec.api.util.ClassPathResource
import org.deeplearning4j.models.embeddings.loader.WordVectorSerializer
import org.deeplearning4j.text.tokenization.tokenizer.preprocessor.CommonPreprocessor
import org.deeplearning4j.text.tokenization.tokenizerfactory.DefaultTokenizerFactory
import org.nd4j.linalg.ops.transforms.Transforms
import org.slf4j.LoggerFactory
// Loads a pre-trained ParagraphVectors model and infers vectors for a few
// sentences, logging their cosine similarities.
object ParagraphVectorsInferenceExample {
  private val log = LoggerFactory.getLogger(ParagraphVectorsInferenceExample.getClass)

  def main(args: Array[String]): Unit = {
    val modelResource = new ClassPathResource("/paravec/simple.pv")
    val tokenizerFactory = new DefaultTokenizerFactory
    tokenizerFactory.setTokenPreProcessor(new CommonPreprocessor)
    // we load externally originated model
    val paragraphVectors = WordVectorSerializer.readParagraphVectors(modelResource.getFile)
    paragraphVectors.setTokenizerFactory(tokenizerFactory)
    paragraphVectors.getConfiguration.setIterations(1) // please note, we set iterations to 1 here, just to speedup inference
    val vectorWorld = paragraphVectors.inferVector("This is my world .")
    val vectorWorldAgain = paragraphVectors.inferVector("This is my world .")
    val vectorWay = paragraphVectors.inferVector("This is my way .")
    // high similarity expected here, since in underlying corpus words WAY and WORLD have really close context
    log.info("Cosine similarity A/B: {}", Transforms.cosineSim(vectorWorld, vectorWay))
    // equality expected here, since inference is happening for the same sentences
    log.info("Cosine similarity A/A2: {}", Transforms.cosineSim(vectorWorld, vectorWorldAgain))
  }
}
开发者ID:endymecy,项目名称:dl4scala,代码行数:35,代码来源:ParagraphVectorsInferenceExample.scala
示例9: PersistentActorMessage
//设置package包名称以及导入依赖的类
package info.unterstein.akka.persistence.api
import com.google.gson._
import java.lang.reflect.Type
import com.google.gson.reflect.TypeToken
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
// Envelope persisted by the scheduling actor: the message type, the schedule
// date (a Long — presumably epoch millis; TODO confirm unit), and the original
// payload as a string map.
case class PersistentActorMessage(messageType: String, scheduleDate: Long, originalMessage: Map[String, String])

object PersistentActorMessage {
  // Gson type token for java.util.Map[String, String] (needed due to erasure).
  private val mapToken = new TypeToken[java.util.Map[String, String]](){}.getType
  private val log = LoggerFactory.getLogger(PersistentActorMessage.getClass)
  // Gson instance with a custom (de)serializer so Scala immutable Maps
  // round-trip through their Java counterparts.
  private val gson = new GsonBuilder()
    .registerTypeAdapter(classOf[Map[String, String]], new MapSerializer())
    .create()

  private class MapSerializer extends JsonSerializer[Map[String, String]] with JsonDeserializer[Map[String, String]] {
    override def serialize(src: Map[String, String], typeOfSrc: Type, context: JsonSerializationContext): JsonElement = {
      // `gson` is resolved lazily at call time, so the mutual reference between
      // the builder above and this serializer is safe.
      gson.toJsonTree(src.asJava)
    }
    override def deserialize(json: JsonElement, typeOfT: Type, context: JsonDeserializationContext): Map[String, String] = {
      val result: java.util.Map[String, String] = gson.fromJson(json, mapToken)
      result.asScala.toMap
    }
  }

  // Parses a JSON object into a string map; wraps any failure in a
  // RuntimeException after logging the offending payload.
  def jsonToMap(json: String): Map[String, String] = {
    try {
      val result: java.util.Map[String, String] = gson.fromJson(json, mapToken)
      result.asScala.toMap
    } catch {
      case o_O: Exception =>
        log.error("Deserialization failed for " + json, o_O)
        throw new RuntimeException("Deserialization failed!", o_O)
    }
  }

  def mapToJson(map: Map[String, String]): String = gson.toJson(map.asJava)
}
开发者ID:unterstein,项目名称:persistent-actor-messages,代码行数:49,代码来源:PersistentActorMessage.scala
示例10: MonitoringServer
//设置package包名称以及导入依赖的类
package com.scalaio.http.monitoring
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.{HttpEntity, HttpResponse, StatusCodes, Uri}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.Materializer
import com.typesafe.config.Config
import com.yammer.metrics.HealthChecks
import com.yammer.metrics.core.HealthCheckRegistry
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsArray, Json}
import scala.util.{Failure, Success}
import scala.collection.convert.wrapAsScala._
// Minimal HTTP monitoring endpoint: serves aggregated health-check results as
// JSON on /health and redirects everything else there.
object MonitoringServer {
  lazy val logger = LoggerFactory.getLogger(getClass)

  // Route for GET /health: runs every registered health check and returns 200
  // when all are healthy, 500 otherwise; body lists each check's result.
  def handleHealthchecks(registry: HealthCheckRegistry): Route = {
    path("health") {
      get {
        complete {
          val checks = registry.runHealthChecks
          val payload = JsArray(checks.map {
            case (name, result) =>
              Json.obj(
                "name" -> name,
                "healthy" -> result.isHealthy,
                "message" -> result.getMessage
              )
          }.toSeq)
          val status = if (checks.values().forall(_.isHealthy)) OK else InternalServerError
          HttpResponse(entity = HttpEntity(`application/json`, Json.stringify(payload)), status = status)
        }
      }
    }
  }

  // Binds the monitoring routes on host/port taken from `serverConfig`.
  // The binding outcome is only logged (fire-and-forget).
  def start(serverConfig: Config, registry: HealthCheckRegistry = HealthChecks.defaultRegistry())
           (implicit system: ActorSystem, materializer: Materializer): Unit = {
    val host = serverConfig.getString("host")
    val port = serverConfig.getInt("port")
    logger.info(s"Starting monitoring server at: $host:$port")
    val routes = handleHealthchecks(registry) ~ redirect(Uri("/health"), StatusCodes.SeeOther)
    import system.dispatcher
    Http()
      .bindAndHandle(routes, host, port).onComplete {
        case Success(Http.ServerBinding(address)) =>
          logger.info(s"Monitoring server started at :$address")
        case Failure(t) =>
          logger.error("Error while trying to start monitoring server", t)
      }
  }
}
开发者ID:fagossa,项目名称:scalaio_akka,代码行数:61,代码来源:MonitoringServer.scala
示例11: GeodeFiniteSourceSpec
//设置package包名称以及导入依赖的类
package akka.stream.alpakka.geode.scaladsl
import akka.stream.scaladsl.Sink
import org.slf4j.LoggerFactory
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
// Verifies that finite OQL queries against Geode regions complete and stream
// all their elements through an akka-stream Sink.
class GeodeFiniteSourceSpec extends GeodeBaseSpec {
  private val log = LoggerFactory.getLogger(classOf[GeodeFiniteSourceSpec])
  "Geode finite source" should {
    it { geodeSettings =>
      "retrieves finite elements from geode" in {
        val reactiveGeode = new ReactiveGeode(geodeSettings)
        //#query
        val source =
          reactiveGeode
            .query[Person](s"select * from /persons order by id")
            .runWith(Sink.foreach(e => log.debug(s"$e")))
        //#query
        // Await is acceptable in a test; 10s per query keeps CI from hanging.
        Await.ready(source, 10 seconds)
        val animals =
          reactiveGeode
            .query[Animal](s"select * from /animals order by id")
            .runWith(Sink.foreach(e => log.debug(s"$e")))
        Await.ready(animals, 10 seconds)
        val complexes =
          reactiveGeode
            .query[Complex](s"select * from /complexes order by id")
            .runWith(Sink.foreach(e => log.debug(s"$e")))
        Await.ready(complexes, 10 seconds)
        reactiveGeode.close()
      }
    }
  }
}
开发者ID:akka,项目名称:alpakka,代码行数:47,代码来源:GeodeFiniteSourceSpec.scala
示例12: ScrapingKitReactorTest
//设置package包名称以及导入依赖的类
package ru.fediq.scrapingkit
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpMethods, Uri}
import com.codahale.metrics.Slf4jReporter
import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.typesafe.config.ConfigFactory
import org.scalatest.FlatSpec
import org.slf4j.LoggerFactory
import ru.fediq.scrapingkit.backend.{InMemoryFifoLinksQueue, InMemoryLinksHistory, NoOpFeedExporter, NoOpPageCache}
import ru.fediq.scrapingkit.model.PageRef
import ru.fediq.scrapingkit.scraper.HtmlCrawlingScraper
import ru.fediq.scrapingkit.util.Metrics
// Smoke test: wires an in-memory reactor, seeds one start URL, lets it crawl
// for 10 seconds while metrics are dumped to the log, then shuts down.
class ScrapingKitReactorTest extends FlatSpec {
  "Reactor" should "crawl something" in {
    val scraperName = "crawl"
    val scrapers = Map(scraperName -> new HtmlCrawlingScraper(scraperName))
    val config = ConfigFactory.load()
    implicit val system = ActorSystem("reactor-test", config)
    // All backends are in-memory/no-op: nothing is persisted or exported.
    val linksQueue = new InMemoryFifoLinksQueue()
    val linksHistory = new InMemoryLinksHistory()
    val pageCache = new NoOpPageCache()
    val exporter = new NoOpFeedExporter()
    val reactor = new ScrapingKitReactor(linksQueue, linksHistory, pageCache, exporter, scrapers)
    linksQueue.enqueue(PageRef(Uri("http://quotes.toscrape.com/"), HttpMethods.GET, scraperName))
    // Periodically report crawl metrics through slf4j while the test sleeps.
    Slf4jReporter
      .forRegistry(Metrics.metricRegistry)
      .withLoggingLevel(LoggingLevel.INFO)
      .outputTo(LoggerFactory.getLogger("METRICS"))
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .convertRatesTo(TimeUnit.SECONDS)
      .build()
      .start(10, TimeUnit.SECONDS)
    // NOTE(review): sleep-based coordination and no assertions — the test only
    // proves the reactor runs without crashing; confirm this is intended.
    Thread.sleep(10000)
    reactor.close()
  }
}
开发者ID:fediq,项目名称:scraping-kit,代码行数:48,代码来源:ScrapingKitReactorTest.scala
示例13: CollectTransformer
//设置package包名称以及导入依赖的类
package transformation
import org.apache.kafka.streams.KeyValue
import org.apache.kafka.streams.kstream.Transformer
import org.apache.kafka.streams.processor.ProcessorContext
import org.apache.kafka.streams.state.KeyValueStore
import org.slf4j.{Logger, LoggerFactory}
// Kafka Streams Transformer that accumulates values per key in a state store
// until `collectComplete` signals the collection is done, then emits one
// aggregated output record.
abstract class CollectTransformer[INPUT_K, INPUT_V, STORE_V, OUTPUT_K, OUTPUT_V](storeName: String)
  extends Transformer[INPUT_K, INPUT_V, KeyValue[OUTPUT_K, OUTPUT_V]] {
  val log: Logger = LoggerFactory.getLogger(this.getClass)
  var ctx: ProcessorContext = _
  var store: KeyValueStore[INPUT_K, STORE_V] = _

  override def init(context: ProcessorContext): Unit = {
    log.debug(s"Init ...")
    ctx = context
    // The state store must be registered under `storeName` in the topology.
    store = ctx.getStateStore(storeName).asInstanceOf[KeyValueStore[INPUT_K, STORE_V]]
    // Schedule punctuate() every 100ms.
    ctx.schedule(100)
  }

  // Punctuation does no work; returning null forwards nothing downstream.
  override def punctuate(timestamp: Long): KeyValue[OUTPUT_K, OUTPUT_V] = {
    log.debug(s"Punctuating ...")
    null
  }

  // Emits the collected output once complete; otherwise appends the value to
  // the store and returns null (no output for this record).
  override def transform(key: INPUT_K, value: INPUT_V): KeyValue[OUTPUT_K, OUTPUT_V] = {
    log.debug(s"Transforming event : $value")
    val currentStoreValue = store.get(key)
    if (currentStoreValue != null && collectComplete(currentStoreValue, value)) {
      // NOTE(review): the store entry is not removed after emission — confirm
      // whether completed keys should be cleared.
      collectOutput(key, currentStoreValue, value)
    } else {
      store.put(key, appendToStore(currentStoreValue, value))
      null
    }
  }

  // Merge a new value into the stored aggregate (storeValue may be null).
  def appendToStore(storeValue: STORE_V, appendValue: INPUT_V): STORE_V
  // True when the stored aggregate plus this value completes the collection.
  def collectComplete(storeValue: STORE_V, appendValue: INPUT_V): Boolean
  // Build the output record from the completed aggregate.
  def collectOutput(inputKey: INPUT_K, storeValue: STORE_V, mergeValue: INPUT_V): KeyValue[OUTPUT_K, OUTPUT_V]

  override def close(): Unit = {
    log.debug(s"Close ...")
  }
}
开发者ID:benwheeler,项目名称:kafka-streams-poc,代码行数:54,代码来源:CollectTransformer.scala
示例14: ConsulServiceLocator
//设置package包名称以及导入依赖的类
package org.wex.cmsfs.lagom.service.discovery.consul
import java.net.URI
import com.lightbend.lagom.internal.client.CircuitBreakers
import com.lightbend.lagom.scaladsl.api.Descriptor.Call
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.client.{CircuitBreakerComponents, CircuitBreakingServiceLocator}
import org.slf4j.{Logger, LoggerFactory}
import play.api.Configuration
import scala.concurrent.{ExecutionContext, Future}
// Lagom wiring: supplies a ServiceLocator backed by Consul.
trait ConsulServiceLocatorComponents extends CircuitBreakerComponents {
  lazy val serviceLocator: ServiceLocator = new ConsulServiceLocator(configuration, circuitBreakers)(executionContext)
}

// Resolves service names to URIs via Consul, with Lagom's circuit breaking on top.
class ConsulServiceLocator(configuration: Configuration, circuitBreakers: CircuitBreakers)(implicit ec: ExecutionContext)
  extends CircuitBreakingServiceLocator(circuitBreakers) {
  private val logger: Logger = LoggerFactory.getLogger(this.getClass)
  private val consulServiceExtract = new ConsulServiceExtract(configuration)

  // Looks up `name` in Consul; None when the service is not registered.
  override def locate(name: String, serviceCall: Call[_, _]): Future[Option[URI]] = Future {
    logger.debug(s"request Service Name: ${name}.")
    consulServiceExtract.getService(name)
  }
}
开发者ID:shinhwagk,项目名称:cmsfs,代码行数:29,代码来源:ConsulServiceLocatorComponents.scala
示例15: GitSourceDownloaderActor
//设置package包名称以及导入依赖的类
package cloud.hw.actors
import akka.actor.{Actor, Props}
import akka.routing.BalancingPool
import cloud.hw.app.ErrorCase
import cloud.hw.util.GitSourceDownloader
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
// Actor that downloads a Git repository (as a zip) and fans its contents out
// to a pool of ElasticsearchAdderActors in small batches.
class GitSourceDownloaderActor extends Actor{
  val logger = Logger(LoggerFactory.getLogger(this.getClass))

  def receive = {
    case GitSourceReceiver(metadata) =>
      logger.info("Download git project and send to ElasticsearchAdderActor")
      // FIX: GitSourceDownloader.fromUrl returns Unit for non-GitHub URLs, so
      // the previous unconditional asInstanceOf cast could throw
      // ClassCastException and crash the actor; pattern match instead.
      GitSourceDownloader.fromUrl(metadata("downloadUrl")) match {
        case downloader: GitSourceDownloader =>
          var unzippedRepo = downloader.download
          // Creates a balance pool of actors to split up the work.
          val router = context.actorOf(BalancingPool(4).props(Props(new ElasticsearchAdderActor())))
          // Run through all the lines and give each actor 15 entries to insert into
          // Elasticsearch. We used 15 because more than that and we were basically DDOS
          // the Elasticsearch server.
          while(!unzippedRepo.isEmpty) {
            logger.info("Taking 15 entries")
            router ! AddGitProjectToElasticsearch(metadata("projectName"), unzippedRepo.take(15))
            unzippedRepo = unzippedRepo.drop(15)
          }
        case _ =>
          logger.info("Download url is not a valid GitHub repo url")
      }
    // Not a Git repo, then don't do anything.
    case ErrorCase => logger.info("Error with Git repo download")
    case _ => logger.info("Not a vaild message to GitSourceDownloaderActor")
  }
}
开发者ID:mendozagabe1618,项目名称:openhub-source-search-engine,代码行数:37,代码来源:GitSourceDownloaderActor.scala
示例16: GitSourceDownloader
//设置package包名称以及导入依赖的类
package cloud.hw.util
import scalaj.http._
import com.typesafe.scalalogging._
import org.slf4j.LoggerFactory
object GitSourceDownloader {
  // Parses a GitHub project URL into (user, repo) and builds a downloader.
  // NOTE(review): on a non-matching URL this returns the Unit companion
  // object, so the inferred result type is Any and callers must type-check
  // the result — consider returning Option[GitSourceDownloader] instead.
  def fromUrl(url: String) = {
    val pattern = """https?://github.com/([^/]+)/([^/]+).*""".r
    url match {
      case pattern(user, repo) => new cloud.hw.util.GitSourceDownloader(user, repo)
      case _ => Unit
    }
  }
}
// Downloads the master branch of a GitHub repository as a zip archive and
// expands it in memory via Unzipper.
class GitSourceDownloader(val user: String, val repo: String) {
  private val logger = Logger(LoggerFactory.getLogger(this.getClass))
  private val url = s"https://codeload.github.com/${user}/${repo}/zip/master"

  override def toString = s"source url: $url"

  // Fetch the archive bytes and unzip them into content entries.
  def download = {
    logger.info(s"downloading from $url")
    val archive = Http(url).asBytes
    val byteStream = new java.io.ByteArrayInputStream(archive.body)
    Unzipper.unzipStream(byteStream)
  }
}
开发者ID:mendozagabe1618,项目名称:openhub-source-search-engine,代码行数:30,代码来源:GitSourceDownloader.scala
示例17: OpenHubMetadataFetcher
//设置package包名称以及导入依赖的类
package cloud.hw.util
import com.typesafe.scalalogging._
import org.slf4j.LoggerFactory
import scala.xml.{XML}
// Convenience constructors: fetch OpenHub project metadata either from a raw
// URL or from an API key plus project id.
object OpenHubMetadataFetcher {
  def forUrl(url: String) = {
    val fetcher = new OpenHubMetadataFetcher(url)
    fetcher.download()
  }
  def forKeyId(apiKey: String, id: Int) =
    forUrl(s"https://www.openhub.net/p/${id}.xml?api_key=${apiKey}&v=1")
}
// Fetches an OpenHub project XML document and extracts selected metadata.
class OpenHubMetadataFetcher(url: String) {
  private val logger = Logger(LoggerFactory.getLogger(this.getClass))

  // Downloads the XML at `url` and returns downloadUrl / projectName / tags.
  def download(): Map[String, String] = {
    logger.info(s"downloading from $url")
    // FIX: removed an unused local (`val id = 0`) and replaced the postfix
    // `xml toString` (which needs scala.language.postfixOps) with a dot call.
    val xml = XML.load(new java.net.URL(url))
    val downUrl = (xml \ "result" \ "project" \ "download_url").text
    logger.trace(xml.toString)
    Map(
      "downloadUrl" -> downUrl,
      "projectName" -> (xml \ "result" \ "project" \ "name").text,
      "tags" -> (xml \ "result" \ "project" \ "tags" \ "tag").map(n => n.text).mkString(",")
    )
  }
}
开发者ID:mendozagabe1618,项目名称:openhub-source-search-engine,代码行数:35,代码来源:OpenHubMetadataFetcher.scala
示例18: Unzipper
//设置package包名称以及导入依赖的类
package cloud.hw.util
import java.io.{File, FileInputStream, IOException, InputStream}
import java.nio.charset.CodingErrorAction
import java.util.zip.{ZipEntry, ZipInputStream}
import scala.collection.mutable.ArrayBuffer
import scala.io.{Codec, Source}
import com.typesafe.scalalogging._
import org.slf4j.LoggerFactory
// Expands a zip stream fully in memory, producing one ZipContentEntry (file
// name + text lines) per regular-file entry.
object Unzipper {
  private val logger = Logger(LoggerFactory.getLogger(this.getClass))

  // Reads every entry of `zis` and returns the non-directory entries.
  // Note: the stream is fully consumed and closed by this call.
  def unzip(zis: ZipInputStream): List[ZipContentEntry] = {
    // FIX: removed an unused local (`val buffer = new Array[Byte](1024)`).
    var ze: ZipEntry = zis.getNextEntry()
    var entries = new ArrayBuffer[ZipContentEntry]
    while (ze != null) {
      val fileName = ze.getName();
      // Source reads only up to the end of the current zip entry.
      val lines = Source.fromInputStream(zis).getLines().toList
      val entry = new ZipContentEntry(fileName, lines)
      logger.trace(s"read: $entry")
      // Directory entries are read but discarded.
      if (!ze.isDirectory()) {
        entries += entry
      }
      ze = zis.getNextEntry()
    }
    zis.closeEntry()
    zis.close()
    entries.toList
  }
}
开发者ID:mendozagabe1618,项目名称:openhub-source-search-engine,代码行数:40,代码来源:Unzipper.scala
示例19: MsgPack
//设置package包名称以及导入依赖的类
package com.taxis99.amazon.serializers
import java.util.Base64
import com.typesafe.scalalogging.Logger
import msgpack4z.{MsgOutBuffer, _}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsValue, Json}
// Base64 + MessagePack serializer: decodes a base64-encoded msgpack payload
// into a Play JsValue, falling back to an empty JSON object on failure.
object MsgPack extends ISerializer {
  protected val logger: Logger =
    Logger(LoggerFactory.getLogger(getClass.getName))
  protected val jsonVal = Play2Msgpack.jsValueCodec(PlayUnpackOptions.default)
  protected val b64encoder = Base64.getEncoder
  protected val b64decoder = Base64.getDecoder

  // Decode failures are logged and swallowed: callers receive Json.obj().
  override def decode(value: String): JsValue = {
    val bytes: Array[Byte] = b64decoder.decode(value)
    val unpacker = MsgInBuffer.apply(bytes)
    val unpacked = jsonVal.unpack(unpacker)
    unpacked.valueOr { e =>
      logger.error("Could not decode message", e)
      // FIX: removed a `new ClassCastException(...)` that was constructed but
      // never thrown (dead code); the fallback behavior is unchanged.
      Json.obj()
    }
  }
}
开发者ID:99Taxis,项目名称:common-sqs,代码行数:30,代码来源:MsgPack.scala
示例20: MappingLoader
//设置package包名称以及导入依赖的类
package com.ss.mapping.impl
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.api.{Descriptor, ServiceLocator}
import com.lightbend.lagom.scaladsl.devmode.LagomDevModeComponents
import com.lightbend.lagom.scaladsl.persistence.cassandra.CassandraPersistenceComponents
import com.lightbend.lagom.scaladsl.playjson.{JsonSerializer, JsonSerializerRegistry}
import com.lightbend.lagom.scaladsl.server._
import com.softwaremill.macwire._
import com.ss.mapping.api.MappingService
import org.slf4j.LoggerFactory
import play.api.libs.ws.ahc.AhcWSComponents
import scala.collection.immutable.Seq
// Lagom application loader for the mapping service; dev mode mixes in the
// dev-mode service locator, production runs without one.
class MappingLoader extends LagomApplicationLoader {
  val logger = LoggerFactory.getLogger(getClass)

  override def loadDevMode(context: LagomApplicationContext): LagomApplication =
    new MappingApplication(context) with LagomDevModeComponents

  // NOTE(review): NoServiceLocator means cross-service calls will fail in
  // production unless a real locator is provided by deployment tooling.
  override def load(context: LagomApplicationContext): LagomApplication =
    new MappingApplication(context) {
      override def serviceLocator: ServiceLocator = NoServiceLocator
    }

  override def describeServices: Seq[Descriptor] = Seq(
    readDescriptor[MappingService]
  )
}
// Wires the mapping service implementation with Cassandra persistence and the
// async WS client; also registers the read-side event processor.
abstract class MappingApplication(context: LagomApplicationContext)
  extends LagomApplication(context)
    with CassandraPersistenceComponents
    with AhcWSComponents {

  override def jsonSerializerRegistry: JsonSerializerRegistry = {
    MappingSerializerRegistry
  }

  override def lagomServer: LagomServer = {
    LagomServer.forServices(
      bindService[MappingService].to(wire[MappingServiceImpl])
    )
  }

  // Side effect at construction time: attach the read-side processor for
  // node events.
  readSide.register(wire[NodeReadEventsProcessor])
}
// Play-JSON serializers for the mapping service's commands and events.
object MappingSerializerRegistry extends JsonSerializerRegistry {
  override def serializers: Seq[JsonSerializer[_]] = Seq(
    JsonSerializer[RegisterMapping],
    JsonSerializer[MappingRegistered],
    // Singleton (case object) messages are registered via their `.type`.
    JsonSerializer[LoadMapping.type],
    JsonSerializer[UnregisterMapping.type],
    JsonSerializer[MappingUnregistered.type]
  )
}
开发者ID:ytaras,项目名称:iot_lagom_poc,代码行数:56,代码来源:MappingLoader.scala
注:本文中的org.slf4j.LoggerFactory类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论