This article collects typical usage examples of scala.math.random in Scala (note that random is a method of the scala.math package object, not a class). If you have been wondering what scala.math.random does or how to use it, the curated examples below may help.
Nine code examples of random are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala examples.
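All nine examples share the same Monte Carlo technique: sample points uniformly from the square [-1, 1] x [-1, 1]; the fraction that lands inside the unit circle approximates (circle area) / (square area) = Pi / 4. Before the Spark versions, here is a minimal single-machine sketch of the idea, assuming nothing beyond the standard library (random returns a uniformly distributed Double in [0, 1)):

import scala.math.random

object LocalPi {
  def main(args: Array[String]): Unit = {
    val n = 1000000 // number of sample points
    val inside = (1 to n).count { _ =>
      val x = random * 2 - 1 // x, y uniform in [-1, 1]
      val y = random * 2 - 1
      x * x + y * y <= 1 // true if the point falls inside the unit circle
    }
    println("Pi is roughly " + 4.0 * inside / n)
  }
}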
Example 1: SparkPi
// Set the package name and import the required dependencies
import scala.math.random
import org.apache.spark.sql.SparkSession

object SparkPi {
  def main(args: Array[String]) {
    val spark = SparkSession
      .builder
      .appName("Spark Pi")
      .master("local[2]")
      .getOrCreate()
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
      // sample a point uniformly from the square [-1, 1] x [-1, 1]
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x*x + y*y <= 1) 1 else 0 // count the point if it falls inside the unit circle
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / (n - 1))
    Console.in.read() // wait for a keypress, leaving time to inspect the Spark web UI
    spark.stop()
  }
}
Developer: DevipriyaSarkar, Project: SparkPi, Lines: 25, Source: SparkPi.scala
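One small detail in this example (and in most of the variants below): `1 until n` excludes the upper bound, so the parallelized range holds n - 1 sample points, which is why the final estimate divides by (n - 1) rather than n. A quick check:

val points = (1 until 10).size // 9, not 10: `until` excludes the upper bound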
Example 2: SparkPi
// Set the package name and import the required dependencies
package com.zmyuan.spark.submit

import kafka.utils.Logging // note: imported but unused in this snippet
import org.apache.log4j.Logger
import scala.math.random
import org.apache.spark.sql.SparkSession

object SparkPi {
  val loggerName = this.getClass.getName
  lazy val logger = Logger.getLogger(loggerName)

  def main(args: Array[String]) {
    // No .master(...) call here: the master URL is expected to be supplied at spark-submit time.
    val spark = SparkSession
      .builder
      .appName("Spark Pi")
      .getOrCreate()
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x*x + y*y < 1) 1 else 0
    }.reduce(_ + _)
    logger.info("Pi is roughly " + 4.0 * count / (n - 1))
    spark.stop()
  }
}
Developer: zhudebin, Project: spark-submit, Lines: 32, Source: SparkPi.scala
Example 3: SparkPi
// Set the package name and import the required dependencies
import scala.math.random
import org.apache.spark.sql.SparkSession

object SparkPi {
  def main(args: Array[String]) {
    val spark = SparkSession
      .builder
      .appName("Spark Pi")
      .master("local[2]")
      .getOrCreate()
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x*x + y*y <= 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / (n - 1))
    // Console.in.read() // wait for a keypress, leaving time to inspect the Spark web UI
    spark.stop()
  }
}
Developer: DevipriyaSarkar, Project: SparkPiDocker, Lines: 25, Source: SparkPi.scala
Example 4: SparkPi
// Set the package name and import the required dependencies
// scalastyle:off println
package sparkpiscala

import scala.math.random
import org.apache.spark._

object SparkPi {
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("Spark Pi")
    val spark = new SparkContext(conf)
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x*x + y*y < 1) 1 else 0
    }.reduce(_ + _)
    // note: the range holds n - 1 samples, so dividing by (n - 1) would be marginally more accurate
    println("Pi is roughly " + 4.0 * count / n)
    spark.stop()
  }
}
// scalastyle:on println
Developer: DavidContrerasICAI, Project: sparkCode, Lines: 25, Source: SparkPi.scala
Example 5: MetricsExampleApp
// Set the package name and import the required dependencies
package org.apache.spark.groupon.metrics.example

import org.apache.spark.groupon.metrics.UserMetricsSystem
import org.apache.spark.{SparkContext, SparkConf}
import scala.math.random

object MetricsExampleApp {
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("Spark Pi")
    val spark = new SparkContext(conf)
    UserMetricsSystem.initialize(spark, "SparkPiMetricNamespace")
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.parallelize(1 until n, slices).map(i => {
      UserMetricsSystem.timer("MapTimer").time({
        val x = random * 2 - 1
        val y = random * 2 - 1
        UserMetricsSystem.gauge("x").set(x)
        if (x * x + y * y < 1) 1 else 0
      })
    }).reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / n)
    spark.stop()
  }
}
Developer: groupon, Project: spark-metrics, Lines: 31, Source: MetricsExampleApp.scala
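The map function above depends on `time` returning the value of the measured block (the 1 or 0 it produces). Under that same assumption, the pattern can also wrap driver-side work. Here is a small fragment meant to sit after the reduce in main above; the metric names EstimateTimer and piEstimate are hypothetical, not from the original project:

// Hypothetical fragment (would follow the reduce in main above):
// time the final computation and publish the estimate as a gauge.
val piEstimate = UserMetricsSystem.timer("EstimateTimer").time({
  4.0 * count / n
})
UserMetricsSystem.gauge("piEstimate").set(piEstimate)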
Example 6: SparkPi
// Set the package name and import the required dependencies
package com.stulsoft.pspark.basics

import org.apache.spark.{SparkConf, SparkContext}
import scala.math.random

object SparkPi extends App {
  val conf = new SparkConf().setAppName("Spark Pi").setMaster("local[*]")
  val sc = new SparkContext(conf)
  val slices = if (args.length > 0) args(0).toInt else 2
  val n = math.min(1000000L * slices, Int.MaxValue).toInt // avoid overflow
  val count = sc.parallelize(1 until n, slices).map { i =>
    val x = random * 2 - 1
    val y = random * 2 - 1
    if (x * x + y * y <= 1) 1 else 0
  }.reduce(_ + _)
  println("Pi is roughly " + 4.0 * count / (n - 1))
  sc.stop()
}
Developer: ysden123, Project: poc, Lines: 23, Source: SparkPi.scala
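A caveat on this variant: it places the Spark logic in an object that extends scala.App. The Spark documentation recommends defining a main method instead, because scala.App relies on delayedInit and closures capturing its fields may not work correctly on executors. A sketch of the same program restructured that way (the object name SparkPiMain is ours; the logic is unchanged):

import scala.math.random
import org.apache.spark.{SparkConf, SparkContext}

object SparkPiMain {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Spark Pi").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(1000000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = sc.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x * x + y * y <= 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / (n - 1))
    sc.stop()
  }
}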
Example 7: FirstSpark
// Set the package name and import the required dependencies
package com.lhcg.util

import scala.math.random
import org.apache.spark._

object FirstSpark {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Spark Pi")
    val spark = new SparkContext(conf)
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x*x + y*y < 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / (n - 1))
    spark.stop()
  }
}
Developer: lhcg, Project: lovespark, Lines: 23, Source: FirstSpark.scala
Example 8: WebServerHttpApp
// Set the package name and import the required dependencies
package io.radanalytics.examples.akka.sparkpi

import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport.defaultNodeSeqMarshaller
import akka.http.scaladsl.server.{HttpApp, Route}
import scala.math.random
import org.apache.spark.sql.SparkSession

object WebServerHttpApp extends HttpApp with App {
  def routes: Route =
    pathEndOrSingleSlash { // listens on the top-level `/`
      complete("Scala Akka SparkPi server running. Add the 'sparkpi' route to this URL to invoke the app.")
    } ~
    path("sparkpi") { // listens on paths that are exactly `/sparkpi`
      get { // listens only to GET requests
        val spark = SparkSession.builder.appName("Scala SparkPi WebApp").getOrCreate()
        val slices = if (args.length > 0) args(0).toInt else 2
        val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
        val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
          val x = random * 2 - 1
          val y = random * 2 - 1
          if (x * x + y * y < 1) 1 else 0
        }.reduce(_ + _)
        spark.stop()
        complete("Pi is roughly " + 4.0 * count / (n - 1))
      }
    }

  // Runs the server until the return key is pressed
  startServer("0.0.0.0", 8080)
}
Developer: pdmack, Project: scala-spark-webapp, Lines: 35, Source: WebServerHttpApp.scala
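A design note on this example: the SparkSession is created and stopped inside the GET handler, so every request to /sparkpi pays the full session startup cost, and concurrent requests can stop a session another request is still using. A common alternative, sketched below under the assumption that one long-lived session is acceptable (the object name SharedSessionSparkPi and the fixed sample count are ours), is to build the session once and stop it only when the server shuts down:

import akka.http.scaladsl.server.{HttpApp, Route}
import org.apache.spark.sql.SparkSession
import scala.math.random

object SharedSessionSparkPi extends HttpApp with App {
  // Build the session once; requests reuse it instead of creating their own.
  lazy val spark = SparkSession.builder.appName("Scala SparkPi WebApp").getOrCreate()

  def routes: Route =
    path("sparkpi") {
      get {
        val n = 100000
        val count = spark.sparkContext.parallelize(1 until n).map { i =>
          val x = random * 2 - 1
          val y = random * 2 - 1
          if (x * x + y * y < 1) 1 else 0
        }.reduce(_ + _)
        complete("Pi is roughly " + 4.0 * count / (n - 1))
      }
    }

  startServer("0.0.0.0", 8080) // blocks until the return key is pressed
  spark.stop()                 // then release the session on shutdown
}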
Example 9: SparkPi
// Set the package name and import the required dependencies
package com.appleeye.spark.driver

import scala.math.random
import org.apache.spark.sql.SparkSession

object SparkPi {
  def main(args: Array[String]) {
    val spark = SparkSession
      .builder.master("local")
      .appName("Spark Pi")
      .getOrCreate()
    val slices = if (args.length > 0) args(0).toInt else 2
    val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
    val count = spark.sparkContext.parallelize(1 until n, slices).map { i =>
      val x = random * 2 - 1
      val y = random * 2 - 1
      if (x*x + y*y <= 1) 1 else 0
    }.reduce(_ + _)
    println("Pi is roughly " + 4.0 * count / (n - 1))
    spark.stop()
  }
}
Developer: appleeye, Project: spark-project-module, Lines: 25, Source: SparkPi.scala
Note: the scala.math.random examples in this article were collected from open-source projects hosted on GitHub, MSDocs, and similar source-code and documentation platforms; the snippets were selected from projects contributed by the open-source community. Copyright of the source code remains with the original authors; consult each project's License before using or redistributing the code. Do not republish without permission.