
Scala Map Class Code Examples


This article collects typical usage examples of scala.collection.Map in Scala. If you are wondering how the Scala Map class works, what it is used for, or where to find concrete examples, the curated class examples below should help.



The following presents 20 code examples of the Map class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
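
As a quick orientation before the examples: scala.collection.Map is the read-only supertype shared by immutable.Map and mutable.Map, which is why the snippets below import it for APIs that should accept either implementation. A minimal sketch (names are illustrative, not from any example below):

import scala.collection.Map
import scala.collection.{immutable, mutable}

object MapDemo extends App {
  // An API written against scala.collection.Map accepts both map families.
  def describe(m: Map[String, Int]): String =
    m.map { case (k, v) => s"$k=$v" }.mkString(", ")

  println(describe(immutable.Map("a" -> 1)))  // a=1
  println(describe(mutable.Map("b" -> 2)))    // b=2
}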

Example 1: LRPmmlEvaluator

// Package declaration and imported dependencies
package com.inneractive.cerberus

import java.io.{File, FileInputStream}
import java.util
import javax.xml.transform.stream.StreamSource

import org.dmg.pmml.{FieldName, Model, PMML}
import org.jpmml.evaluator.{EvaluatorUtil, FieldValue, ModelEvaluator, ModelEvaluatorFactory}
import org.jpmml.model.JAXBUtil

import scala.collection.JavaConversions._ // assumed import, needed to map over the java.util.List fields below
import scala.collection.Map
import scala.util.Try



class LRPmmlEvaluator(modelLocation: String, threshold: Option[Double]) {

  val inputStream = new FileInputStream(new File(modelLocation, "/model/PMML/PMML.xml"))
  val pmmModel: PMML = JAXBUtil.unmarshalPMML(new StreamSource(inputStream))

  val evaluatorFactory: ModelEvaluatorFactory = ModelEvaluatorFactory.newInstance()
  val evaluator: ModelEvaluator[_ <: Model] = evaluatorFactory.newModelManager(pmmModel)
  val activeFields: util.List[FieldName] = evaluator.getActiveFields
  val targetFields: util.List[FieldName] = EvaluatorUtil.getTargetFields(evaluator)
  val outputFields: util.List[FieldName] = EvaluatorUtil.getOutputFields(evaluator)

  def arguments(eval: FieldName => (FieldName, FieldValue)) = (activeFields map eval).toMap

  def getResults(result: java.util.Map[FieldName, _]) = {
    val targets = targetFields map (f => EvaluatorUtil.decode(result.get(f)))
    val outputs = outputFields map (f => result.get(f))
    Tuple3(targets.head.asInstanceOf[Double], outputs.head.asInstanceOf[Double], outputs(1).asInstanceOf[Double])
  }

  def callEvaluator(params : Map[FieldName, _]) = getResults(evaluator.evaluate(params))

  def predict(features: Features) = Try {
    val r = callEvaluator(arguments((f: FieldName) => {
      val value = features.vector(f.getValue)
      val activeValue = evaluator.prepare(f, value)
      (f, activeValue)
    }))
    new Prediction("", r._1 == 0.0, if (r._1 == 0.0) r._2 else r._3)
  }
} 
Author: inneractive-opensrc | Project: cerberus | Lines: 46 | Source: Evaluators.scala
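
A hypothetical invocation, assuming a Features type exposing the feature vector and the project's Prediction case class (neither is shown in this snippet):

val evaluator = new LRPmmlEvaluator("/path/to/model", threshold = None)
val prediction = evaluator.predict(features)  // Try[Prediction]; features: Features is assumed
prediction.foreach(println)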


Example 2: RDDJoinExamples

// Package declaration and imported dependencies
package  com.highperformancespark.examples.goldilocks

import org.apache.spark.HashPartitioner
import org.apache.spark.rdd.RDD

import scala.collection.Map
import scala.reflect.ClassTag

object RDDJoinExamples {

 
 //tag::coreBroadCast[]
 def manualBroadCastHashJoin[K : Ordering : ClassTag, V1 : ClassTag,
 V2 : ClassTag](bigRDD : RDD[(K, V1)],
  smallRDD : RDD[(K, V2)])= {
  val smallRDDLocal: Map[K, V2] = smallRDD.collectAsMap()
  // Keep a reference to the broadcast so the map ships once per executor,
  // instead of being captured in every task closure (the original discarded it)
  val smallRDDLocalBcast = bigRDD.sparkContext.broadcast(smallRDDLocal)
  bigRDD.mapPartitions(iter => {
   iter.flatMap{
    case (k, v1) =>
     smallRDDLocalBcast.value.get(k) match {
      case None => Seq.empty[(K, (V1, V2))]
      case Some(v2) => Seq((k, (v1, v2)))
     }
   }
  }, preservesPartitioning = true)
 }
 //end::coreBroadCast[]
} 
Author: gourimahapatra | Project: high-performance-spark | Lines: 30 | Source: RDDJoinExamples.scala
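
A small usage sketch, assuming an existing SparkContext sc:

val big   = sc.parallelize(Seq((1, "a"), (2, "b"), (3, "c")))
val small = sc.parallelize(Seq((1, 1.0), (3, 3.0)))
RDDJoinExamples.manualBroadCastHashJoin(big, small).collect()
// Array((1,(a,1.0)), (3,(c,3.0))) -- keys missing from the small side are dropped, like an inner join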


Example 3: ParallelCollectionLRDD

// Package declaration and imported dependencies
package org.apache.spark.lineage.rdd

import org.apache.spark.OneToOneDependency
import org.apache.spark.lineage.LineageContext
import org.apache.spark.rdd.ParallelCollectionRDD

import scala.collection.Map
import scala.reflect._

private[spark] class ParallelCollectionLRDD[T: ClassTag](
    @transient lc: LineageContext,
    @transient data: Seq[T],
    numSlices: Int,
    locationPrefs: Map[Int, Seq[String]])
  extends ParallelCollectionRDD[T](lc.sparkContext, data, numSlices, locationPrefs)
  with Lineage[T] {

  override def lineageContext = lc

  override def ttag: ClassTag[T] = classTag[T]

  override def tapRight(): TapLRDD[T] = {
    val tap = new TapParallelCollectionLRDD[T](lineageContext,  Seq(new OneToOneDependency(this)))
    setTap(tap)
    setCaptureLineage(true)
    tap
  }
} 
Author: lmd1993 | Project: bigsiftParallel | Lines: 29 | Source: ParallelCollectionLRDD.scala


Example 4: findNearestStreets

// Package declaration and imported dependencies
package mapdomain.street

import base.{ LazyLoggerSupport, MeterSupport }
import mapdomain.graph._
import mapdomain.repository.street.{ StreetRepositorySupport, StreetVertexRepository }
import mapdomain.utils.GraphUtils

import scala.collection.Map
import scala.collection.concurrent.TrieMap

trait StreetGraphContainer extends GeoGraphContainer[StreetEdge, StreetVertex[StreetEdge]] {
  def findNearestStreets(coordinate: Coordinate, radius: Double): List[StreetEdge]
  def vertices: List[StreetVertex[StreetEdge]]
}

case class LazyStreetGraphContainer() extends StreetGraphContainer with StreetRepositorySupport {

  protected val vertexById = new TrieMap[Long, StreetVertex[StreetEdge]]()
  protected val totalVertices: Long = streetVertexRepository.totalVertices

  override def vertices: List[StreetVertex[StreetEdge]] = {
    // Serve from the in-memory cache only once every vertex has been loaded;
    // otherwise fall back to the repository (the scraped original discarded vertexById.keys here)
    if (totalVertices == vertexById.size) vertexById.values.toList
    else StreetVertexRepository.findAll
  }

  override def findNearestStreets(coordinate: Coordinate, radius: Double): List[StreetEdge] = streetEdgeRepository.findNearestStreets(coordinate, radius)

  override def findNearest(coordinate: Coordinate): Option[StreetVertex[StreetEdge]] = streetVertexRepository.findNearest(coordinate)

  
  def purgeStreets: UnsavedStreetGraphContainer = withTimeLogging({
    logger.info(s"Purge the unsaved street graph in order to get a connected graph")
    GraphUtils.getConnectedComponent[StreetEdgeUnsaved, UnsavedStreetVertex, UnsavedStreetGraphContainer](this, UnsavedStreetGraphContainer.apply)
  }, (time: Long) => logger.info(s"Street graph was purged in $time ms."))

} 
Author: cspinetta | Project: footpath-routing | Lines: 39 | Source: StreetGraphContainer.scala


Example 5: Prediction

// Package declaration and imported dependencies
package dagger.ml

import scala.util.Random
import scala.collection.Map

/**
 * Created by narad on 8/8/14.
 */
case class Prediction[T](label2score: Map[T, Float], entropy: Double = 0.0) {

  def randomMaxLabel(random: Random) = {
    assert(maxLabels.nonEmpty)
    val r = random.nextInt(maxLabels.size)
    maxLabels(r)
  }

  lazy val maxScore: Double = label2score.maxBy(_._2)._2

  lazy val maxLabels: Seq[T] = label2score.toSeq.filter(_._2 == maxScore).map(_._1)

  def maxLabelsWithinThreshold(threshold: Double, optionLimit: Int): Seq[(T, Float)] = {
    val withinThreshold = label2score.toSeq.filter(_._2 >= maxScore - threshold).map {
      x => ((x._1, (x._2 - maxScore).toFloat))
    }
    withinThreshold.sortBy(-_._2).take(optionLimit)
  }
  lazy val maxLabel = maxLabels.head

  override def toString = {
    "Prediction:\n" +
      label2score.keys.map { k =>
        "  %s: %f".format(k, label2score(k))
      }.mkString("\n")
  }
} 
Author: hopshackle | Project: mr-dagger-public | Lines: 36 | Source: Prediction.scala
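
A minimal sketch of how this class behaves, including the tie handling that motivates maxLabels:

val p = Prediction[String](Map("cat" -> 0.9f, "dog" -> 0.9f, "bird" -> 0.1f))
p.maxScore                                   // 0.9
p.maxLabels                                  // both tied labels, cat and dog
p.randomMaxLabel(new scala.util.Random(42))  // picks one of the tied labels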


Example 6: PassiveAggressiveUpdate

// Package declaration and imported dependencies
package dagger.ml
import scala.reflect.ClassTag
import scala.collection.Map
import scala.collection.mutable.HashMap

class PassiveAggressiveUpdate[T: ClassTag] extends PerceptronUpdateRule {

  override def update(instance: Instance[T], classifier: AROWClassifier[T], options: AROWOptions): Unit = {
    val prediction = classifier.predict(instance)
    val temp = instance.correctLabels.map(l => (l, prediction.label2score(l))).toArray.sortBy(_._2)
    if (temp.isEmpty) println("No Correct Labels found for: \n" + instance)
    val weightVectors = classifier.weights
    val cachedWeightVectors = classifier.cachedWeights
    val maxLabel = prediction.maxLabel
    val maxScore = prediction.maxScore
    val (minCorrectLabel, minCorrectScore) = temp.head
    val labelList = instance.labels
    val iMaxLabel = labelList.indexOf(maxLabel)
    val icost = instance.costOf(maxLabel)
    val maxWeightLabel = instance.weightLabels(iMaxLabel)
    val iMinCorrectLabel = labelList.indexOf(minCorrectLabel)
    val minCorrectWeightLabel = instance.weightLabels(iMinCorrectLabel)
    val loss = (maxScore - minCorrectScore + math.sqrt(instance.costOf(maxLabel))).toFloat
    val norm = 2 * (AROW.dotMap(instance.feats(iMinCorrectLabel), instance.feats(iMinCorrectLabel)))
    val factor = loss / (norm + (1.0f / (2 * options.SMOOTHING.toFloat)))

    AROW.add(weightVectors(maxWeightLabel), instance.feats(iMaxLabel), -1.0f * factor)
    AROW.add(weightVectors(minCorrectWeightLabel), instance.feats(iMinCorrectLabel), factor)
    if (options.AVERAGING) {
      AROW.add(cachedWeightVectors(maxWeightLabel), instance.feats(iMaxLabel), -1.0f * factor * classifier.averagingCounter)
      AROW.add(cachedWeightVectors(minCorrectWeightLabel), instance.feats(iMinCorrectLabel), factor * classifier.averagingCounter)
    }
  }
} 
Author: hopshackle | Project: mr-dagger-public | Lines: 35 | Source: PassiveAggressiveUpdate.scala


Example 7: DecisionTreeUtil

// Package declaration and imported dependencies
package org.sparksamples.decisiontree

import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.DecisionTree
import org.apache.spark.rdd.RDD
import org.sparksamples.Util

import scala.collection.Map
import scala.collection.mutable.ListBuffer


object DecisionTreeUtil {

  def getTrainTestData(): (RDD[LabeledPoint], RDD[LabeledPoint]) = {
    val recordsArray = Util.getRecords()
    val records = recordsArray._1
    val first = records.first()
    val numData = recordsArray._2

    println(numData.toString())
    records.cache()
    print("Mapping of first categorical feature column: " +  Util.get_mapping(records, 2))
    var list = new ListBuffer[Map[String, Long]]()
    for( i <- 2 to 9){
      val m = Util.get_mapping(records, i)
      list += m
    }
    val mappings = list.toList
    var catLen = 0
    mappings.foreach( m => (catLen +=m.size))

    val numLen = records.first().slice(11, 15).size
    val totalLen = catLen + numLen

    val data = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extractFeatures(r, catLen, mappings)))
    }
    val data_dt = {
      records.map(r => LabeledPoint(Util.extractLabel(r), Util.extract_features_dt(r)))
    }

    val splits = data_dt.randomSplit(Array(0.8, 0.2), seed = 11L)
    val training = splits(0).cache()
    val test = splits(1)
    return (training, test)
  }

  def evaluate(train: RDD[LabeledPoint],test: RDD[LabeledPoint],
               categoricalFeaturesInfo: scala.Predef.Map[Int, Int],
                maxDepth :Int, maxBins: Int): Double = {
    val impurity = "variance"
    val decisionTreeModel = DecisionTree.trainRegressor(train, categoricalFeaturesInfo,
      impurity,maxDepth, maxBins )

    val true_vs_predicted = test.map(p => (p.label, decisionTreeModel.predict(p.features)))
    val rmsle = Math.sqrt(true_vs_predicted.map{ case(t, p) => Util.squaredLogError(t, p)}.mean())
    return rmsle
  }

} 
Author: PacktPublishing | Project: Machine-Learning-with-Spark-Second-Edition | Lines: 61 | Source: DecisionTreeUtil.scala
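
A usage sketch, assuming the Util helpers referenced above are available; an empty categoricalFeaturesInfo map treats all features as continuous:

val (train, test) = DecisionTreeUtil.getTrainTestData()
val rmsle = DecisionTreeUtil.evaluate(train, test, Map.empty[Int, Int], maxDepth = 5, maxBins = 32)
println(s"RMSLE: $rmsle")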


Example 8: encode

// Package declaration and imported dependencies
package com.ox.bigdata.util.kafka

import kafka.common.KafkaException
import kafka.utils.Logging

import scala.collection.{Iterable, Map}
import scala.util.parsing.json.JSON


object Json extends Logging { // assumed wrapper: the enclosing object declaration was lost in extraction

  def encode(obj: Any): String = {
    obj match {
      case null => "null"
      case b: Boolean => b.toString
      case s: String => "\"" + s + "\""
      case n: Number => n.toString
      case m: Map[_, _] =>
        "{" +
          m.map(elem =>
            elem match {
              case t: Tuple2[_, _] => encode(t._1) + ":" + encode(t._2)
              case _ => throw new IllegalArgumentException("Invalid map element (" + elem + ") in " + obj)
            }).mkString(",") +
          "}"
      case a: Array[_] => encode(a.toSeq)
      case i: Iterable[_] => "[" + i.map(encode).mkString(",") + "]"
      case other: AnyRef => throw new IllegalArgumentException("Unknown argument of type " + other.getClass + ": " + other)
    }
  }
} 
Author: black-ox | Project: simple | Lines: 30 | Source: Json.scala


Example 9: CompareColors

// Package declaration and imported dependencies
package org.kneelawk.imagegenerator2.comparecolors

import java.awt.image.BufferedImage

import scala.collection.Map

import org.kneelawk.imagegenerator2.ImageGenerator
import org.kneelawk.imagegenerator2.util.MathUtil
import javax.imageio.ImageIO
import java.io.File

object CompareColors extends ImageGenerator {
  def name: String = "CompareColors"

  def options: Seq[(String, String)] = Array(
    ("path", "Path to starting image"))

  def apply(i: BufferedImage, options: Map[String, String], width: Int, height: Int): Unit = {
    import MathUtil._
    val input = ImageIO.read(new File(options("path")))

    if (input.getWidth != width || input.getHeight != height) throw new IllegalArgumentException("Input and output image sizes must match")
    
    for (y <- 0 until height; x <- 0 until width) {
      val r = input.getRGB(mod2(x + 1, 0, width), y)
      val l = input.getRGB(mod2(x - 1, 0, width), y)
      val d = input.getRGB(x, mod2(y + 1, 0, height))
      val u = input.getRGB(x, mod2(y - 1, 0, height))
      val c = input.getRGB(x, y)
      
      if (r != c || l != c || d != c || u != c) {
        i.setRGB(x, y, 0xFFFFFFFF)
      } else {
        i.setRGB(x, y, 0x00000000)
      }
    }
  }
} 
Author: Kneelawk | Project: ImageGenerator2 | Lines: 39 | Source: CompareColors.scala


Example 10: MultiMap

// Package declaration and imported dependencies
package com.github.tarao.namedcap

import scala.collection.immutable.{Map => ImmutableMap}
import scala.collection.Map

object MultiMap {
  def apply(): MultiMap = new MultiMap(Map.empty)
  def apply[S <: Seq[String]](m: Map[String, S]): MultiMap = new MultiMap(m)
  val empty = MultiMap()
}


class MultiMap(m: Map[String, Seq[String]])
    extends ImmutableMap[String, String] {
  def get(key: String): Option[String] = m.get(key).flatMap(_.headOption)
  def getAll(key: String): Seq[String] = m.getOrElse(key, Seq.empty)
  def +[V >: String](kv: (String, V)): MultiMap = {
    val (key, value) = kv
    val list = m.getOrElse(key, Vector.empty) :+ value.asInstanceOf[String]
    new MultiMap(m + (key -> list))
  }
  def -(key: String): MultiMap = new MultiMap(m - key)
  def iterator: Iterator[(String, String)] =
    m.iterator.flatMap(kv => kv._2.headOption.map(kv._1 -> _))
  override def default(key: String): String =
    m.default(key).headOption.getOrElse {
      throw new NoSuchElementException("key not found: " + key)
    }
} 
Author: tarao | Project: namedcap-scala | Lines: 30 | Source: MultiMap.scala
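
A short usage example; note that + appends to the per-key sequence rather than replacing it:

val mm = MultiMap.empty + ("tag" -> "scala") + ("tag" -> "map")
mm.get("tag")              // Some(scala) -- the first value
mm.getAll("tag")           // Vector(scala, map)
(mm - "tag").getAll("tag") // empty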


Example 11: GenMapLikePartialTest

// Package declaration and imported dependencies
package org.danielnixon.extrawarts

import org.scalatest.FunSuite
import org.wartremover.test.WartTestTraverser

import scala.collection.{ GenMapLike, Map }

class GenMapLikePartialTest extends FunSuite {
  val map: GenMapLike[String, Int, Map[String, Int]] = Map[String, Int]()

  test("can't use GenMapLike#apply") {
    val result = WartTestTraverser(GenMapLikePartial) {
      val foo = map("foo")
    }
    assertResult(List("[wartremover:GenMapLikePartial] GenMapLike#apply is disabled - use GenMapLike#get instead"), "result.errors")(result.errors)
    assertResult(List.empty, "result.warnings")(result.warnings)
  }

  test("doesn't detect other `apply` methods") {
    val result = WartTestTraverser(GenMapLikePartial) {
      case class A(apply: Int)
      println(A(1).apply)
    }
    assertResult(List.empty, "result.errors")(result.errors)
    assertResult(List.empty, "result.warnings")(result.warnings)
  }

  test("GenMapLikePartial wart obeys SuppressWarnings") {
    val result = WartTestTraverser(GenMapLikePartial) {
      @SuppressWarnings(Array("org.danielnixon.extrawarts.GenMapLikePartial"))
      val foo = map("foo")
    }
    assertResult(List.empty, "result.errors")(result.errors)
    assertResult(List.empty, "result.warnings")(result.warnings)
  }
} 
Author: danielnixon | Project: extrawarts | Lines: 37 | Source: GenMapLikePartialTest.scala


Example 12: parallelize

// Package declaration and imported dependencies
package com.datawizards.sparklocal.rdd

import org.apache.spark.Partitioner
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.collection.{GenIterable, GenMap, Map}
import scala.reflect.ClassTag

trait PairRDDFunctionsAPI[K, V] {
  protected lazy val spark: SparkSession = SparkSession.builder().getOrCreate()
  protected def parallelize[That: ClassTag](d: Seq[That]): RDD[That] = spark.sparkContext.parallelize(d)
  protected def parallelize[That: ClassTag](d: GenIterable[That]): RDD[That] = parallelize(d.toList)

  def mapValues[U: ClassTag](f: (V) => U): RDDAPI[(K, U)]
  def keys: RDDAPI[K]
  def values: RDDAPI[V]
  def flatMapValues[U: ClassTag](f: (V) => TraversableOnce[U]): RDDAPI[(K, U)]
  def countByKey(): GenMap[K, Long]
  def reduceByKey(func: (V, V) => V): RDDAPI[(K, V)]
  def reduceByKey(func: (V, V) => V, numPartitions: Int): RDDAPI[(K, V)]
  def reduceByKey(partitioner: Partitioner, func: (V, V) => V): RDDAPI[(K, V)]
  def reduceByKeyLocally(func: (V, V) => V): Map[K, V]
  def groupByKey(): RDDAPI[(K, GenIterable[V])]
  def groupByKey(numPartitions: Int): RDDAPI[(K, GenIterable[V])]
  def groupByKey(partitioner: Partitioner): RDDAPI[(K, GenIterable[V])]
  def foldByKey(zeroValue: V)(func: (V, V) => V): RDDAPI[(K, V)]
  def foldByKey(zeroValue: V, numPartitions: Int)(func: (V, V) => V): RDDAPI[(K, V)]
  def foldByKey(zeroValue: V, partitioner: Partitioner)(func: (V, V) => V): RDDAPI[(K, V)]
  def join[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (V, W))]
  def join[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (V, W))]
  def join[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (V, W))]
  def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (V, Option[W]))]
  def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (V, Option[W]))]
  def leftOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (V, Option[W]))]
  def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (Option[V], W))]
  def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (Option[V], W))]
  def rightOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (Option[V], W))]
  def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (Option[V], Option[W]))]
  def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (Option[V], Option[W]))]
  def fullOuterJoin[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (Option[V], Option[W]))]
  def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
  def cogroup[W: ClassTag](other: RDDAPI[(K, W)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
  def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], partitioner: Partitioner): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
  def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)]): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
  def cogroup[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
  def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)]): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
  def cogroup[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W]))]
  def cogroup[W1: ClassTag, W2: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2]))]
  def cogroup[W1: ClassTag, W2: ClassTag, W3: ClassTag](other1: RDDAPI[(K, W1)], other2: RDDAPI[(K, W2)], other3: RDDAPI[(K, W3)], numPartitions: Int): RDDAPI[(K, (GenIterable[V], GenIterable[W1], GenIterable[W2], GenIterable[W3]))]
  def collectAsMap(): GenMap[K, V]
  def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)]): RDDAPI[(K, V)]
  def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)], numPartitions: Int): RDDAPI[(K, V)]
  def subtractByKey[W: ClassTag](other: RDDAPI[(K, W)], p: Partitioner): RDDAPI[(K, V)]
  def aggregateByKey[U: ClassTag](zeroValue: U)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
  def aggregateByKey[U: ClassTag](zeroValue: U, partitioner: Partitioner)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
  def aggregateByKey[U: ClassTag](zeroValue: U, numPartitions: Int)(seqOp: (U, V) => U, combOp: (U, U) => U): RDDAPI[(K, U)]
  def partitionBy(partitioner: Partitioner): RDDAPI[(K, V)]

} 
Author: piotr-kalanski | Project: spark-local | Lines: 61 | Source: PairRDDFunctionsAPI.scala


Example 13: values

// Package declaration and imported dependencies
package com.danielwestheide.kontextfrei

import scala.collection.Map
import scala.reflect.ClassTag

private[kontextfrei] trait DCollectionPairFunctions[DCollection[_]] {

  def values[A: ClassTag, B: ClassTag](x: DCollection[(A, B)]): DCollection[B]

  def keys[A: ClassTag, B: ClassTag](x: DCollection[(A, B)]): DCollection[A]

  def cogroup[A: ClassTag, B: ClassTag, C: ClassTag](x: DCollection[(A, B)])(
      y: DCollection[(A, C)]): DCollection[(A, (Iterable[B], Iterable[C]))]

  def leftOuterJoin[A: ClassTag, B: ClassTag, C: ClassTag](
      x: DCollection[(A, B)])(
      y: DCollection[(A, C)]): DCollection[(A, (B, Option[C]))]

  def rightOuterJoin[A: ClassTag, B: ClassTag, C: ClassTag](
      x: DCollection[(A, B)])(
      y: DCollection[(A, C)]): DCollection[(A, (Option[B], C))]

  def fullOuterJoin[A: ClassTag, B: ClassTag, C: ClassTag](
      x: DCollection[(A, B)])(
      y: DCollection[(A, C)]): DCollection[(A, (Option[B], Option[C]))]

  def mapValues[A: ClassTag, B: ClassTag, C: ClassTag](x: DCollection[(A, B)])(
      f: B => C): DCollection[(A, C)]

  def flatMapValues[A: ClassTag, B: ClassTag, C: ClassTag](
      xs: DCollection[(A, B)])(f: B => TraversableOnce[C]): DCollection[(A, C)]

  def foldByKey[A: ClassTag, B: ClassTag](xs: DCollection[(A, B)])(
      zeroValue: B,
      func: (B, B) => B): DCollection[(A, B)]

  def reduceByKey[A: ClassTag, B: ClassTag](xs: DCollection[(A, B)])(
      f: (B, B) => B): DCollection[(A, B)]

  def aggregateByKey[A: ClassTag, B: ClassTag, C: ClassTag](
      xs: DCollection[(A, B)])(zeroValue: C)(
      seqOp: (C, B) => C,
      combOp: (C, C) => C): DCollection[(A, C)]

  def combineByKey[A: ClassTag, B: ClassTag, C: ClassTag](
      xs: DCollection[(A, B)])(createCombiner: B => C)(
      mergeValue: (C, B) => C,
      mergeCombiners: (C, C) => C): DCollection[(A, C)]

  def countByKey[A: ClassTag, B: ClassTag](
      xs: DCollection[(A, B)]): Map[A, Long]

  def collectAsMap[A: ClassTag, B: ClassTag](
      xs: DCollection[(A, B)]): Map[A, B]
} 
Author: dwestheide | Project: kontextfrei | Lines: 56 | Source: DCollectionPairFunctions.scala


Example 14: SparkWordCountJob

// Package declaration and imported dependencies
package com.spark.scala.core

import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import scala.collection.Map
import org.apache.commons.io.FileUtils
import java.io.File
import java.util.Properties

class SparkWordCountJob(sc: SparkContext) {
  // Reads the input text and computes word counts
  def run(t: String, outputFile: String) = {
    val readsdata = sc.textFile(t)
    // Transform into (word, count) pairs
    val words = readsdata.flatMap(word => word.split(" "))
    val counts = words.map(word => (word, 1)).reduceByKey(_ + _)
    // Save the word counts back out to a text file
    counts.saveAsTextFile(outputFile)
  }

  // Delete the results folder if it exists
  def deleteFile(proprietes: Properties) = {
    if (new File(proprietes.getProperty("Output")).exists()) {
      FileUtils.deleteDirectory(new File(proprietes.getProperty("Output")))
    }
  }
}
Author: rayanegouda | Project: Spark-Scala | Lines: 30 | Source: SparkWordCountJob.scala
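
A hypothetical driver for this job (paths, app name, and master are illustrative):

val conf = new SparkConf().setAppName("word-count").setMaster("local[*]")
val sc = new SparkContext(conf)
new SparkWordCountJob(sc).run("input.txt", "output")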


Example 15: ShipMap

// Package declaration and imported dependencies
package com.harry0000.kancolle

import scala.collection.{Map, Seq, mutable}
import scala.collection.immutable.TreeMap

package object ac {
  type ShipName = String
  type ShipMap = TreeMap[ShipCategory, Seq[ShipName]]
  object ShipMap {
    def empty: ShipMap = TreeMap.empty[ShipCategory, Seq[ShipName]]
    def apply(value: (ShipCategory, Seq[ShipName]) *): ShipMap = TreeMap(value: _*)
  }

  sealed abstract class ShipCategory(val name: String)
  object ShipCategory {
    // NOTE: the original Japanese category names were mangled to "?" during extraction;
    // the literals below are reconstructed from the standard KanColle type names.
    case object Destroyer       extends ShipCategory("駆逐艦")
    case object LightCruiser    extends ShipCategory("軽巡")
    case object HeavyCruiser    extends ShipCategory("重巡")
    case object SeaplaneTender  extends ShipCategory("水上機母艦")
    case object AircraftCarrier extends ShipCategory("空母")
    case object Submarine       extends ShipCategory("潜水艦")
    case object Battleship      extends ShipCategory("戦艦")

    private val order: Map[ShipCategory, Int] = mutable.LinkedHashMap.empty ++ Seq(
      Destroyer,
      LightCruiser,
      HeavyCruiser,
      SeaplaneTender,
      AircraftCarrier,
      Submarine,
      Battleship
    ).zipWithIndex

    implicit val ordering: Ordering[ShipCategory] = Ordering.by(order.getOrElse(_, Int.MaxValue))

    def apply(shipType: String): ShipCategory = shipType match {
      // Reconstructed two-character type abbreviations; the originals were lost in extraction
      case "駆逐" => Destroyer
      case "軽巡" => LightCruiser
      case "重巡" => HeavyCruiser
      case "水母" => SeaplaneTender
      case "軽母" => AircraftCarrier
      case "空母" => AircraftCarrier
      case "潜水" => Submarine
      case "戦艦" => Battleship
    }

    def get(index: Int): Option[ShipCategory] = order.find(_._2 == index).map(_._1)

    def values: Seq[ShipCategory] = order.keys.toSeq
  }
} 
Author: harry0000 | Project: DropTableParser | Lines: 52 | Source: package.scala
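
A small sketch of the ordering behaviour (ship names are made up): the implicit Ordering sorts TreeMap keys by the declaration order captured in the order map, not alphabetically:

import com.harry0000.kancolle.ac._

val ships: ShipMap = ShipMap(
  ShipCategory.Battleship -> Seq("Nagato"),
  ShipCategory.Destroyer  -> Seq("Fubuki")
)
ships.keys.toList  // List(Destroyer, Battleship) -- Destroyer sorts first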


Example 16: MapConverter

// Package declaration and imported dependencies
package com.kakao.shaded.jackson.module.scala.ser

import com.kakao.shaded.jackson.databind.`type`.{TypeFactory, MapType, MapLikeType}
import com.kakao.shaded.jackson.databind.jsontype.TypeSerializer
import com.kakao.shaded.jackson.databind.ser.Serializers
import com.kakao.shaded.jackson.databind.ser.std.StdDelegatingSerializer
import com.kakao.shaded.jackson.databind.util.StdConverter
import com.kakao.shaded.jackson.databind._
import com.kakao.shaded.jackson.module.scala.modifiers.MapTypeModifierModule
import scala.collection.JavaConverters._
import scala.collection.Map

private class MapConverter(inputType: JavaType, config: SerializationConfig)
  extends StdConverter[Map[_,_],java.util.Map[_,_]]
{
  def convert(value: Map[_,_]): java.util.Map[_,_] = {
    val m = if (config.isEnabled(SerializationFeature.WRITE_NULL_MAP_VALUES)) {
      value
    } else {
      value.filter(_._2 != None)
    }
    m.asJava
  }


  override def getInputType(factory: TypeFactory) = inputType

  override def getOutputType(factory: TypeFactory) =
    factory.constructMapType(classOf[java.util.Map[_,_]], inputType.getKeyType, inputType.getContentType)
      .withTypeHandler(inputType.getTypeHandler)
      .withValueHandler(inputType.getValueHandler)
}

private object MapSerializerResolver extends Serializers.Base {

  val BASE = classOf[collection.Map[_,_]]

  override def findMapLikeSerializer(config: SerializationConfig,
                                     mapLikeType : MapLikeType,
                                     beanDesc: BeanDescription,
                                     keySerializer: JsonSerializer[AnyRef],
                                     elementTypeSerializer: TypeSerializer,
                                     elementValueSerializer: JsonSerializer[AnyRef]): JsonSerializer[_] = {


    val rawClass = mapLikeType.getRawClass

    if (!BASE.isAssignableFrom(rawClass)) null
    else new StdDelegatingSerializer(new MapConverter(mapLikeType, config))
  }

}

trait MapSerializerModule extends MapTypeModifierModule {
  this += MapSerializerResolver
} 
Author: kakao | Project: mango | Lines: 57 | Source: MapSerializerModule.scala


Example 17: TypesafeFluentLogger

// Package declaration and imported dependencies
package io.github.takayuky.fluent

import org.fluentd.logger.scala.{FluentLogger, FluentLoggerFactory}
import scala.collection.Map

final case class TypesafeFluentLogger(underlying: FluentLogger) extends AnyVal {
  def log(label: String, data: Map[String, FluentAcceptable], timestamp: Long): Unit = {
    val validatedData = data.mapValues(_.v)
    underlying.log(label, validatedData, timestamp)
  }
}

object TypesafeFluentLoggerFactory {
  def getLogger(tag: String, host: String, port: Int): TypesafeFluentLogger =
    TypesafeFluentLogger(FluentLoggerFactory.getLogger(tag, host, port))
} 
Author: takayuky | Project: TypesafeFluentLogger | Lines: 17 | Source: TypesafeFluentLogger.scala
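
A usage sketch, assuming the implicit FluentAcceptable encoders from io.github.takayuky.fluent.basic._ (as the specs below use) and fluentd's default port 24224:

import io.github.takayuky.fluent.basic._

val logger = TypesafeFluentLoggerFactory.getLogger("myapp", "localhost", 24224)
logger.log("access", Map[String, FluentAcceptable]("user" -> "alice", "count" -> 1), System.currentTimeMillis / 1000)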


Example 18: FluentEncoderListSpec

// Package declaration and imported dependencies
package io.github.takayuky.fluent

import io.github.takayuky.fluent.basic._
import io.github.takayuky.fluent.list._
import org.fluentd.logger.scala.FluentLogger
import org.scalamock.scalatest.MockFactory
import org.scalatest.FlatSpec
import scala.collection.Map

class FluentEncoderListSpec extends FlatSpec with MockFactory {

  class Setup {
    val underlyingMock = mock[FluentLogger]
    val logger = TypesafeFluentLogger(underlyingMock)

    val label = "label"
    val time = 0L
  }

  behavior of "List instances"

  it should "correctly encode list instances" in new Setup {
    val data = Map[String, FluentAcceptable](
      "List" -> List(1,2,3),
      "Vector" -> Vector("hello", "world"),
      "Set" -> Set[Option[Double]](Some(1d), None)
    )
    (underlyingMock.log(_: String, _: Map[String, Any], _:Long)).expects(label, data.mapValues(_.v), time)

    logger.log(label, data, time)
  }
} 
Author: takayuky | Project: TypesafeFluentLogger | Lines: 33 | Source: FluentEncoderListSpec.scala


Example 19: FluentEncoderBasicSpec

// Package declaration and imported dependencies
package io.github.takayuky.fluent

import io.github.takayuky.fluent.basic._
import org.fluentd.logger.scala.FluentLogger
import org.scalamock.scalatest.MockFactory
import org.scalatest.FlatSpec
import scala.collection.Map

class FluentEncoderBasicSpec extends FlatSpec with MockFactory {

  class Setup {
    val underlyingMock = mock[FluentLogger]
    val logger = TypesafeFluentLogger(underlyingMock)

    val label = "label"
    val time = 0L
  }

  behavior of "Basic instances"

  it should "correctly encode basic instances" in new Setup {
    val data = Map[String, FluentAcceptable](
      "Boolean" -> true,
      "Int" -> 1,
      "Long" -> 1L,
      "Double" -> 1d,
      "Float" -> 1f,
      "String" -> "hello",
      "BigInt" -> BigInt(1),
      "BigDecimal" -> BigDecimal(1),
      "Some" -> Some(1),
      "None" -> (None: Option[String])
    )
    (underlyingMock.log(_: String, _: Map[String, Any], _:Long)).expects(label, data.mapValues(_.v), time)

    logger.log(label, data, time)
  }
} 
Author: takayuky | Project: TypesafeFluentLogger | Lines: 39 | Source: FluentEncoderBasicSpec.scala


Example 20: FluentEncoderExtensionSpec

// Package declaration and imported dependencies
package io.github.takayuky.fluent

import io.github.takayuky.fluent.basic._
import org.fluentd.logger.scala.FluentLogger
import org.scalamock.scalatest.MockFactory
import org.scalatest.FlatSpec
import scala.collection.Map

class FluentEncoderExtensionSpec extends FlatSpec with MockFactory {
  final case class User(id: Int, name: String)
  final case class Country(code: String, name: String)

  class Setup {
    val underlyingMock = mock[FluentLogger]
    val logger = TypesafeFluentLogger(underlyingMock)

    val label = "label"
    val time = 0L
  }

  behavior of "Custom instances"

  it should "correctly encode custom instances" in new Setup {
    implicit val fluentEncoderForUser: FluentEncoder[User] = FluentEncoder.create(_.name)
    implicit val fluentEncoderForCountry: FluentEncoder[Country] = FluentEncoder.id

    val user = User(1, "takayuky")
    val japan = Country("JP", "Japan")

    val data = Map[String, FluentAcceptable](
      "Int" -> 1,
      "User" -> user,
      "Country" -> japan,
      "Some" -> Some(user),
      "None" -> (None: Option[User])
    )

    val expected = Map[String, Any](
      "Int" -> 1,
      "User" -> user.name,
      "Country" -> japan,
      "Some" -> Some(user.name),
      "None" -> (None: Option[User])
    )
    (underlyingMock.log(_: String, _: Map[String, Any], _:Long)).expects(label, expected, time)

    logger.log(label, data, time)
  }
} 
Author: takayuky | Project: TypesafeFluentLogger | Lines: 50 | Source: FluentEncoderExtensionSpec.scala



Note: The scala.collection.Map class examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use must follow the corresponding projects' licenses. Do not republish without permission.

