Scala HashMap Class Code Examples


This article collects typical usage examples of the scala.collection.immutable.HashMap class in Scala. If you are unsure what the HashMap class does or how to use it in practice, the curated class examples below should help.



The 20 code examples below show the HashMap class in use, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
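Before the collected examples, here is a minimal sketch of the core immutable HashMap operations (construction, updating, lookup, iteration); names such as HashMapBasics and inventory are illustrative only:

import scala.collection.immutable.HashMap

object HashMapBasics {
  def main(args: Array[String]): Unit = {
    // Construct from key -> value pairs
    val inventory = HashMap("apples" -> 3, "pears" -> 5)

    // Immutable maps are never modified in place; updates return a new map
    val updated = inventory.updated("apples", 4) + ("plums" -> 7)

    // Lookup returns an Option; getOrElse supplies a default
    println(updated.get("apples"))         // Some(4)
    println(updated.getOrElse("kiwis", 0)) // 0

    // Iterate over key/value pairs (iteration order is not guaranteed)
    updated.foreach { case (k, v) => println(s"$k -> $v") }
  }
}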

Example 1: SimpleKafkaProducerTest

//Package declaration and imported dependencies
package com.example
import java.nio.charset.StandardCharsets

import kafka.consumer.ConsumerConfig
import kafka.utils.TestUtils
import org.scalatest.{FunSpec, Matchers}
//import org.junit.Assert._
import scala.collection.immutable.HashMap

class SimpleKafkaProducerTest extends FunSpec with Matchers {

  private val topic = "test"
  private val groupId = "group0"
  private val kafkaHelpers = new KafkaHelpers()


  case class MessageData(a: String, b: String)

  describe("The SimpleKafka Api") {
    it("Should send data using a producer") {
      //Send data to Kafka
      val kafkaApi = new SimpleKafkaProducer(kafkaHelpers.kafkaSocket(), topic)
      kafkaApi.send[MessageData](new MessageData("Hello", "World"))

      //Create consumer
      val consumerProperties = TestUtils.createConsumerProperties(kafkaHelpers.zookeeperSocket().toString(), groupId, "consumer0", -1)
      val consumer = kafka.consumer.Consumer.create(new ConsumerConfig(consumerProperties))

      val topicCountMap = HashMap(topic -> 1)
      val consumerMap = consumer.createMessageStreams(topicCountMap)
      val stream = consumerMap.get(topic).get(0)
      val iterator = stream.iterator()
      val msg = new String(iterator.next().message(), StandardCharsets.UTF_8)

      assert("{\"a\":\"Hello\",\"b\":\"World\"}" == msg)

      // cleanup
      consumer.shutdown()
    }
  }
} 
Developer: frossi85, Project: financial-statistics-crawler, Lines: 42, Source: SimpleKafkaProducerTest.scala


Example 2: SimpleKafkaConsumer

//Package declaration and imported dependencies
package com.example

import java.nio.charset.StandardCharsets
import java.util.Properties
import kafka.consumer.ConsumerConfig
import org.json4s.{DefaultFormats, jackson}
import scala.collection.immutable.HashMap

// Note: Socket here appears to be a project-specific host/port helper type
// (cf. KafkaHelpers in Example 1), not java.net.Socket
class SimpleKafkaConsumer(kafkaSocket: Socket, zooKeeperSocket: Socket, groupId: String, topic: String) {

  private def configuration = {
    val deserializer = "org.apache.kafka.common.serialization.StringDeserializer"
    val props = new Properties()
    props.put("bootstrap.servers", kafkaSocket.toString())
    props.put("key.deserializer", deserializer)
    props.put("value.deserializer", deserializer)
    props.put("group.id", groupId)
    props.put("consumer.id", "consumer0")
    props.put("consumer.timeout", "-1")
    props.put("auto.offset.reset", "smallest")
    props.put("zookeeper.sync.time.ms", "200")
    props.put("zookeeper.session.timeout.ms", "6000")
    props.put("zookeeper.connect", zooKeeperSocket.toString())
    props.put("num.consumer.fetchers", "2")
    props.put("rebalance.max.retries", "4")
    props.put("auto.commit.interval.ms", "1000")
    props
  }

  private val consumer = kafka.consumer.Consumer.create(new ConsumerConfig(configuration))

  def read[T <: AnyRef]()(implicit m: Manifest[T]): Iterable[T] = {
    implicit val serialization = jackson.Serialization
    implicit val formats = DefaultFormats

    val topicCountMap = HashMap(topic -> 1)
    val consumerMap = consumer.createMessageStreams(topicCountMap)
    val stream = consumerMap.get(topic).get(0)
    val iterator = stream.iterator()

    iterator.map(x => serialization.read[T](new String(x.message(), StandardCharsets.UTF_8))).toStream
  }

  def shutdown() = {
    consumer.shutdown()
  }
} 
Developer: frossi85, Project: financial-statistics-collector, Lines: 48, Source: SimpleKafkaConsumer.scala


Example 3: RomanNumeralsSpec

//Package declaration and imported dependencies
package com.jgibbons.romannumerals

import org.scalatest.FlatSpec

import scala.collection.immutable.HashMap

class RomanNumeralsSpec extends FlatSpec {
  behavior of "RomanNumerals"

  it should "Convert Properly" in {
    val mapped = HashMap[Int, String](
      1 -> "I", 2 -> "II", 3 -> "III", 4 -> "IV", 5 -> "V", 6 -> "VI", 7 -> "VII", 8 -> "VIII", 9 -> "IX", 10 -> "X",
      11 -> "XI", 12 -> "XII", 13 -> "XIII", 14 -> "XIV", 15 -> "XV", 16 -> "XVI", 17 -> "XVII", 18 -> "XVIII", 19 -> "XIX", 20 -> "XX",
      41 -> "XLI", 42 -> "XLII", 43 -> "XLIII", 44 -> "XLIV", 45 -> "XLV", 46 -> "XLVI", 47 -> "XLVII", 48 -> "XLVIII", 49 -> "XLIX",
      50 -> "L", 51 -> "LI", 60 -> "LX", 64 -> "LXIV", 68 -> "LXVIII", 69 -> "LXIX",
      90 -> "XC", 91 -> "XCI", 94 -> "XCIV", 96 -> "XCVI", 99 -> "XCIX",
      100 -> "C", 140 -> "CXL", 144 -> "CXLIV", 149 -> "CXLIX",
      189 -> "CLXXXIX", 190 -> "CXC", 194 -> "CXCIV", 199 -> "CXCIX",
      989 -> "CMLXXXIX", 990 -> "CMXC", 994 -> "CMXCIV", 999 -> "CMXCIX",
      1194 -> "MCXCIV", 1199 -> "MCXCIX",
      1989 -> "MCMLXXXIX", 1990 -> "MCMXC",
      5989 -> "MMMMMCMLXXXIX", 5990 -> "MMMMMCMXC", 5994 -> "MMMMMCMXCIV", 5999 -> "MMMMMCMXCIX"
    )

    mapped.foreach { case (k: Int, v: String) =>
      assert(RomanNumerals.convertLongImperative(k) == v, s"convertImperative failed for $k")
      assert(RomanNumerals.convertFoldLeft(k) == v, s"convertFoldLeft failed for $k")
      assert(RomanNumerals.convertStrs(k) == v, s"convertStrs failed for $k")
      assert(RomanNumerals.convertMixed(k) == v, s"convertMixed failed for $k")
      assert(RomanNumerals.convertTailRecursive(k) == v, s"convertTailRecursive failed for $k")
      assert(RomanNumerals.convertFoldAgain(k) == v, s"convertFoldAgain failed for $k")
      assert(RomanNumerals.convertFoldTerse(k) == v, s"convertFoldTerse failed for $k")
      assert(RomanNumerals.convert(k) == v, s"convert failed for $k")

      assert(JpgRomanNumerals.convert(k) == v, s"JpgRomanNumerals.convert failed for $k")
    }
  }
} 
Developer: PendaRed, Project: roman-numerals, Lines: 39, Source: RomanNumeralsSpec.scala


Example 4: SearchEngine

//Package declaration and imported dependencies
package com.ferran.searchEngine

import java.io.File

import com.ferran.searchEngine.tools.{FileLoader, Tools}

import scala.collection.immutable.HashMap
import scala.io.Source

object SearchEngine {

  type DocId      = Int
  type DocName    = String
  type Dictionary = HashMap[String, Map[DocId, DocName]]

  type RankingList = List[(DocId, DocName, Int)]

  def loadWords(dictionary: Dictionary, file: File, index: Int):Dictionary = {
    val fileName = file.getName
    Tools.wordSplitter(Source.fromFile(file).mkString)
      .foldLeft(dictionary){
        case (dict, word) =>
          dict.get(word.toLowerCase) match {
            case Some(docs) =>
              dict.updated(word.toLowerCase,
                {
                  if(docs.contains(index)) docs
                  else docs ++ Map(index -> fileName)
                }
              )
            case None =>
              dict.updated(word.toLowerCase, Map(index -> fileName))
          }
        }
  }


  final def main(args:Array[String]) = {
    val pathFolder = args(0)
    val filesOpt = FileLoader.getFilesList(pathFolder)

    // Create our Dictionary
    val dictionary =
      filesOpt.map{
        files =>
          files.zipWithIndex.foldLeft(HashMap.empty[String, Map[DocId, DocName]]){
            case (dict, (file, index)) => loadWords(dict, file, index)
          }
      }
      .getOrElse(HashMap.empty)

    println(s"Files loaded")
    print(s"search> ")
    var line = scala.io.StdIn.readLine()
    while(line != ":quit"){
      Ranking.printRanking(Ranking.getRanking(Dictionary.searchWords(dictionary,line)))
      print(s"search> ")
      line = scala.io.StdIn.readLine()
    }
  }
} 
Developer: ferranjr, Project: searchEngine, Lines: 62, Source: SearchEngine.scala


Example 5: UserGroupInformationLoginUtil

//Package declaration and imported dependencies
package im.yanchen.krb5.auth.passwd

import javax.security.auth.Subject
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag
import javax.security.auth.login.{AppConfigurationEntry, LoginContext}

import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod
import org.apache.hadoop.security.authentication.util.KerberosUtil

import scala.collection.JavaConverters._
import scala.collection.immutable.HashMap


object UserGroupInformationLoginUtil {
  def loginUserFromPasswordAndReturnUGI(user: String, password: String): UserGroupInformation = {
    this.synchronized {
      if (!UserGroupInformation.isSecurityEnabled) {
        UserGroupInformation.getCurrentUser
      } else {
        val krbOptions = HashMap[String, String](
          "doNotPrompt" -> "false",
          "useTicketCache" -> "false",
          "useKeyTab" -> "false",
          "renewTGT" -> "false"
        ).asJava
        val ace = new AppConfigurationEntry(
          KerberosUtil.getKrb5LoginModuleName,
          LoginModuleControlFlag.REQUIRED,
          krbOptions)
        val dynConf = new DynamicConfiguration(Array[AppConfigurationEntry](ace))
        val loginContext = new LoginContext(
          "hadoop-password-kerberos",
          null,
          new LoginHandler(user, password),
          dynConf)
        loginContext.login()

        val loginSubject = loginContext.getSubject
        val loginPrincipals = loginSubject.getPrincipals

        val subject = new Subject()
        subject.getPrincipals().addAll(loginPrincipals)

        val newLoginUser = UserGroupInformation.getUGIFromSubject(subject)
        setUGILogin(newLoginUser, loginContext)
        newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS)
        newLoginUser
      }
    }
  }

  private def setUGILogin(loginUser: UserGroupInformation, loginContext: LoginContext): Unit = {
    val cls = classOf[UserGroupInformation]
    val mtd = cls.getDeclaredMethod("setLogin", classOf[LoginContext])
    mtd.setAccessible(true)
    mtd.invoke(loginUser, loginContext)
  }
} 
Developer: halfvim, Project: krb5-auth-passwd, Lines: 60, Source: UserGroupInformationLoginUtil.scala


Example 6: TestNovelLaTeX

//Package declaration and imported dependencies
package com.lyrx.latex.documents

import com.lyrx.latex.{LaTeXArticle, LaTeXNovel, LaTeXReplacements}
import com.lyrx.structures.{TestNovel, Whitepaper}
import com.lyrx.text._
import com.lyrx.text.bib.{BibItem, Online}

import scala.collection.immutable.HashMap

object TestNovelLaTeX extends LaTeXReplacements with FileAssertions {

  def main(args: Array[String]): Unit = {
    implicit val ctx = Context()
      .withInputDir("src/test/resources/dummynovel")
      .withOutputDir("target/generated/latex")
      .inEnglish
    implicit val coll = new SimpleStringCollector();
    assertFile(TestNovel.collect[String](TestNovelLaTeX()).writeOut())
  }

  def apply(): TestNovelLaTeX =
    new TestNovelLaTeX(new ParData[String](
      Nil,
      "testnovel.tex",
      sections = Seq(
        "Section"
      ),
      replacements = HashMap[String, String]("llb" -> "lyrx Books"),
      subSections = Seq( "Subsection" )
   ).withReplacer(
      """\(\d+,\d+\)""".r,(s:String)=>{
        val AR ="""\((\d+),(\d+)\)""".r
         s match {
          case AR(s1:String,s2:String) => s"\\begin{tiny}[${s1}]\\end{tiny}"
          case _ =>  ""
        }
        }
    ))

}

class TestNovelLaTeX(override val parData: ParData[String])
    extends  LaTeXNovel
    with BookData {

  override val texTemplate: String = "../../../main/resources/templates/novel.tex"
  override def withParData(aParData: ParData[String]): TestNovelLaTeX = new TestNovelLaTeX(aParData)

  override val title: String = "How to write a novel"
  override val publisher: String = "lyrx GmbH"
  override val author: String = "Alexander Weinmann"
  override val subtitle: String = "This is the subtitle"
  override val motto: String = "Motto"
  override val widmung: String = "Widmung"
} 
Developer: lyrx, Project: lyrxgenerator, Lines: 56, Source: TestNovelLaTeX.scala


Example 7: RapierConfJson

//Package declaration and imported dependencies
package com.frankandrobot.rapier.cli

import scala.collection.immutable.HashMap

sealed trait JsonConf

case class RapierConfJson(val params : RapierParamsJson,
                          val masterTemplateUri : String,
                          val trainingDataUri : String,
                          val learedRulesUri: String,
                          val dataUri: String)
  extends JsonConf

case class RapierParamsJson(val maxOuterLoopFails: Option[Int],
                            val compressionRandomPairs: Option[Int],
                            val compressionPriorityQueueSize: Option[Int],
                            val compressionFails: Option[Int],
                            val metricMinPositiveMatches: Option[Int],
                            val maxElementsToSpecialize: Option[Int],
                            val ruleSizeWeight: Option[Double],
                            val maxPatternLength: Option[Int],
                            val maxUnequalPatternLength: Option[Int],
                            val maxDifferenceInPatternLength: Option[Int])
  extends JsonConf

case class MasterTemplateJson(val name : String, val slots : List[String])
  extends JsonConf

case class TrainingExampleJson(val document : String,
                               val filledTemplate: HashMap[String, List[String]])
  extends JsonConf 
Developer: frankandrobot, Project: rapier-cli, Lines: 32, Source: JsonConf.scala


Example 8: transform

//Package declaration and imported dependencies
package ftse.transformations.laresflat
import scala.collection.immutable.HashMap
import ftse.simulation.laresflat.LFReachTypes
import ftse.simulation.laresflat.LFReachTypes._

import ftse.formalism.lares.LARES_metamodel._
import ftse.formalism.lares.LaresAlgo

import ftse.formalism.tra.Tra
import ftse.transformations.AbstrTransformer

/**
 * This trait transforms the result of the reachability analysis in LFSim.reach() into a Tra object.
 */
trait Reach2TraTransformer extends 
	AbstrTransformer[ReachResult, (Tra, Map[ComposedState, Int])] 
	with LFReachTypes with LaresAlgo {
  
  /**
   * Builds a Tra object from the reachability structure produced by the reach() method.
   * @return the Tra object and a map from each composed state to its assigned state number
   */
  override def transform(obj: ReachResult) : (Tra, Map[ComposedState,Int]) = {
    
    var statemap = HashMap(
	      // make sure the initial state is sorted to the front so that numbering starts at 1
	      (obj._2.toList sortWith((a,b) => if (a==obj._3) true else false) zipWithIndex) map (a => (a._1,a._2+1))   :_* 
	  ) 
	  
	  val groupedT = obj._1.map(t => {
	    t._3 match {
	      case (name,Dirac(weight))  => {
	        val evalArith = eval(t._3._2)
	        val weight : Double = evalArith.right.getOrElse(evalArith.left.get)	       
	        ftse.formalism.tra.ImmediateTransition(statemap(t._1),name, statemap(t._2),weight): ftse.formalism.tra.Transition
	      } 
	      //case e : ExponentialTransitionType => ftse.formalism.tra.MarkovianTransition(statemap(t._1),"delay" + e.delay, statemap(t._2),e.delay.toDouble) : ftse.formalism.tra.Transition
	      case (name,Exponential(rate)) => {
	        println(name)
	        val evalArith = eval(t._3._2)
	        val rate : Double = evalArith.right.getOrElse(evalArith.left.get)
	        ftse.formalism.tra.MarkovianTransition(statemap(t._1), name, statemap(t._2),rate) : ftse.formalism.tra.Transition
	      }
	    }
	  }).groupBy(_.source)
	  
	  return (ftse.formalism.tra.Tra(groupedT ,Set[Long](statemap.values.toList.map(a => a : Long):_ *)),statemap)
  }
} 
Developer: martin-riedl, Project: ftse-lares, Lines: 50, Source: Reach2TraTransformer.scala


Example 9: transform

//Package declaration and imported dependencies
package ftse.formalism.lares.flat

import ftse.formalism.lares._
import ftse.formalism.lares.LARES_metamodel._
import ftse.simulation.laresflat.LFReachTypes
import scala.collection.immutable.HashMap
import ftse.simulation.laresflat.LFReachTypes._
import ftse.transformations.AbstrTransformer


// NOTE: the enclosing declaration was lost when this snippet was extracted; the
// trait header below is a plausible reconstruction modeled on Example 8 (the
// name is a guess based on the source file, flat_serializer.scala).
trait ReachResult2DotSerializer
    extends AbstrTransformer[ReachResult, String]
    with LFReachTypes with LaresAlgo {

  override def transform(obj: ReachResult) : String =  {
	  var statemap = HashMap(
	      // make sure the initial state is sorted to the front so that numbering starts at 1
	      (obj._2.toList sortWith((a,b) => if (a==obj._3) true else false) zipWithIndex) map (a => (a._1,a._2+1))   :_* 
	  ) 
	  
	  val dotTrans = obj._1.map(e => statemap(e._1) + "->" + statemap(e._2) + "["+(e._3 match {
	    		  case (name,Exponential(rate)) => "label=\"("+name+",rate="+eval(e._3._2)+")\"" + " fontcolor=black color=black"
	    		  case (name,Dirac(weight)) => "label=\"(*"+name+",rate="+eval(e._3._2)+"*)\"" + " fontcolor=blue color=blue"
	    		      
			      case _ => ""	      
	  })+"]")
	  
	  val statesDot = statemap.map(a=> a._2 + " "+a._1.values.map(_.identifier.toText).mkString("[style=filled shape=\"record\" label=\"{","|","}\"]") )
	  val resstring = dotTrans.foldLeft(
	      "digraph g {ratio=1.33;node [height=\"2\" width=\"3\" shape = \"ellipse\"]; edge []; graph [nodesep=0.5];"
	      )((a,b)=>a+"\n  "+b)+statesDot.mkString("\n  ","\n  ","\n")+"}"
	  //println(resstring)

	  resstring    
  }
} 
Developer: martin-riedl, Project: ftse-lares, Lines: 33, Source: flat_serializer.scala


Example 10: MongoAndInternal

//Package declaration and imported dependencies
package nsmc.conversion.types

import java.util.Date

import com.mongodb.casbah.Imports._
import nsmc.conversion.SchemaAccumulator
import org.apache.spark.sql.types._

import scala.collection.immutable.HashMap

class MongoAndInternal {

}

object MongoAndInternal {
  def toInternal(o: MongoDBObject) : StructureType = {
    val convertedPairs = o.toSeq.map(kv => toInternal(kv))
    val hm = HashMap[String, ConversionType](convertedPairs:_*)
    new StructureType(hm)
  }

  def toInternal(o: BasicDBObject) : StructureType = {
    val convertedPairs = o.toSeq.map(kv => toInternal(kv))
    val hm = HashMap[String, ConversionType](convertedPairs:_*)
    new StructureType(hm)
  }

  def toInternal(a: AnyRef) : ConversionType = {
    a match {
      case d: Date => AtomicType(TimestampType)
      case bts: BSONTimestamp => {
        val s = Seq("inc" -> AtomicType(IntegerType), "time" -> AtomicType(IntegerType))
        StructureType(HashMap[String, ConversionType](s: _*))
      }
      case ba: Array[Byte] => AtomicType(BinaryType)
      case bt: org.bson.types.ObjectId => AtomicType(StringType)
      case _: java.lang.Long => AtomicType(LongType)
      case _: java.lang.Byte => AtomicType(ByteType)
      case _: java.lang.Double => AtomicType(DoubleType)
      case _: java.lang.Boolean => AtomicType(BooleanType)
      case _:String => AtomicType(StringType)
      case _:Integer => AtomicType(IntegerType)
      case o:BasicDBObject => toInternal(o)
      case l:BasicDBList => {
        val sa = new SchemaAccumulator
        l.foreach(dbo => sa.considerDatum(dbo.asInstanceOf[AnyRef]))
        SequenceType(sa.getInternal)
      }
    }
  }

  def toInternal(kv: Pair[String, AnyRef]) : (String, ConversionType) = {
    kv match {
      case (k: String, a: AnyRef) => {
        val vt = toInternal(a)
        (k, vt)
      }
    }
  }
} 
Developer: baank, Project: spark-mongodb-connector, Lines: 61, Source: MongoAndInternal.scala


Example 11: Merger

//Package declaration and imported dependencies
package nsmc.conversion.types

import scala.collection.immutable.HashMap

object Merger {
  def merge(l: ConversionType, r: ConversionType) : ConversionType = {
    (l, r) match {
      case (StructureType(lMap), StructureType(rMap)) => {
        val keys = lMap.keySet ++ rMap.keySet
        val pairs = keys.map(k => {
          val inLeft = lMap.isDefinedAt(k)
          val inRight = rMap.isDefinedAt(k)
          (inLeft, inRight) match {
            case (true, true) => (k, merge(lMap.getOrElse(k, null), rMap.getOrElse(k, null)))
            case (true, false) => (k, lMap.getOrElse(k, null))
            case (false, true) => (k, rMap.getOrElse(k, null))
            case (false, false) => (k, null) // can't happen
          }
        })
        val ct = new StructureType(HashMap[String, ConversionType](pairs.toSeq:_*))
        ct
      }
      case (_, _) => l // TODO: assume for now they're equal
    }
  }
} 
Developer: baank, Project: spark-mongodb-connector, Lines: 27, Source: Merger.scala
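A hedged usage sketch, reusing the AtomicType and StructureType constructors shown in Example 10 (the inputs here are hypothetical): merging two structure types unions their fields, recursing into keys present on both sides.

import org.apache.spark.sql.types.{IntegerType, StringType}
import scala.collection.immutable.HashMap

// Hypothetical inputs built from the project's own conversion types
val left  = StructureType(HashMap[String, ConversionType]("a" -> AtomicType(StringType)))
val right = StructureType(HashMap[String, ConversionType]("b" -> AtomicType(IntegerType)))

// The result contains both "a" and "b"; keys on both sides are merged recursively
val merged = Merger.merge(left, right)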


Example 12: InternalAndSchema

//Package declaration and imported dependencies
package nsmc.conversion.types

import org.apache.spark.sql.types._

import scala.collection.immutable.HashMap


class InternalAndSchema {

}

object InternalAndSchema {

  def toSchema(it: ConversionType) : DataType = {
    it match {
      case AtomicType(dt: DataType) => dt
      case SequenceType(et) => ArrayType(toSchema(et))
      case StructureType(fields) => {
        val converted = fields.map(kv => makeField(kv._1, toSchema(kv._2)))
        val sorted = converted.toSeq.sortBy(sf => sf.name)
        StructType(sorted)
      }
    }
  }

  private def makeField(k:String, t: DataType) : StructField = {
    StructField(k, t, nullable = true)
  }

  def toInternal(schema: Seq[StructField]) : ConversionType = {
    val convertedPairs = schema.toSeq.map(toInternal)
    val hm = HashMap[String, ConversionType](convertedPairs:_*)
    new StructureType(hm)
  }

  private def toInternal(sf: StructField) : (String, ConversionType) = {
    sf.dataType match {
      case DateType => (sf.name, AtomicType(DateType))
      case DoubleType => (sf.name, AtomicType(DoubleType))
      case StringType => (sf.name, AtomicType(StringType))
      case IntegerType => (sf.name, AtomicType(IntegerType))
      case LongType => (sf.name, AtomicType(LongType))
      case TimestampType => (sf.name, AtomicType(TimestampType))
      case BooleanType => (sf.name, AtomicType(BooleanType))
      case StructType(s) => (sf.name, toInternal(s))
      case _ : DecimalType => (sf.name, AtomicType(DoubleType))
    }
  }

} 
Developer: baank, Project: spark-mongodb-connector, Lines: 51, Source: InternalAndSchema.scala


Example 13: SchemaAccumulator

//Package declaration and imported dependencies
package nsmc.conversion

import nsmc.conversion.types._

import org.apache.spark.sql.types._

import scala.collection.immutable.HashMap

class SchemaAccumulator {
  private var currentInternal: Option[ConversionType] = None

  private def maybeMerge(l: ConversionType, ro: Option[ConversionType]) : ConversionType = {
    ro match {
      case Some(r) => Merger.merge(l, r)
      case None => l
    }
  }

  def considerDatum(datum: AnyRef) : Unit = {
    val recInternalType = MongoAndInternal.toInternal(datum)
    currentInternal = Some(maybeMerge(recInternalType, currentInternal))
  }

  def accumulate(types: Iterator[StructureType]) : Unit = {
    types.foreach(ty => {
      currentInternal = Some(maybeMerge(ty, currentInternal))
    })
  }

  def getInternal : ConversionType = {
    currentInternal match {
      case Some(i) => i
      case None => new StructureType(new HashMap[String, ConversionType]())
    }
  }

  // should only be called for a top level (record) schema
  def getSchema : Seq[StructField] = {
    currentInternal match {
      case Some(i) => InternalAndSchema.toSchema(i).asInstanceOf[StructType].fields
      case None => Seq()
    }

  }
} 
Developer: baank, Project: spark-mongodb-connector, Lines: 46, Source: SchemaAccumulator.scala


Example 14: main

//Package declaration and imported dependencies
package io.radicalbit.flink.closure

import org.apache.flink.api.scala._
import scala.collection.immutable.HashMap
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.core.fs.FileSystem.WriteMode

// NOTE: the object name was lost when this snippet was extracted;
// "TFIDF" below is reconstructed from the source file name (TFIDF.scala).
object TFIDF {

  def main(args: Array[String]): Unit = {

    val params = ParameterTool.fromArgs(args)
    val env = ExecutionEnvironment.getExecutionEnvironment

    val tokenize: String => Seq[String] = { s =>

      val stopWords = Set("the", "i", "a", "an", "at", "are", "am", "for", "and",
        "or", "is", "there", "it", "this", "that", "on", "was", "by", "of",
        "to", "in", "to", "not", "be", "with", "you", "have", "as", "can")

      s.toLowerCase
        .split("\\W+")
        .filter(w => !stopWords.contains(w) && w.matches("(\\p{Alpha})+")) // keep only alphabetic tokens that are not stop words
    }
    
    val uniqueWords: String => Set[(String, Int)] = s => tokenize(s).toSet[String].map(w => (w, 1))

    val mails = env.readCsvFile[(String, String)](
      params.getRequired("input"),
      lineDelimiter = params.get("line-delimiter", "##//##"),
      fieldDelimiter = params.get("field-delimiter", "#|#"),
      includedFields = Array(0, 4))

    val numberOfDocs = mails.count

    val tf = mails.flatMap { x =>

      tokenize(x._2).foldLeft(HashMap.empty[String, Int]) { (h, w) =>

        val c = h.getOrElse(w, 0)
        h + (w -> (c + 1))
      } map {
        case (k, v) =>
          (x._1, k, v)
      }
    }

    val df = mails.flatMap(x => uniqueWords(x._2)).groupBy(0).reduce { (l, r) => (l._1, l._2 + r._2) }

    val tfidf = tf.join(df).where(1).equalTo(0) { (l, r) => (l._1, l._2, l._3 * (numberOfDocs.toDouble / r._2)) }

    tfidf.writeAsCsv(filePath = params.get("output", "~/Desktop/tf-idf.csv"), writeMode = WriteMode.OVERWRITE)
    env.execute("TF-IDF Scala")
  }
} 
Developer: alkagin, Project: flink-tfidf-example, Lines: 56, Source: TFIDF.scala
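The final join above implements a simplified TF-IDF score: each term frequency is scaled by numberOfDocs / documentFrequency, with no logarithm applied. A minimal sketch of the same scoring on plain Scala collections (toy data, illustrative names):

val docs = Map("d1" -> "spark and flink", "d2" -> "flink streams")
val n = docs.size.toDouble

// Term frequency per (document, word)
val tf = docs.toSeq.flatMap { case (id, text) =>
  text.split("\\W+").groupBy(identity).map { case (w, ws) => (id, w, ws.length) }
}

// Document frequency: number of distinct documents containing each word
val df = tf.groupBy(_._2).map { case (w, hits) => w -> hits.map(_._1).distinct.size }

// Same scaling as the Flink job above: tf * (numberOfDocs / df)
val tfidf = tf.map { case (id, w, f) => (id, w, f * (n / df(w))) }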


Example 15: Molecules

//Package declaration and imported dependencies
package scaffvis.client.store.model

import scaffvis.shared.model._
import diode.data.Pot

import scala.collection.immutable.{HashMap, HashSet}

case class Molecules(im: IndexedMolecules,
                     svg: Map[MoleculeId, Pot[String]] = Map.empty,
                     selected: Set[MoleculeId] = Set.empty, active: Option[MoleculeId] = None) {

  def molecules: Iterable[Molecule] = im.molecules
  def scaffoldMolecules(scaffoldId: ScaffoldId) = im.scaffoldIndex.getOrElse(scaffoldId, Set.empty)
  def get(moleculeId: MoleculeId): Option[Molecule] = im.moleculeMap.get(moleculeId)
  def get_!(moleculeId: MoleculeId): Molecule = im.moleculeMap.apply(moleculeId)
}

case class IndexedMolecules(molecules: Vector[Molecule]) {

  val moleculeMap: Map[MoleculeId, Molecule] = HashMap(molecules.map(m => m.id -> m).toSeq:_*)

  val scaffoldIndex: Map[ScaffoldId,Set[MoleculeId]] = {
    val pairs: Iterable[(ScaffoldId, MoleculeId)] = for {
      m <- molecules
      s <- m.scaffolds
    } yield s -> m.id
    val groups: Map[ScaffoldId, Iterable[(ScaffoldId, MoleculeId)]] = pairs.groupBy(_._1)
    val index: Map[ScaffoldId, Set[MoleculeId]] = groups.mapValues(i => i.map(_._2).to[HashSet])
    HashMap(index.toSeq:_*)
  }

} 
Developer: velkoborsky, Project: scaffvis, Lines: 33, Source: Molecules.scala


Example 16: PlatformFactory

//Package declaration and imported dependencies
package flyweight

import scala.collection.immutable.HashMap

object PlatformFactory {

  private var map: Map[String, Platform] = HashMap()

  def getPlatformInstance(platformType: String): Platform = synchronized(map.get(platformType) match {
    case Some(pt) => pt
    case None => platformType match {
      case "SCALA" =>
        map += (platformType -> new ScalaPlatform)
        map(platformType)
      case "JAVA" =>
        map += (platformType -> new JavaPlatform)
        map(platformType)
      case "RUBY" =>
        map += (platformType -> new RubyPlatform)
        map(platformType)
      case "C" =>
        map += (platformType -> new CPlatform)
        map(platformType)
      case _ => throw new NotImplementedError("Platform is not implemented")
    }
  })

} 
Developer: BBK-PiJ-2015-67, Project: sdp-portfolio, Lines: 29, Source: PlatformFactory.scala
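A short usage sketch: because the factory caches each Platform in its map, repeated requests for the same type return the identical shared instance, which is the point of the flyweight pattern.

val p1 = PlatformFactory.getPlatformInstance("SCALA")
val p2 = PlatformFactory.getPlatformInstance("SCALA")
println(p1 eq p2) // true: the cached ScalaPlatform instance is reused

// Unsupported platform types fail fast with a NotImplementedError
// PlatformFactory.getPlatformInstance("COBOL")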


Example 17: AccessControlProvider

//Package declaration and imported dependencies
package prototype

import scala.collection.immutable.HashMap

object AccessControlProvider {

  println("Fetching data from external resources and creating access control objects...")

  private lazy val map: HashMap[String, AccessControl] = HashMap(
    ("USER", AccessControl("USER", "DO_WORK")),
    ("ADMIN", AccessControl("ADMIN", "ADD/REMOVE USERS")),
    ("MANAGER", AccessControl("MANAGER", "GENERATE/READ REPORTS")),
    ("VP", AccessControl("VP", "MODIFY REPORTS"))
  )

  def getAccessControlObject(controlLevel: String): AccessControl =
    map.get(controlLevel) match {
      case Some(ac) => ac.copy(ac.controlLevel, ac.access)
      case None => AccessControl("ANONYMOUS", "ACCESS DENIED")
    }

} 
Developer: BBK-PiJ-2015-67, Project: sdp-portfolio, Lines: 23, Source: AccessControlProvider.scala
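Usage sketch: known control levels return a defensive copy of the cached object, while unknown levels fall back to the ANONYMOUS / ACCESS DENIED value.

val admin = AccessControlProvider.getAccessControlObject("ADMIN")
println(admin.access) // ADD/REMOVE USERS

val unknown = AccessControlProvider.getAccessControlObject("GUEST")
println(unknown)      // AccessControl(ANONYMOUS,ACCESS DENIED)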


Example 18: SparkSQLInitData

//Package declaration and imported dependencies
package com.jjzhk.sparkexamples.sql.search

import java.io.{FileWriter, PrintWriter}

import scala.collection.immutable.HashMap


object SparkSQLInitData {
  def main(args: Array[String]): Unit = {
    var ItemMap = new HashMap[String, String]
    // NOTE: the Chinese category names in this snippet were garbled during
    // extraction and survive only as "???" placeholders.
    ItemMap += ("BJB" -> "???")
    ItemMap += ("HZP" -> "???")
    ItemMap += ("BWB" -> "???")
    ItemMap += ("SG" -> "??")
    ItemMap += ("PM" -> "??")
    ItemMap += ("NZ" -> "??")
    ItemMap += ("YD" -> "??")
    ItemMap += ("ZB" -> "??")
    ItemMap += ("SP" -> "??")
    ItemMap += ("TS" -> "??")
    ItemMap += ("JP" -> "??")
    ItemMap += ("LC" -> "??")
    ItemMap += ("BT" -> "??")
    ItemMap += ("QC" -> "??")
    ItemMap += ("YY" -> "??")
    ItemMap += ("WJ" -> "??")

    val userLogBuffer = {
      val userLogger = new StringBuffer
      ItemMap.foreach(e => {userLogger.append(e._1).append("\t").append(e._2).append("\t\n")})

      userLogger
    }

    val pw : PrintWriter = new PrintWriter(new FileWriter("wordcount/Items.txt", true))
    pw.write(userLogBuffer.toString)
    pw.close()

  }
} 
Developer: JJZHK, Project: MySpark, Lines: 41, Source: SparkSQLInitData.scala


Example 19: Entry

//Package declaration and imported dependencies
package uk.co.pollett.flink.newsreader.rss

import java.text.SimpleDateFormat
import java.util.Date

import scala.collection.immutable.HashMap

object Entry {
  val dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss")
}

case class Entry(
                  title: String,
                  desc: String,
                  link: String,
                  date: Date,
                  source: String,
                  body: Option[String],
                  places: Option[List[String]],
                  people: Option[List[String]],
                  organizations: Option[List[String]],
                  sentiment: Option[String]
                ) extends Serializable {
  def getMap: HashMap[String, String] = {
    HashMap(
      "date" -> Entry.dateFormat.format(date),
      "title" -> title,
      "desc" -> desc,
      "link" -> link,
      "source" -> source,
      "body" -> (if (body.isDefined) body.get else ""),
      "places" -> (if (places.isDefined) places.get.mkString(", ") else ""),
      "people" -> (if (people.isDefined) people.get.mkString(", ") else ""),
      "organizations" -> (if (organizations.isDefined) organizations.get.mkString(", ") else ""),
      "sentiment" -> (if (sentiment.isDefined) sentiment.get else "")
    )
  }
} 
Developer: pollett, Project: flink-newsreader, Lines: 39, Source: Entry.scala


Example 20: InternalAndSchema

//Package declaration and imported dependencies
package nsmc.conversion.types

import org.apache.spark.sql.types._

import scala.collection.immutable.HashMap


class InternalAndSchema {

}

object InternalAndSchema {

  def toSchema(it: ConversionType) : DataType = {
    it match {
      case AtomicType(dt: DataType) => dt
      case SequenceType(et) => ArrayType(toSchema(et))
      case StructureType(fields) => {
        val converted = fields.map(kv => makeField(kv._1, toSchema(kv._2)))
        val sorted = converted.toSeq.sortBy(sf => sf.name)
        StructType(sorted)
      }
    }
  }

  private def makeField(k:String, t: DataType) : StructField = {
    StructField(k, t, nullable = true)
  }

  def toInternal(schema: Seq[StructField]) : ConversionType = {
    val convertedPairs = schema.toSeq.map(toInternal)
    val hm = HashMap[String, ConversionType](convertedPairs:_*)
    new StructureType(hm)
  }

  private def toInternal(sf: StructField) : (String, ConversionType) = {
    sf.dataType match {
      // TODO: leaving out some of the atomic types
      case StringType => (sf.name, AtomicType(StringType))
      case IntegerType => (sf.name, AtomicType(IntegerType))
      case StructType(s) => (sf.name, toInternal(s))
    }
  }

} 
Developer: gourimahapatra, Project: Spark-MongoDB, Lines: 46, Source: InternalAndSchema.scala



Note: The scala.collection.immutable.HashMap examples above were collected from open-source projects hosted on GitHub, MSDocs, and similar platforms. Copyright in each snippet remains with its original author; consult the corresponding project's license before reusing or redistributing the code. Do not repost without permission.

