
Scala HashMap Class Code Examples


This article collects typical usage examples of java.util.HashMap in Scala. If you are wondering what the HashMap class is for, how to use it from Scala, or are looking for concrete examples, the selected code samples below should help.



Seven code examples using the HashMap class are shown below, sorted by popularity by default.

Example 1: MultiMap

// Package name and imported dependencies
package com.protegra_ati.agentservices.store.util

import java.util.HashMap

class MultiMap[K, V] extends java.io.Serializable {
  private val _map = new HashMap[K, List[V]]()

  def add(key: K, value: V): Unit = {
    val list: List[V] = if (_map.get(key) == null) List[V]() else _map.get(key)
    _map.put(key, list ++ List(value))
  }

  def remove(key: K, value: V): Unit = {
    if (_map.get(key) != null) {
      val sublist = _map.get(key).filterNot(x => x == value)
      _map.put(key, sublist)
    }
  }

  // Returns null (not Nil) for an absent key, mirroring java.util.Map.get
  def get(key: K): List[V] = {
    _map.get(key)
  }

  def hasValue(key: K, value: V): Boolean = {
    val values = get(key)
    values != null && values.contains(value)
  }
}
Developer: synereo, Project: synereo, Lines: 29, Source: MultiMap.scala
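
A quick usage sketch (illustrative, not from the original project):

val mm = new MultiMap[String, Int]
mm.add("evens", 2)
mm.add("evens", 4)
mm.remove("evens", 2)
println(mm.get("evens"))         // List(4)
println(mm.hasValue("evens", 4)) // true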


Example 2: InMemoryHeap

// Package name and imported dependencies
package offheap

import java.util.HashMap

class InMemoryHeap[T <: Node[T]] extends Heap[T] {
  val data = new HashMap[Long, T]
  var next_id = 0L

  // Stores the node under a fresh id and returns that id
  def append(node: T, reserve: Int): Long = {
    data.put(next_id, node)
    val result = next_id
    next_id += 1
    result
  }

  def write(pointer: Long, node: T): Unit = {
    data.put(pointer, node)
  }

  def read(pointer: Long): T =
    data.get(pointer)

  def commit(): Unit = {}
}
Developer: utwente-fmt, Project: lazy-persistent-trie, Lines: 25, Source: InMemoryHeap.scala
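
Node and Heap are defined elsewhere in the project. A minimal usage sketch, assuming a trivial Node subtype (hypothetical, for illustration only):

case class IntNode(value: Int) extends Node[IntNode] // assumes Node[T] requires no members

val heap = new InMemoryHeap[IntNode]
val ptr = heap.append(IntNode(1), reserve = 0) // id the node was stored under
heap.write(ptr, IntNode(2))                    // overwrite in place
println(heap.read(ptr))                        // IntNode(2)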


Example 3: TweetCollect

// Package name and imported dependencies
package info.matsumana.flink

import java.util.regex.Pattern
import java.util.{HashMap, Properties}

import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010
import org.apache.flink.streaming.connectors.twitter.TwitterSource
import org.apache.flink.streaming.util.serialization.SimpleStringSchema

object TweetCollect {

  val DELETED_TWEET_PATTERN = Pattern.compile("""^\{"delete":\{""")

  // Matches tweets containing at least one hiragana character (i.e. Japanese text)
  val TARGET_TWEET_PATTERN = Pattern.compile("^.*[\u3040-\u3096]+.*$")

  val mapper = new ObjectMapper()

  def main(args: Array[String]): Unit = {
    val params = ParameterTool.fromPropertiesFile(args(0))

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.enableCheckpointing(60000)

    // source (twitter)
    val twitterProps = new Properties()
    twitterProps.setProperty(TwitterSource.CONSUMER_KEY, params.get("consumer_key", ""))
    twitterProps.setProperty(TwitterSource.CONSUMER_SECRET, params.get("consumer_secret", ""))
    twitterProps.setProperty(TwitterSource.TOKEN, params.get("token", ""))
    twitterProps.setProperty(TwitterSource.TOKEN_SECRET, params.get("token_secret", ""))
    val sourceStream = env.addSource(new TwitterSource(twitterProps))

    // sink (kafka)
    val kafkaProps = new Properties()
    kafkaProps.setProperty("bootstrap.servers", params.get("bootstrap.servers", "localhost:9092"))
    val topic = params.get("topic", "twitter")
    val sink = new FlinkKafkaProducer010[String](
      topic,
      new SimpleStringSchema,
      kafkaProps)

    // stream processing
    sourceStream
      .filter(!DELETED_TWEET_PATTERN.matcher(_).matches())
      .map(mapper.readValue(_, classOf[HashMap[String, Object]]))
      .filter(m => {
        val text = m.get("text")
        text != null && TARGET_TWEET_PATTERN.matcher(String.valueOf(text)).matches()
      })
      .map(mapper.writeValueAsString(_))
      .addSink(sink)

    env.execute("TweetCollect")
  }
} 
Developer: matsumana, Project: scala-fukuoka-lt, Lines: 59, Source: TweetCollect.scala
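
The job reads all credentials and connection settings from the properties file passed as args(0). A minimal sketch of that file, using the key names the params.get calls above expect (values are placeholders):

consumer_key=<your consumer key>
consumer_secret=<your consumer secret>
token=<your access token>
token_secret=<your access token secret>
bootstrap.servers=localhost:9092
topic=twitter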


Example 4: Marker

// Package name and imported dependencies
import java.util.HashMap

class Marker private (val color: String) {
  println(s"Creating ${toString}")

  override def toString: String = "marker color " + color
}

object Marker {
  // Flyweight cache: at most one Marker instance per color (not thread-safe)
  private val markers = new HashMap[String, Marker]

  def getMarker(color: String): Marker = {
    if (!markers.containsKey(color)) {
      markers.put(color, new Marker(color))
    }

    markers.get(color)
  }
}
Developer: ReactivePlatform, Project: Pragmatic-Scala, Lines: 20, Source: Companion.scala
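
The private constructor plus companion-object cache implements the flyweight pattern: at most one Marker per color. A quick check (illustrative):

val a = Marker.getMarker("blue") // prints "Creating marker color blue"
val b = Marker.getMarker("blue") // served from the cache, nothing printed
println(a eq b)                  // true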


Example 5: TestUtils

// Package name and imported dependencies
package org.apache.spark.sql

import java.nio.ByteBuffer
import java.util.{ArrayList, HashMap}

import scala.util.Random

object TestUtils {

  def generateRandomByteBuffer(rand: Random, size: Int): ByteBuffer = {
    val bb = ByteBuffer.allocate(size)
    val arrayOfBytes = new Array[Byte](size)
    rand.nextBytes(arrayOfBytes)
    bb.put(arrayOfBytes)
  }

  def generateRandomMap(rand: Random, size: Int): java.util.Map[String, Int] = {
    val jMap = new HashMap[String, Int]()
    for (i <- 0 until size) {
      jMap.put(rand.nextString(5), i)
    }
    jMap
  }

  def generateRandomArray(rand: Random, size: Int): ArrayList[Boolean] = {
    val vec = new ArrayList[Boolean]()
    for (i <- 0 until size) {
      vec.add(rand.nextBoolean())
    }
    vec
  }
} 
Developer: hzxjtx, Project: hbaseConnector, Lines: 33, Source: TestUtils.scala
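
A usage sketch (illustrative):

import scala.util.Random

val rand   = new Random(42)
val buffer = TestUtils.generateRandomByteBuffer(rand, 16) // 16 random bytes, position left at the limit
val jMap   = TestUtils.generateRandomMap(rand, 3)         // three random 5-char keys mapped to 0, 1, 2
val flags  = TestUtils.generateRandomArray(rand, 4)       // four random booleans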


Example 6: KafkaWordCountProducer

// Package name and imported dependencies
package com.jcode.spark.streaming

import java.util.HashMap

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}



object KafkaWordCountProducer {

  def main(args: Array[String]) {
    if (args.length < 4) {
      System.err.println("Usage: KafkaWordCountProducer <metadataBrokerList> <topic> " +
        "<messagesPerSec> <wordsPerMessage>")
//      System.exit(1)
    }

//    val Array(brokers, topic, messagesPerSec, wordsPerMessage) = args
    val Array(brokers, topic, messagesPerSec, wordsPerMessage) = Array("192.168.1.234:9092","sparkStreamingTest","1","5")

    // Kafka producer connection properties
    val props = new HashMap[String, Object]()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)

    // Send some messages
    while(true) {
      (1 to messagesPerSec.toInt).foreach { messageNum =>
        val str = (1 to wordsPerMessage.toInt).map(x => scala.util.Random.nextInt(10).toString)
          .mkString(" ")

        val message = new ProducerRecord[String, String](topic, null, str)
        producer.send(message)
      }

      Thread.sleep(1000)
    }
  }

}
Developer: dreajay, Project: jcode-spark, Lines: 50, Source: KafkaWordCountProducer.scala
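
To check what the producer writes, here is a minimal consumer sketch using the plain Kafka client (not part of the original project; broker and topic match the hardcoded values above):

import java.util.{Collections, Properties}
import org.apache.kafka.clients.consumer.KafkaConsumer
import scala.collection.JavaConverters._

val props = new Properties()
props.put("bootstrap.servers", "192.168.1.234:9092")
props.put("group.id", "word-count-check")
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")

val consumer = new KafkaConsumer[String, String](props)
consumer.subscribe(Collections.singletonList("sparkStreamingTest"))
while (true) {
  val records = consumer.poll(1000L) // poll(long) is deprecated in newer clients but still works
  records.asScala.foreach(r => println(r.value()))
}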


Example 7: Macro

// Package name and imported dependencies
package alexsmirnov.pbconsole

import java.util.HashMap
import org.apache.commons.lang3.text.StrSubstitutor
import scala.io.Source
import scalafx.beans.property.StringProperty
import javafx.util.Callback
import javafx.beans.Observable

class Macro {
  val nameProperty = StringProperty("")
  def name = nameProperty.get
  def name_=(v: String) = nameProperty.update(v)
  val descriptionProperty = StringProperty("")
  def description = descriptionProperty.get
  def description_=(v: String) = descriptionProperty.update(v)
  val contentProperty = StringProperty("")
  def content = contentProperty.get
  def content_=(v: String) = contentProperty.update(v)
}

object Macro {
  import Settings._
  object extractor extends Callback[Macro,Array[Observable]]{
    def call(m: Macro) = Array(m.nameProperty,m.descriptionProperty,m.contentProperty)
  }
  def apply(name: String,description: String,content: String) = {
    val m = new Macro
    m.name = name
    m.description = description
    m.content = content
    m
  }
  def prepare(content: String,conf: Settings): Iterator[String] = {
    val values = new HashMap[String,Any]
    values.put(BED_W, conf.bedWidth())
    values.put(BED_D, conf.bedDepth())
    values.put(H, conf.height())
    values.put(Z_OFFSET, conf.zOffset())
    val sub = new StrSubstitutor(values)
    val src = Source.fromString(sub.replace(content))
    src.getLines()
  }
} 
Developer: alexsmirnov, Project: printrbot-g2-console, Lines: 45, Source: Macro.scala
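
prepare expands ${...} placeholders with Commons Lang's StrSubstitutor; a standalone sketch of that mechanism (the real key names come from Settings, the one here is illustrative):

import java.util.HashMap
import org.apache.commons.lang3.text.StrSubstitutor

val values = new HashMap[String, Any]
values.put("bedWidth", 200)
val sub = new StrSubstitutor(values)
println(sub.replace("G1 X${bedWidth}")) // G1 X200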



Note: the java.util.HashMap class examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce this article without permission.

