Scala ZkClient Class Code Examples

This article collects typical usage examples of the org.I0Itec.zkclient.ZkClient class in Scala. If you have been wondering how the ZkClient class is used in practice, what it is for, or where to find working examples, the curated class examples below may help.



Three code examples of the ZkClient class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Scala code examples.
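
Before diving into the test-oriented examples, here is a minimal quick-start sketch of the ZkClient API itself. The connection string, timeouts, and node paths are placeholder assumptions for illustration, not values taken from the examples below:

import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.serialize.SerializableSerializer

object ZkClientQuickStart extends App {
  // Placeholder connection string and timeouts (in ms); adjust for your environment.
  val zkClient = new ZkClient("localhost:2181", 10000, 10000, new SerializableSerializer)

  try {
    // Create a persistent node, building intermediate parents if needed.
    if (!zkClient.exists("/demo/example"))
      zkClient.createPersistent("/demo/example", true)

    // Write a value to the node and read it back.
    zkClient.writeData("/demo/example", "hello")
    val value: String = zkClient.readData("/demo/example")
    println(s"read back: $value")

    // List the children of the parent node (a java.util.List[String]).
    println(zkClient.getChildren("/demo"))
  } finally {
    zkClient.close()
  }
}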

Example 1: ClusterImplTest

// Package declaration and imported dependencies
package com.groupon.dse.kafka.cluster.impl

import com.groupon.dse.configs.KafkaServerConfig
import com.groupon.dse.testutils.{EmbeddedKafka, TestDefaults}
import com.groupon.dse.zookeeper.ZkClientBuilder
import kafka.producer.{Producer, ProducerConfig}
import org.I0Itec.zkclient.ZkClient
import org.scalatest.{BeforeAndAfter, FlatSpec}

class ClusterImplTest extends FlatSpec with BeforeAndAfter {

  val kafkaTopic = TestDefaults.TestTopic
  val zkConnTimeout = 10000
  val zkSessionTimeout = 10000
  var producer: Producer[String, Array[Byte]] = _
  var embeddedKafka: EmbeddedKafka = _
  var cluster: ClusterImpl = _
  var zkConnect: String = _
  var kafkaServerConfigs: KafkaServerConfig = _
  var zkClient: ZkClient = _

  before {
    embeddedKafka = new EmbeddedKafka
    embeddedKafka.startCluster()
    producer = new Producer[String, Array[Byte]](new ProducerConfig(embeddedKafka.kafkaProducerProperties))
    zkConnect = embeddedKafka.zkServer.connectString
    kafkaServerConfigs = TestDefaults.testKafkaServerConfig(zkConnect)
    cluster = new ClusterImpl(kafkaServerConfigs)
    zkClient = ZkClientBuilder(zkConnect, zkConnTimeout, zkSessionTimeout)
  }

  after {
    zkClient.close()
    embeddedKafka.stopCluster()
  }

  "The topic list" must "have size 0 before producing" in {
    assert(cluster.topics(zkClient).size == 0)
  }

  "The topic list" must "have size 1 after producing" in {
    embeddedKafka.sendMessage(4, producer, kafkaTopic)
    assert(cluster.topics(zkClient).size == 1)
  }

  "The number of partitions for a topic" should "be 1 for 1 valid topic" in {
    embeddedKafka.sendMessage(4, producer, kafkaTopic)
    assert(cluster.partitions(List(kafkaTopic), zkClient).size == 1)
  }

  "The number of partitions" should "be 0 for an invalid topic" in {
    embeddedKafka.sendMessage(4, producer, kafkaTopic)
    assert(cluster.partitions(List("invalid_topic"), zkClient).size == 0)
  }

  "The number of partitions" should "be 1 for a valid and invalid topic" in {
    embeddedKafka.sendMessage(4, producer, kafkaTopic)
    assert(cluster.partitions(List(kafkaTopic, "invalid_topic"), zkClient).size == 1)
  }

} 
Developer: groupon | Project: baryon | Lines: 62 | Source: ClusterImplTest.scala
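
In the test above, cluster.topics(zkClient) and cluster.partitions(...) come from the baryon project. For context, listing topics from ZooKeeper with a bare ZkClient typically amounts to reading the children of Kafka's conventional /brokers/topics node. A minimal sketch under that assumption (the TopicLister helper is hypothetical, not baryon code):

import scala.collection.JavaConverters._
import org.I0Itec.zkclient.ZkClient

// Hypothetical helper, not part of baryon: list topic names registered in ZooKeeper,
// assuming Kafka's conventional /brokers/topics layout.
object TopicLister {
  def listTopics(zkClient: ZkClient): Seq[String] = {
    val topicsPath = "/brokers/topics"
    if (zkClient.exists(topicsPath)) zkClient.getChildren(topicsPath).asScala.toSeq
    else Seq.empty
  }
}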


Example 2: StormOffsetGetterSpec

// Package declaration and imported dependencies
package com.quantifind.kafka.core

import com.quantifind.utils.ZkUtilsWrapper
import org.I0Itec.zkclient.ZkClient
import org.apache.zookeeper.data.Stat
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.mockito.{Matchers => MockitoMatchers, Mockito}
import org.scalatest._

class StormOffsetGetterSpec extends FlatSpec with ShouldMatchers {

  trait Fixture {

    val mockedZkClient = Mockito.mock(classOf[ZkClient])
    val zkOffsetBase = "/stormconsumers"
    val mockedZkUtil =  Mockito.mock(classOf[ZkUtilsWrapper])

    val offsetGetter = new StormOffsetGetter(mockedZkClient, zkOffsetBase, mockedZkUtil)
  }

  "StormOffsetGetter" should "be able to extract topic from persisted spout state" in new Fixture {

    val testGroup = "testgroup"
    val testTopic = "testtopic"
    val spoutState = s"""{
                        "broker": {
                            "host": "kafka.sample.net",
                            "port": 9092
                        },
                        "offset": 4285,
                        "partition": 1,
                        "topic": "${testTopic}",
                        "topology": {
                            "id": "fce905ff-25e0 -409e-bc3a-d855f 787d13b",
                            "name": "Test Topology"
                        }
                       }"""
    val ret = (spoutState, Mockito.mock(classOf[Stat]))
    when(mockedZkUtil.readData(MockitoMatchers.eq(mockedZkClient), anyString)).thenReturn(ret)

    val topics = offsetGetter.getTopicList(testGroup)
    
    topics.size shouldBe 1
    topics(0) shouldBe testTopic
  }
} 
Developer: ZhouGitHub1 | Project: KafkaOffsetMonitor | Lines: 48 | Source: StormOffsetGetterSpec.scala
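
The spec above exercises readData only through a mocked ZkClient and ZkUtilsWrapper. When watching the same kind of spout-state path against a live ZooKeeper, ZkClient's listener API can be used instead; a minimal sketch (the path handling and listener bodies are illustrative only):

import org.I0Itec.zkclient.{IZkDataListener, ZkClient}

// Illustrative only: subscribe to change/delete notifications on a spout-state node.
object SpoutStateWatcher {
  def watch(zkClient: ZkClient, path: String): Unit = {
    zkClient.subscribeDataChanges(path, new IZkDataListener {
      override def handleDataChange(dataPath: String, data: Object): Unit =
        println(s"spout state changed at $dataPath: $data")

      override def handleDataDeleted(dataPath: String): Unit =
        println(s"spout state deleted at $dataPath")
    })
  }
}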


Example 3: KafkaOffsetGetterSpec

// Package declaration and imported dependencies
package com.quantifind.kafka.core

import com.quantifind.kafka.core.KafkaOffsetGetter.GroupTopicPartition
import com.quantifind.utils.ZkUtilsWrapper
import kafka.api.{OffsetRequest, OffsetResponse, PartitionOffsetsResponse}
import kafka.common.{OffsetAndMetadata, TopicAndPartition}
import kafka.consumer.SimpleConsumer
import org.I0Itec.zkclient.ZkClient
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.mockito.{Matchers => MockitoMatchers, Mockito}
import org.scalatest._

class KafkaOffsetGetterSpec extends FlatSpec with ShouldMatchers {

  trait Fixture {

    val mockedZkClient = Mockito.mock(classOf[ZkClient])
    val mockedZkUtil =  Mockito.mock(classOf[ZkUtilsWrapper])
    val mockedConsumer = Mockito.mock(classOf[SimpleConsumer])
    val testPartitionLeader = 1

    val offsetGetter = new KafkaOffsetGetter(mockedZkClient, mockedZkUtil)
    offsetGetter.consumerMap += (testPartitionLeader -> Some(mockedConsumer))
  }

  "KafkaOffsetGetter" should "be able to build offset data for given partition" in new Fixture {

    val testGroup = "testgroup"
    val testTopic = "testtopic"
    val testPartition = 1

    val topicAndPartition = TopicAndPartition(testTopic, testPartition)
    val groupTopicPartition = GroupTopicPartition(testGroup, topicAndPartition)
    val offsetAndMetadata = OffsetAndMetadata(100, "meta", System.currentTimeMillis)
    KafkaOffsetGetter.offsetMap += (groupTopicPartition -> offsetAndMetadata)

    when(mockedZkUtil.getLeaderForPartition(MockitoMatchers.eq(mockedZkClient), MockitoMatchers.eq(testTopic), MockitoMatchers.eq(testPartition)))
      .thenReturn(Some(testPartitionLeader))

    val partitionErrorAndOffsets = Map(topicAndPartition -> PartitionOffsetsResponse(0,Seq(102)))
    val offsetResponse = OffsetResponse(1, partitionErrorAndOffsets)
    when(mockedConsumer.getOffsetsBefore(any[OffsetRequest])).thenReturn(offsetResponse)

    offsetGetter.processPartition(testGroup, testTopic, testPartition) match {
      case Some(offsetInfo) =>
        offsetInfo.topic shouldBe testTopic
        offsetInfo.group shouldBe testGroup
        offsetInfo.partition shouldBe testPartition
        offsetInfo.offset shouldBe 100
        offsetInfo.logSize shouldBe 102
      case None => fail("Failed to build offset data")
    }
    
  }
} 
Developer: ZhouGitHub1 | Project: KafkaOffsetMonitor | Lines: 57 | Source: KafkaOffsetGetterSpec.scala
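
KafkaOffsetGetter in this spec resolves offsets through the project's wrappers and a mocked SimpleConsumer. For the older ZooKeeper-committed offsets, the raw value can also be read directly with ZkClient from Kafka's conventional /consumers/&lt;group&gt;/offsets/&lt;topic&gt;/&lt;partition&gt; path. A sketch under that assumption (the helper is hypothetical, and it presumes a ZkClient built with a string serializer, as Kafka's own tooling uses):

import org.I0Itec.zkclient.ZkClient

// Hypothetical helper: read a ZooKeeper-committed offset for a consumer group.
// Assumes Kafka's old /consumers/<group>/offsets/<topic>/<partition> layout and
// a ZkClient configured with a string serializer.
object ZkOffsetReader {
  def readCommittedOffset(zkClient: ZkClient, group: String, topic: String, partition: Int): Option[Long] = {
    val path = s"/consumers/$group/offsets/$topic/$partition"
    // readData(path, true) returns null instead of throwing when the node is missing.
    Option(zkClient.readData[String](path, true)).map(_.toLong)
  }
}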



Note: The org.I0Itec.zkclient.ZkClient class examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before redistributing or reusing the code; do not reproduce this article without permission.

