This article collects typical usage examples of the Scala class org.apache.spark.scheduler.SparkListenerJobStart. If you have been wondering what SparkListenerJobStart is for and how to use it, the curated class example here may help.
One code example for the SparkListenerJobStart class is shown below; examples are sorted by popularity by default.
Example 1: CleanupUtil
// Set the package name and import the required classes
package com.hazelcast.spark.connector.util

import com.hazelcast.spark.connector.util.ConnectionUtil.closeAll
import org.apache.spark.SparkContext
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart}

object CleanupUtil {

  // Tracks the RDD ids touched by each running job so that the matching
  // Hazelcast connections can be closed when the job finishes.
  val jobIds: collection.mutable.Map[Int, Seq[Int]] = collection.mutable.Map[Int, Seq[Int]]()
  val cleanupJobRddName: String = "HazelcastResourceCleanupJob"

  def addCleanupListener(sc: SparkContext): Unit = {
    sc.addSparkListener(new SparkListener {

      // On job start, record the ids of all RDDs involved in the job,
      // skipping the cleanup job itself.
      override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
        this.synchronized {
          jobStart.stageInfos.foreach(info => {
            info.rddInfos.foreach(rdd => {
              if (!cleanupJobRddName.equals(rdd.name)) {
                val ids: Seq[Int] = info.rddInfos.map(_.id)
                val maybeIds: Option[Seq[Int]] = jobIds.get(jobStart.jobId)
                if (maybeIds.isDefined) {
                  jobIds.put(jobStart.jobId, ids ++ maybeIds.get)
                } else {
                  jobIds.put(jobStart.jobId, ids)
                }
              }
            })
          })
        }
      }

      // On job end, run a small job with one partition per executor so that
      // closeAll releases the finished job's Hazelcast connections everywhere.
      override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
        this.synchronized {
          if (jobIds.contains(jobEnd.jobId)) {
            try {
              val workers = sc.getConf.getInt("spark.executor.instances", sc.getExecutorStorageStatus.length)
              val rddId: Option[Seq[Int]] = jobIds.get(jobEnd.jobId)
              if (rddId.isDefined) {
                sc.parallelize(1 to workers, workers).setName(cleanupJobRddName).foreachPartition(it => closeAll(rddId.get))
              }
              jobIds -= jobEnd.jobId
            } catch {
              case e: Exception => // cleanup is best-effort; failures are ignored
            }
          }
        }
      }
    })
  }
}
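For context, here is a minimal sketch of how this utility might be wired into a Spark application. The object name, application name, and master URL are illustrative assumptions, not part of the original snippet:

import org.apache.spark.{SparkConf, SparkContext}
import com.hazelcast.spark.connector.util.CleanupUtil

object CleanupExample {
  def main(args: Array[String]): Unit = {
    // Hypothetical local setup; any SparkContext works.
    val conf = new SparkConf().setAppName("cleanup-demo").setMaster("local[2]")
    val sc = new SparkContext(conf)

    // Register the listener once; every subsequent job is tracked,
    // and its Hazelcast connections are closed when the job ends.
    CleanupUtil.addCleanupListener(sc)

    sc.parallelize(1 to 100).map(_ * 2).count() // runs a job, triggering onJobStart/onJobEnd
    sc.stop()
  }
}

Note that SparkContext.getExecutorStorageStatus was removed in later Spark releases (it is gone in Spark 3.x), so the original snippet targets Spark 1.x/2.x.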
Author ID: hazelcast, Project: hazelcast-spark, Lines of code: 53, Source: CleanupUtil.scala
Note: the org.apache.spark.scheduler.SparkListenerJobStart class example in this article is collected from open-source projects and documentation platforms such as GitHub and MSDocs. The source code is copyright of its original authors; consult the corresponding project's license before distributing or using it, and do not reproduce without permission.