This article compiles typical usage examples of the Scala class org.apache.spark.mllib.feature.PCA. If you are unsure what the PCA class does, how to use it, or what working code looks like, the curated class examples here may help.
Three code examples of the PCA class are presented below, ordered by popularity.
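Before the examples, a minimal sketch of the basic RDD-based PCA workflow may help orient the reader: construct a PCA with the number of principal components k, fit it on an RDD[Vector], and transform vectors into the reduced space. The SparkContext setup and the toy data below are illustrative assumptions, not part of the examples that follow.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.linalg.Vectors

val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("PCASketch"))
// Toy 3-dimensional vectors; in practice these would be your feature vectors.
val data = sc.parallelize(Seq(
  Vectors.dense(1.0, 2.0, 3.0),
  Vectors.dense(2.0, 4.0, 5.0),
  Vectors.dense(4.0, 8.0, 9.0)))
// Fit a PCA model that projects onto the top 2 principal components.
val pca = new PCA(2).fit(data)
// Transform each vector into the 2-dimensional principal-component space.
val reduced = data.map(pca.transform)
reduced.collect().foreach(println)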
Example 1: VT_sample_label_rdd_class
// Set the package name and import the dependent classes
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.clustering.KMeans
import PreProcessingConfig._
import spark.implicits._

case class VT_sample_label_rdd_class(sha256: String, label: Double)
// Inferred from usage below; in the original project this case class is defined elsewhere.
case class VT_sample_signatures_final_array_rdd_class(sha256: String, array_results: Array[Double])

// Load the per-sample signature arrays from Parquet into an RDD of (sha256, Array[Double]).
val VT_sample_signatures_final_array_rdd = spark.read.format("parquet").load(VT_sample_signatures_final_array_file).rdd
  .map(row => VT_sample_signatures_final_array_rdd_class(row(0).toString, row(1).asInstanceOf[Seq[Double]].toArray))
val VT_sample_signatures_with_sha_rddvector = VT_sample_signatures_final_array_rdd.map(x => (x.sha256, Vectors.dense(x.array_results)))
val VT_sample_signatures_rddvector = VT_sample_signatures_with_sha_rddvector.map(_._2)

// Reduce every signature vector to its top 2 principal components.
val pca = new PCA(2).fit(VT_sample_signatures_rddvector)
val VT_sample_pca_with_sha_rddvector = VT_sample_signatures_with_sha_rddvector.map(x => (x._1, pca.transform(x._2)))
val VT_sample_pca_rddvector = VT_sample_pca_with_sha_rddvector.map(_._2).cache()

// Cluster the 2-D projections with k-means (k = 5, maxIterations = 5, runs = 1).
val KMeans_Model = KMeans.train(VT_sample_pca_rddvector, 5, 5, 1)
val VT_sample_pca_label_with_sha_rdd = VT_sample_pca_with_sha_rddvector.map(x => (x._1, (x._2.toArray(0), x._2.toArray(1), KMeans_Model.predict(x._2))))
val VT_sample_label_rdd = VT_sample_pca_label_with_sha_rdd.map(x => VT_sample_label_rdd_class(x._1, x._2._3.toDouble))

// Persist the (sha256, cluster label) pairs as Parquet.
VT_sample_label_rdd.toDF().write.format("parquet").save(VT_sample_label_file)
Author: HolmesProcessing | Project: gsoc_relationship | Source: get_labels_from_VT_signatures.scala
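Example 1 hard-codes k = 5 for k-means. If you adapt it, a common sanity check is to sweep k and compare the within-set sum of squared errors via KMeansModel.computeCost. The sketch below is illustrative only and assumes the cached VT_sample_pca_rddvector from Example 1 is in scope.

// Sketch: evaluate several candidate cluster counts on the PCA-reduced vectors.
// Assumes VT_sample_pca_rddvector (an RDD[Vector]) from Example 1 is in scope.
import org.apache.spark.mllib.clustering.KMeans

for (k <- 2 to 8) {
  val model = KMeans.train(VT_sample_pca_rddvector, k, 20)
  // computeCost returns the within-cluster sum of squared distances (WSSSE).
  println(s"k = $k, WSSSE = " + model.computeCost(VT_sample_pca_rddvector))
}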
Example 2: PCAExample2
// Set the package name and import the dependent classes
package com.chapter11.SparkMachineLearning

import org.apache.spark.sql.SparkSession
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.mllib.regression.LinearRegressionWithSGD

object PCAExample2 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .config("spark.sql.warehouse.dir", "E:/Exp/")
      .appName("PCAExample2")
      .getOrCreate()

    // Load MNIST in LIBSVM format as an RDD of LabeledPoint.
    val data = MLUtils.loadLibSVMFile(spark.sparkContext, "data/mnist.bz2")

    // The same data loaded as a DataFrame, used here only for display.
    val df = spark.read.format("libsvm").load("C:/Exp/mnist.bz2")
    df.show(20)

    val featureSize = data.first().features.size
    println("Feature Size: " + featureSize)

    // 75%/25% train/test split.
    val splits = data.randomSplit(Array(0.75, 0.25), seed = 12345L)
    val (training, test) = (splits(0), splits(1))

    // Fit PCA on the full dataset, keeping half as many components as features,
    // then project both splits into the reduced space.
    val pca = new PCA(featureSize / 2).fit(data.map(_.features))
    val training_pca = training.map(p => p.copy(features = pca.transform(p.features)))
    val test_pca = test.map(p => p.copy(features = pca.transform(p.features)))

    // Train one linear model on the raw features and one on the PCA features.
    val numIterations = 20
    val stepSize = 0.0001
    val model = LinearRegressionWithSGD.train(training, numIterations, stepSize)
    val model_pca = LinearRegressionWithSGD.train(training_pca, numIterations, stepSize)

    val valuesAndPreds = test.map { point =>
      val score = model.predict(point.features)
      (score, point.label)
    }
    val valuesAndPreds_pca = test_pca.map { point =>
      val score = model_pca.predict(point.features)
      (score, point.label)
    }

    // Compare mean squared error with and without dimensionality reduction.
    val MSE = valuesAndPreds.map { case (v, p) => math.pow(v - p, 2) }.mean()
    val MSE_pca = valuesAndPreds_pca.map { case (v, p) => math.pow(v - p, 2) }.mean()
    println("Mean Squared Error = " + MSE)
    println("PCA Mean Squared Error = " + MSE_pca)
    println("Model coefficients: " + model.toString())
    println("Model with PCA coefficients: " + model_pca.toString())

    spark.stop()
  }
}
Author: PacktPublishing | Project: Scala-and-Spark-for-Big-Data-Analytics | Source: PCA_LinearRegression_Demo.scala
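Example 2 loads a DataFrame but performs dimensionality reduction only through the RDD-based API. For DataFrame pipelines, Spark also provides a PCA estimator in org.apache.spark.ml.feature. The sketch below illustrates that alternative; it assumes a SparkSession named spark and reuses the libsvm loading pattern from Example 2.

// Sketch: the DataFrame-based PCA estimator from the spark.ml package.
// Assumes a SparkSession named `spark` and a LIBSVM file as in Example 2.
import org.apache.spark.ml.feature.PCA

val df = spark.read.format("libsvm").load("data/mnist.bz2")
val pcaModel = new PCA()
  .setInputCol("features")     // column holding the input vectors
  .setOutputCol("pcaFeatures") // column to receive the projected vectors
  .setK(50)                    // number of principal components to keep
  .fit(df)
pcaModel.transform(df).select("pcaFeatures").show(5)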
Example 3: PCAApp
// Set the package name and import the dependent classes
package org.apress.prospark

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext

object PCAApp {
  def main(args: Array[String]) {
    if (args.length != 4) {
      System.err.println(
        "Usage: PCAApp <appname> <batchInterval> <hostname> <port>")
      System.exit(1)
    }
    val Seq(appName, batchInterval, hostname, port) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)
    val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))

    // Read whitespace-separated records from a socket, dropping rows that
    // contain NaNs and rows whose second field is "0".
    val substream = ssc.socketTextStream(hostname, port.toInt)
      .filter(!_.contains("NaN"))
      .map(_.split(" "))
      .filter(f => f(1) != "0")

    // Keep ten numeric columns: the first becomes the label, the rest the features.
    val datastream = substream.map(f => Array(f(1), f(4), f(5), f(6), f(20), f(21), f(22), f(36), f(37), f(38)))
      .map(f => f.map(v => v.toDouble))
      .map(f => LabeledPoint(f(0), Vectors.dense(f.slice(1, f.length))))

    datastream.foreachRDD(rdd => {
      // Refit PCA on each micro-batch, halving the feature dimensionality.
      val pca = new PCA(rdd.first().features.size / 2)
        .fit(rdd.map(_.features))
      // 30%/70% test/train split, both projected into the PCA space.
      val testTrain = rdd.randomSplit(Array(0.3, 0.7))
      val test = testTrain(0).map(lp => lp.copy(features = pca.transform(lp.features)))
      val train = testTrain(1).map(lp => lp.copy(features = pca.transform(lp.features)))
      train.take(20).foreach(println)
    })

    ssc.start()
    ssc.awaitTermination()
  }
}
Author: ZubairNabi | Project: prosparkstreaming | Source: L9-8PCA.scala
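Note that Example 3 refits PCA on every micro-batch, so the principal components can change from batch to batch. If a stable projection is wanted, one alternative is to fit the model once on historical data and reuse it inside foreachRDD. The sketch below illustrates that idea; the historical-data path and variable names are assumptions, and datastream is the DStream from Example 3.

// Sketch: fit PCA once on static historical data, then reuse the fitted
// model across micro-batches (hypothetical path and variable names).
import org.apache.spark.mllib.feature.PCA
import org.apache.spark.mllib.linalg.Vectors

val historical = ssc.sparkContext.textFile("hdfs:///data/historical.txt") // assumed path
  .map(line => Vectors.dense(line.split(" ").map(_.toDouble)))
val fixedPca = new PCA(5).fit(historical) // fit once, outside the streaming loop

datastream.foreachRDD { rdd =>
  // Reuse the same projection for every batch instead of refitting.
  val projected = rdd.map(lp => lp.copy(features = fixedPca.transform(lp.features)))
  projected.take(20).foreach(println)
}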
Note: the org.apache.spark.mllib.feature.PCA class examples in this article are collected from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before using or redistributing the code; do not repost without permission.