This article collects typical usage examples of the Scala class breeze.linalg.DenseVector. If you are wondering what DenseVector is for, how it is used, or want to see it in real-world code, the curated class examples below should help.
Twenty code examples of the DenseVector class are presented, ordered by popularity by default. Upvoting the ones you like or find useful helps the site recommend better Scala examples.
Example 1: MllibLBFGS
// Package name and imported dependencies
package optimizers
import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import utils.Functions._
class MllibLBFGS(val data: RDD[LabeledPoint],
loss: LossFunction,
regularizer: Regularizer,
params: LBFGSParameters
) extends Optimizer(loss, regularizer) {
val opt = new LogisticRegressionWithLBFGS
val reg: Updater = (regularizer: Regularizer) match {
case _: L1Regularizer => new L1Updater
case _: L2Regularizer => new SquaredL2Updater
case _: Unregularized => new SimpleUpdater
}
opt.optimizer.
setNumIterations(params.iterations).
setConvergenceTol(params.convergenceTol).
setNumCorrections(params.numCorrections).
setRegParam(regularizer.lambda).
setUpdater(reg)
override def optimize(): Vector[Double] = {
val model = opt.run(data)
val w = model.weights.toArray
DenseVector(w)
}
}
Author: mlbench, Project: mlbench, Lines of code: 38, Source: MllibLBFGS.scala
Example 2: rddvector
// Package name and imported dependencies
package breeze
import breeze.linalg.{DenseMatrix, DenseVector}
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory
import spark.RecommendationExample.getClass
/**
* Created by I311352 on 4/5/2017.
*/
object rddvector extends App {
val LOG = LoggerFactory.getLogger(getClass)
val conf = new SparkConf().setAppName("vector").setMaster("local[2]")
val sc = new SparkContext(conf)
val data = sc.textFile("data/testdata.txt")
println(data.take(10).toList)
val vectorRDD = data.map(value => {
val columns = value.split(",").map(value => value.toDouble)
new DenseVector(columns)
})
println(vectorRDD.take(100).toList)
// multiply each row by a broadcast scalar constant
val constant = 5.0
val broadcastConstant = sc.broadcast(constant)
val scaledRDD = vectorRDD.map(row => {
row :* broadcastConstant.value
})
println(scaledRDD.take(10).toList)
val scaledRDDByPartition = vectorRDD.glom().map((value:Array[DenseVector[Double]]) => {
val arrayValues = value.map(denseVector => denseVector.data).flatten
val denseMatrix = new DenseMatrix[Double](value.length,value(0).length,arrayValues)
denseMatrix :*= broadcastConstant.value
denseMatrix.toDenseVector
})
println(scaledRDDByPartition.take(10).toList)
}
Author: compasses, Project: elastic-spark, Lines of code: 44, Source: rddvector.scala
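The Breeze operations underneath the Spark plumbing can be tried locally; the values below are made up for illustration:
import breeze.linalg.{DenseMatrix, DenseVector}

object ScalingSketch extends App {
  // Scale a single row vector by a constant, as the map over vectorRDD does above.
  val row = DenseVector(1.0, 2.0, 3.0)
  println(row * 5.0)  // DenseVector(5.0, 10.0, 15.0)

  // Scale a whole matrix at once, as the glom-based variant does per partition.
  val m = DenseMatrix((1.0, 2.0), (3.0, 4.0))
  println(m * 5.0)    // every entry multiplied by 5
}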
Example 3: ColorApp
// Package name and imported dependencies
package com.esri
import java.awt.Color
import breeze.linalg.DenseVector
object ColorApp extends App {
val rnd = new java.security.SecureRandom()
val colorSeq = for (_ <- 0 until 200)
yield {
val r = rnd.nextInt(256) // 0-255 inclusive
val g = rnd.nextInt(256)
val b = rnd.nextInt(256)
val hsb = Color.RGBtoHSB(r, g, b, null).map(_.toDouble)
DenseVector[Double](hsb)
}
val colorLen = colorSeq.length
val somSize = 8
val nodes = for {
q <- 0 until somSize
r <- 0 until somSize
} yield Node(q, r, colorSeq(rnd.nextInt(colorLen)))
val epochMax = colorLen * 100
implicit val pb = TerminalProgressBar(epochMax)
val som = SOM(nodes)
som.train(colorSeq, epochMax, somSize / 2, initialAlpha = 0.3)
som.saveAsPNG("/tmp/som.png", 20)
}
Author: mraad, Project: spark-som-path, Lines of code: 34, Source: ColorApp.scala
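The RGB-to-HSB vectorization used above can be checked on its own, without the SOM and Node classes (a quick sketch, not part of the project):
import java.awt.Color
import breeze.linalg.DenseVector

object HsbSketch extends App {
  // Pure red: hue 0.0, saturation 1.0, brightness 1.0, all in [0, 1].
  val hsb = DenseVector(Color.RGBtoHSB(255, 0, 0, null).map(_.toDouble))
  println(hsb)  // DenseVector(0.0, 1.0, 1.0)
}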
Example 4: SOMSpec
// Package name and imported dependencies
package com.esri
import breeze.linalg.DenseVector
import org.scalatest.{FlatSpec, Matchers}
class SOMSpec extends FlatSpec with Matchers {
it should "train the SOM" in {
val nodes = for (q <- 0 until 10) yield {
Node(q, 0, new DenseVector[Double](Array(q, 0.0)))
}
nodes.length shouldBe 10
val som = SOM(nodes)
som.train(new DenseVector[Double](Array(5.0, 0.0)), 1.0, 0.1)
}
}
Author: mraad, Project: spark-som-path, Lines of code: 16, Source: SOMSpec.scala
Example 5: MllibSGD
// Package name and imported dependencies
package optimizers
import breeze.linalg.{DenseVector, Vector}
import org.apache.spark.mllib.classification.{LogisticRegressionWithSGD, SVMWithSGD}
import org.apache.spark.mllib.optimization.{L1Updater, SimpleUpdater, SquaredL2Updater, Updater}
import org.apache.spark.mllib.regression.{LabeledPoint, LinearRegressionWithSGD}
import org.apache.spark.rdd.RDD
import utils.Functions._
class MllibSGD(val data: RDD[LabeledPoint],
loss: LossFunction,
regularizer: Regularizer,
params: SGDParameters,
ctype: String
) extends Optimizer(loss, regularizer) {
val opt = ctype match {
case "SVM" => new SVMWithSGD()
case "LR" => new LogisticRegressionWithSGD()
case "Regression" => new LinearRegressionWithSGD()
}
val reg: Updater = (regularizer: Regularizer) match {
case _: L1Regularizer => new L1Updater
case _: L2Regularizer => new SquaredL2Updater
case _: Unregularized => new SimpleUpdater
}
ctype match {
case "SVM" => opt.asInstanceOf[SVMWithSGD].optimizer.
setNumIterations(params.iterations).
setMiniBatchFraction(params.miniBatchFraction).
setStepSize(params.stepSize).
setRegParam(regularizer.lambda).
setUpdater(reg)
case "LR" => opt.asInstanceOf[LogisticRegressionWithSGD].optimizer.
setNumIterations(params.iterations).
setMiniBatchFraction(params.miniBatchFraction).
setStepSize(params.stepSize).
setRegParam(regularizer.lambda).
setUpdater(reg)
case "Regression" => opt.asInstanceOf[LinearRegressionWithSGD].optimizer.
setNumIterations(params.iterations).
setMiniBatchFraction(params.miniBatchFraction).
setStepSize(params.stepSize).
setRegParam(regularizer.lambda).
setUpdater(reg)
}
override def optimize(): Vector[Double] = {
val model = opt.run(data)
val w = model.weights.toArray
DenseVector(w)
}
}
Author: mlbench, Project: mlbench, Lines of code: 60, Source: MllibSGD.scala
Example 6: CocoaParameters
// Package name and imported dependencies
package optimizers
import java.io.Serializable
import breeze.linalg.DenseVector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
class CocoaParameters(var n: Int,
var numRounds: Int,
var localIterFrac: Double,
var lambda: Double,
var beta: Double,
var gamma: Double,
var numParts: Int,
var wInit: DenseVector[Double]) extends Serializable {
def this(train: RDD[LabeledPoint], test: RDD[LabeledPoint]) {
this(train.count().toInt,
200,
1.0,
0.01,
1.0,
1.0,
train.partitions.size,
DenseVector.zeros[Double](train.first().features.size))
}
def getLocalIters() = (localIterFrac * n / numParts).toInt
def getDistOptPar(): distopt.utils.Params = {
val loss = distopt.utils.OptUtils.hingeLoss _
distopt.utils.Params(loss, n, wInit, numRounds, getLocalIters, lambda, beta, gamma)
}
override def toString = s"CocoaParameters(n: $n, numRounds: $numRounds, localIters: $getLocalIters, " +
s"lambda: $lambda, beta: $beta, gamma: $gamma, wInit: $wInit)"
}
Author: mlbench, Project: mlbench, Lines of code: 38, Source: CocoaParameters.scala
Example 7: weights
// Package name and imported dependencies
package regression
import breeze.linalg.DenseVector
import breeze.numerics.{log, sigmoid}
trait Regressor {
private lazy val weightsWithIterations = learn
def weights: DenseVector[Double] = weightsWithIterations._1
def iterations: Seq[Double] = weightsWithIterations._2
protected def predict(x: DenseVector[Double], weights: DenseVector[Double]): Double
protected def costOfPrediction(h: Double, y: Double): Double
protected def learn: (DenseVector[Double], Seq[Double])
def predict(x: DenseVector[Double]): Double = {
predict(x, weights)
}
def cost(x: DenseVector[Double], y: Double): Double = {
costOfPrediction(predict(x), y)
}
def meanCost(data: Iterable[(DenseVector[Double], Double)]): Double = {
var cost = 0.0d
var total = 0L
data.foreach { case (x, y) =>
total += 1
cost += costOfPrediction(predict(x), y)
}
cost / total
}
}
trait LinearLike {
protected def predict(x: DenseVector[Double], weights: DenseVector[Double]): Double = {
x dot weights
}
protected def costOfPrediction(h: Double, y: Double): Double = {
val error = h - y
error * error / 2
}
}
trait LogisticLike {
protected def predict(x: DenseVector[Double], weights: DenseVector[Double]): Double = {
sigmoid(x dot weights)
}
protected def costOfPrediction(h: Double, y: Double): Double = {
-y * log(h) - (1.0 - y) * log(1.0 - h)
}
}
Author: agolovenko, Project: ml-tools, Lines of code: 59, Source: Regressor.scala
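A minimal sketch of how these traits compose; the FixedLinearRegressor class is hypothetical, and its learn simply hands back fixed weights instead of fitting anything:
import breeze.linalg.DenseVector
import regression.{LinearLike, Regressor}

object RegressorSketch extends App {
  class FixedLinearRegressor(w: DenseVector[Double]) extends Regressor with LinearLike {
    // No training: return the given weights and an empty cost history.
    override protected def learn: (DenseVector[Double], Seq[Double]) = (w, Seq.empty)
  }

  val model = new FixedLinearRegressor(DenseVector(0.5, -1.0))
  println(model.predict(DenseVector(2.0, 1.0)))               // 0.5*2 + (-1.0)*1 = 0.0
  println(model.meanCost(Seq((DenseVector(2.0, 1.0), 1.0))))  // squared error (0 - 1)^2 / 2 = 0.5
}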
Example 8: Evaluation
// Package name and imported dependencies
package util
import breeze.linalg.{DenseMatrix, DenseVector, sum}
import regression.Regressor
object Evaluation {
def confusion(lr: Regressor, data: Iterable[(DenseVector[Double], Double)]): DenseMatrix[Double] = {
val confusion = DenseMatrix.zeros[Double](2, 2)
data.map { case (x, y) =>
(y.toInt, if (lr.predict(x) > 0.5) 1 else 0)
} foreach { case (truth, predicted) =>
confusion(truth, predicted) += 1.0
}
confusion
}
def printConfusionMtx(confusion: DenseMatrix[Double]): Unit = {
val negatives = confusion(0, 0) + confusion(0, 1)
val positives = confusion(1, 0) + confusion(1, 1)
val total = sum(confusion)
val falseNegatives = confusion(1, 0)
val falsePositives = confusion(0, 1)
val accuracy = (confusion(0, 0) + confusion(1, 1)) / total
println("============= Stats =============\n")
println(f"Positive examples: $positives%1.0f")
println(f"Negative examples: $negatives%1.0f")
println(f"Total: $total%1.0f")
println(f"Pos/Neg ratio: ${positives/negatives}%1.2f")
println("\n============= Results =============\n")
println("Confusion Matrix:")
println(confusion)
println(f"Accuracy: ${accuracy * 100}%2.2f%%")
println(f"False positives: ${falsePositives * 100 / negatives}%2.2f%%")
println(f"False negatives: ${falseNegatives * 100 / positives}%2.2f%%")
}
}
Author: agolovenko, Project: ml-tools, Lines of code: 45, Source: Evaluation.scala
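printConfusionMtx only needs a 2x2 Breeze matrix, so it can be exercised with hypothetical counts laid out the way confusion builds them (rows = truth, columns = prediction):
import breeze.linalg.DenseMatrix
import util.Evaluation

object EvaluationSketch extends App {
  // 50 true negatives, 5 false positives, 8 false negatives, 37 true positives.
  val confusion = DenseMatrix((50.0, 5.0), (8.0, 37.0))
  Evaluation.printConfusionMtx(confusion)  // accuracy = (50 + 37) / 100 = 87%
}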
Example 9: Transformations
// Package name and imported dependencies
package util
import breeze.linalg.DenseVector
object Transformations {
def minMax(med: DenseVector[Double], halfRange: DenseVector[Double])(features: DenseVector[Double]): DenseVector[Double] = {
(features :- med) / halfRange
}
def zScore(means: DenseVector[Double], stddevs: DenseVector[Double])(features: DenseVector[Double]): DenseVector[Double] = {
(features :- means) :/ stddevs
}
def filter(indices: Set[Int])(features: DenseVector[Double]): DenseVector[Double] = {
val result = features.keysIterator.collect {
case i if indices.contains(i) => features(i)
}.toArray
DenseVector(result)
}
def addPolynomialFeatures(mask: DenseVector[Boolean], maxPower: Int)(features: DenseVector[Double]): DenseVector[Double] = {
val f = features(mask).toArray
val len = (maxPower - 1) * f.length
val polyFeatures = new Array[Double](len)
var j = 0
for (p <- 2 to maxPower) {
f.indices.foreach { i =>
polyFeatures(j) = Math.pow(f(i), p)
j += 1
}
}
DenseVector.vertcat(features, DenseVector(polyFeatures))
}
}
Author: agolovenko, Project: ml-tools, Lines of code: 41, Source: Transformations.scala
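A usage sketch for the helpers above; the per-feature statistics passed in are made up for illustration:
import breeze.linalg.DenseVector
import util.Transformations._

object TransformationsSketch extends App {
  val x = DenseVector(1.0, 2.0, 3.0)

  // z-score with hypothetical per-feature means and standard deviations
  println(zScore(DenseVector(1.0, 1.0, 1.0), DenseVector(1.0, 2.0, 2.0))(x))  // DenseVector(0.0, 0.5, 1.0)

  // keep only features 0 and 2
  println(filter(Set(0, 2))(x))  // DenseVector(1.0, 3.0)

  // append the squares of all three features
  println(addPolynomialFeatures(DenseVector(true, true, true), maxPower = 2)(x))
  // DenseVector(1.0, 2.0, 3.0, 1.0, 4.0, 9.0)
}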
Example 10: TabularSpec
// Package name and imported dependencies
import org.scalatest._
import breeze.linalg.{DenseVector, argmax}
import scarla.domain.{Fixtures => F, State}
import scarla.mapping.Tabular
class TabularSpec extends FlatSpec with Matchers {
"A tabular mapping" should "have the correct dimensionality" in {
val m = new Tabular(F.spec(nd=3), 20)
m.dimensionality should be (scala.math.pow(20, 3))
}
it should "have no collisions in 1d" in {
val m = new Tabular(F.spec(nd=1), 10)
for (i <- 0 until 10) {
val p = DenseVector.zeros[Double](10)
p(i) = 1.0
m._phi(Vector(i)) should be (p)
}
}
it should "have no collisions in 2d" in {
val m = new Tabular(F.spec(nd=2), 10)
val ps = DenseVector.zeros[Double](100)
for (i <- 0 until 10; j <- 0 until 10) {
val l = argmax(m._phi(Vector(i, j)))
ps(l) should be (0.0)
ps(l) = 1.0
}
}
it should "have no collisions in 3d" in {
val m = new Tabular(F.spec(nd=3), 10)
val ps = DenseVector.zeros[Double](10000)
for (i <- 0 until 10; j <- 0 until 10; k <- 0 until 10) {
val l = argmax(m._phi(Vector(i, j, k)))
ps(l) should be (0.0)
ps(l) = 1.0
}
}
}
Author: tspooner, Project: scaRLa, Lines of code: 51, Source: TabularSpec.scala
Example 11: sampleFeature
// Package name and imported dependencies
package glintlda.naive
import breeze.linalg.{DenseVector, Vector}
import breeze.stats.distributions.Multinomial
import glintlda.LDAConfig
import glintlda.util.FastRNG
def sampleFeature(feature: Int, oldTopic: Int): Int = {
var i = 0
val p = DenseVector.zeros[Double](config.topics)
var sum = 0.0
while (i < config.topics) {
p(i) = (documentCounts(i) + α) * ((wordCounts(i) + β) / (globalCounts(i) + βSum))
sum += p(i)
i += 1
}
p /= sum
Multinomial(p).draw()
}
}
Author: rjagerman, Project: glintlda, Lines of code: 23, Source: Sampler.scala
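The line being sampled is the usual collapsed-Gibbs proportionality p(z = k) ∝ (n_dk + α) · (n_wk + β) / (n_k + β·V). A standalone sketch of the same computation, with plain names in place of the sampler's Greek-letter fields (the helper and its arguments are illustrative, not glintlda API):
import breeze.linalg.{DenseVector, sum}
import breeze.stats.distributions.Multinomial

object GibbsSketch extends App {
  def sampleTopic(documentCounts: DenseVector[Double],
                  wordCounts: DenseVector[Double],
                  globalCounts: DenseVector[Double],
                  alpha: Double, beta: Double, vocabSize: Int): Int = {
    // Unnormalized topic probabilities for one (document, word) pair.
    val p = (documentCounts + alpha) *:* ((wordCounts + beta) /:/ (globalCounts + beta * vocabSize))
    Multinomial(p /:/ sum(p)).draw()
  }

  val topic = sampleTopic(DenseVector(2.0, 0.0, 1.0), DenseVector(5.0, 1.0, 0.0),
    DenseVector(40.0, 30.0, 30.0), alpha = 0.5, beta = 0.1, vocabSize = 1000)
  println(topic)
}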
Example 12: StreamingSimpleModel
// Package name and imported dependencies
package com.bigchange.streaming
import breeze.linalg.DenseVector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.{LabeledPoint, StreamingLinearRegressionWithSGD}
import org.apache.spark.streaming.{Seconds, StreamingContext}
object StreamingSimpleModel {
def main(args: Array[String]) {
val ssc = new StreamingContext("local[2]","test",Seconds(10)) // at least two local threads: one for the socket receiver, one for processing
val stream = ssc.socketTextStream("localhost",9999)
val numberFeatures = 100
val zeroVector = DenseVector.zeros[Double](numberFeatures)
val model = new StreamingLinearRegressionWithSGD()
.setInitialWeights(Vectors.dense(zeroVector.data))
.setNumIterations(1)
.setStepSize(0.01)
val labeledStream = stream.map { event =>
val split = event.split("\t")
val y = split(0).toDouble
val features = split(1).split(",").map(_.toDouble)
LabeledPoint(label = y, features = Vectors.dense(features))
}
model.trainOn(labeledStream)
// make predictions on the labeled DStream with the latest model
val predictAndTrue = labeledStream.transform { rdd =>
val latestModel = model.latestModel()
rdd.map { point =>
val predict = latestModel.predict(point.features)
predict - point.label
}
}
// compute the MSE and RMSE of each batch
predictAndTrue.foreachRDD { rdd =>
val mse = rdd.map(x => x * x).mean()
val rmse = math.sqrt(mse)
println(s"current batch, MSE: $mse, RMSE:$rmse")
}
ssc.start()
ssc.awaitTermination()
}
}
Author: bigchange, Project: AI, Lines of code: 51, Source: StreamingSimpleModel.scala
Example 13: LazyWindower
// Package name and imported dependencies
package nodes
import breeze.linalg.DenseVector
import org.apache.spark.rdd.RDD
import pipelines.FunctionNode
import utils.{ImageMetadata, ChannelMajorArrayVectorizedImage, Image}
class LazyWindower(
stride: Int,
windowSize: Int) extends FunctionNode[RDD[Image], RDD[Image]] {
def apply(in: RDD[Image]) = {
in.flatMap(getImageWindow)
}
def getImageWindow(image: Image) = {
val xDim = image.metadata.xDim
val yDim = image.metadata.yDim
val numChannels = image.metadata.numChannels
// Start at (0,0) in (x, y) and slide a windowSize x windowSize window by `stride` in each dimension.
(0 until xDim - windowSize + 1 by stride).toIterator.flatMap { x =>
(0 until yDim - windowSize + 1 by stride).toIterator.map { y =>
// Extract the window.
val pool = new DenseVector[Double](windowSize * windowSize * numChannels)
val startX = x
val endX = x + windowSize
val startY = y
val endY = y + windowSize
var c = 0
while (c < numChannels) {
var s = startX
while (s < endX) {
var b = startY
while (b < endY) {
pool(c + (s-startX)*numChannels +
(b-startY)*(endX-startX)*numChannels) = image.get(s, b, c)
b = b + 1
}
s = s + 1
}
c = c + 1
}
ChannelMajorArrayVectorizedImage(pool.toArray,
ImageMetadata(windowSize, windowSize, numChannels))
}
}
}
}
Author: Vaishaal, Project: ckm, Lines of code: 53, Source: LazyWindower.scala
Example 14: PassiveAggressiveBinaryModelEvaluation
// Package name and imported dependencies
package hu.sztaki.ilab.ps.test.utils
import breeze.linalg.{DenseVector, SparseVector}
import hu.sztaki.ilab.ps.passive.aggressive.algorithm.PassiveAggressiveBinaryAlgorithm
import org.slf4j.LoggerFactory
class PassiveAggressiveBinaryModelEvaluation
object PassiveAggressiveBinaryModelEvaluation {
private val log = LoggerFactory.getLogger(classOf[PassiveAggressiveBinaryModelEvaluation])
def accuracy(model: DenseVector[Double],
testLines: Traversable[(SparseVector[Double], Option[Boolean])],
featureCount: Int,
pac: PassiveAggressiveBinaryAlgorithm): Double = {
var tt = 0
var ff = 0
var tf = 0
var ft = 0
var cnt = 0
testLines.foreach { case (vector, label) => label match {
case Some(lab) =>
val real = lab
val predicted = pac.predict(vector, model)
(real, predicted) match {
case (true, true) => tt +=1
case (false, false) => ff +=1
case (true, false) => tf +=1
case (false, true) => ft +=1
}
cnt += 1
case _ => throw new IllegalStateException("Labels should not be missing.")
}
}
val percent = ((tt + ff).toDouble / cnt) * 100
percent
}
}
Author: gaborhermann, Project: flink-parameter-server, Lines of code: 45, Source: PassiveAggressiveBinaryModelEvaluation.scala
Example 15: QuadraticObjectiveFunction
// Package name and imported dependencies
package cvx
import breeze.linalg.{DenseMatrix, DenseVector}
import MatrixUtils._
class QuadraticObjectiveFunction(
override val dim:Int,
val r:Double,
val a:DenseVector[Double],
val P:DenseMatrix[Double]
)
extends ObjectiveFunction(dim) {
if(a.length!=dim){
val msg = "Vector a must be of dimension "+dim+" but length(a) "+a.length
throw new IllegalArgumentException(msg)
}
if(!(P.rows==dim & P.cols==dim)) {
val msg = "Matrix P must be square of dimension "+dim+" but is "+P.rows+"x"+P.cols
throw new IllegalArgumentException(msg)
}
checkSymmetric(P,1e-13)
def valueAt(x:DenseVector[Double]) = { checkDim(x); r + (a dot x) + (x dot (P*x))/2 }
def gradientAt(x:DenseVector[Double]) = { checkDim(x); a+P*x }
def hessianAt(x:DenseVector[Double]) = { checkDim(x); P }
}
Author: spyqqqdia, Project: cvx, Lines of code: 32, Source: QuadraticObjectiveFunction.scala
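A worked usage sketch, assuming the abstract ObjectiveFunction base in package cvx needs nothing beyond the three methods implemented above: with r = 0, a = (1, -1) and P = I, f(x) = a·x + (x·x)/2.
import breeze.linalg.{DenseMatrix, DenseVector}
import cvx.QuadraticObjectiveFunction

object QuadraticSketch extends App {
  val f = new QuadraticObjectiveFunction(2, 0.0, DenseVector(1.0, -1.0), DenseMatrix.eye[Double](2))
  val x = DenseVector(1.0, 2.0)
  println(f.valueAt(x))     // (1 - 2) + (1 + 4) / 2 = 1.5
  println(f.gradientAt(x))  // a + P*x = DenseVector(2.0, 1.0)
  println(f.hessianAt(x))   // the identity matrix P
}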
Example 16: vectorize
// Package name and imported dependencies
package models
import breeze.linalg.DenseVector
import edu.stanford.nlp.simple._
def vectorize(): DenseVector[Double] = {
var vectorList = List[Double]()
val words = sentences.flatMap(sentence => sentence.words().toArray.toList.asInstanceOf[List[String]])
val uniqueWordMap = words.foldLeft(Map.empty[String, Int]) { case (map, word) =>
map + (word.toLowerCase -> (map.getOrElse(word.toLowerCase, 0) + 1))
}
vectorList = vectorList ++ List[Double](uniqueWordMap.keySet.size.toFloat / words.length.toFloat)
val countWordMap = words.foldLeft(Map.empty[Int, Int]) { case (map, word) =>
map + (word.length -> (map.getOrElse(word.length, 0) + 1))
}
vectorList = vectorList ++ (1 to 20).map(num => countWordMap.getOrElse(num, 0).toDouble)
val partsOfSpeech = sentences.flatMap(sentence => sentence.posTags().toArray.toList.asInstanceOf[List[String]])
val partsOfSpeechMap = partsOfSpeech.foldLeft(Map.empty[String, Int]) { case (map, pos) =>
map + (pos -> (map.getOrElse(pos, 0) + 1))
}
vectorList = vectorList ++ Segment.partsOfSpeech.map(part => partsOfSpeechMap.getOrElse(part, 0).toDouble)
vectorList = vectorList ++ Segment.commonConjunctions.map(conj => uniqueWordMap.getOrElse(conj, 0).toDouble)
vectorList = vectorList ++ Segment.commonPronouns.map(pronoun => uniqueWordMap.getOrElse(pronoun, 0).toDouble)
DenseVector[Double](vectorList.toArray)
}
}
object Segment {
def apply(text: String) = {
val sentences = new Document(text).sentences().toArray.toList.asInstanceOf[List[Sentence]]
new Segment(sentences)
}
val partsOfSpeech = scala.io.Source.fromFile("app/resources/pos.txt").mkString.split("\n").map(_.trim())
val commonConjunctions = List("for", "and", "nor", "but", "or", "yet", "so")
val commonPronouns = List("she", "he", "her", "him", "his", "hers")
val defaultDimension = 70
}
Author: ChenJesse, Project: Spectrum, Lines of code: 46, Source: Segment.scala
Example 17: Name
// Package name and imported dependencies
package models
import breeze.linalg.DenseVector
class Name(name: String) extends Vectorizable {
val dimension = 2000
def vectorize(): DenseVector[Double] = {
val vectorArray = Array.fill[Double](dimension)(0)
for (i <- 0 to 7) {
var featureString = "prefix" + name.slice(0, i)
vectorArray(Math.abs(featureString.hashCode) % dimension) = 1.0
featureString = "suffix" + name.slice(name.length - i, name.length)
vectorArray(Math.abs(featureString.hashCode) % dimension) = 1.0
}
DenseVector(vectorArray)
}
}
Author: ChenJesse, Project: Spectrum, Lines of code: 19, Source: Name.scala
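A quick check of the hashed prefix/suffix features, assuming the project's Vectorizable trait only requires vectorize:
import breeze.linalg.sum
import models.Name

object NameSketch extends App {
  val v = new Name("alice").vectorize()
  println(v.length)  // 2000-dimensional feature vector
  println(sum(v))    // number of distinct prefix/suffix hash buckets set to 1.0
}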
Example 18: hinge
// Package name and imported dependencies
package classifiers
import breeze.linalg.{*, DenseMatrix, DenseVector, norm}
def hinge(xTr: DenseMatrix[Double],
yTr: DenseVector[Int]): DenseVector[Double] = {
val doubleYTR = new DenseVector(yTr.toArray.map(_.toDouble))
val YWX = doubleYTR *:* (xTr(*, ::) dot w)
val YX = xTr(::, *) *:* doubleYTR
val delta = (DenseVector.ones[Double](YWX.length) - YWX).map(x => if (x <= 0) 0.0 else 1.0)
val gradient = (YX(::, *) dot delta) * -1.0
reg match {
case Some(reg) => gradient.t + reg.regGradient(w)
case None => gradient.t
}
}
}
Author: ChenJesse, Project: Spectrum, Lines of code: 20, Source: SVMClassifier.scala
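The fragment above relies on the enclosing classifier's fields w and reg. The same subgradient, d/dw of sum_i max(0, 1 - y_i * w·x_i) = - sum over margin-violating i of y_i * x_i, can be written as a self-contained sketch with w passed in and the regularizer left out (the helper name is hypothetical):
import breeze.linalg.{DenseMatrix, DenseVector}

object HingeSketch extends App {
  def hingeGradient(xTr: DenseMatrix[Double], yTr: DenseVector[Double],
                    w: DenseVector[Double]): DenseVector[Double] = {
    val margins = yTr *:* (xTr * w)                           // y_i * (w . x_i), one entry per row
    val active = margins.map(m => if (m < 1.0) 1.0 else 0.0)  // rows that violate the margin
    -(xTr.t * (yTr *:* active))                               // minus the sum of y_i * x_i over those rows
  }

  val xTr = DenseMatrix((1.0, 0.0), (0.0, 1.0))
  val yTr = DenseVector(1.0, -1.0)
  println(hingeGradient(xTr, yTr, DenseVector.zeros[Double](2)))  // DenseVector(-1.0, 1.0)
}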
Example 19: adagrad
// Package name and imported dependencies
package classifiers
import breeze.linalg.{*, DenseMatrix, DenseVector, norm}
def adagrad(lossFunc: ((DenseMatrix[Double], DenseVector[Int]) => DenseVector[Double]),
alpha: Double, maxiter: Int, delta: Double,
xTr: DenseMatrix[Double], yTr: DenseVector[Int]): Unit = {
var z = DenseVector.zeros[Double](dimension)
for (_ <- 1 until maxiter) {
val gradient = lossFunc(xTr, yTr)
z = z + gradient.map {x => x * x}
val zEps = z + 0.0001
val alphaGradient = gradient * alpha
val newW = w - alphaGradient /:/ zEps.map(x => Math.sqrt(x))
if (norm(gradient) < delta) return
w = newW
}
}
}
Author: ChenJesse, Project: Spectrum, Lines of code: 21, Source: LinearClassifier.scala
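For intuition, here is a self-contained AdaGrad loop, independent of the classifier class above, minimizing f(w) = (w·w)/2 whose gradient is w itself; each coordinate takes a step of alpha * g / sqrt(z + eps), where z accumulates squared gradients:
import breeze.linalg.{DenseVector, norm}
import breeze.numerics.sqrt

object AdagradSketch extends App {
  var w = DenseVector(4.0, -2.0)
  val z = DenseVector.zeros[Double](2)       // accumulated squared gradients
  val alpha = 0.5
  for (_ <- 1 to 200) {
    val g = w.copy                           // gradient of (w . w) / 2 is w
    z += g *:* g
    w -= (g /:/ sqrt(z + 1e-4)) * alpha      // per-coordinate scaled step
  }
  println(norm(w))                           // shrinks steadily toward 0
}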
Example 20: logistic
// Package name and imported dependencies
package classifiers
import breeze.linalg.{*, Axis, DenseMatrix, DenseVector, norm, sum}
import breeze.numerics.exp
def logistic(xTr: DenseMatrix[Double],
yTr: DenseVector[Int]): DenseVector[Double] = {
val doubleYTR = new DenseVector(yTr.toArray.map(_.toDouble))
val YWX = doubleYTR *:* (xTr(*, ::) dot w)
var eToTheYWX = exp.inPlace(YWX)
val numerator = xTr(::, *) *:* doubleYTR
val denominator = eToTheYWX :+= 1.0
val gradient = (sum(numerator(::, *) /:/ denominator, Axis._0) * -1.0).t
reg match {
case Some(reg) => gradient + reg.regGradient(w)
case None => gradient
}
}
}
Author: ChenJesse, Project: Spectrum, Lines of code: 21, Source: LogisticRegressionClassifier.scala
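As with the hinge example, this gradient depends on the enclosing classifier's w and reg. Written standalone, the gradient of sum_i log(1 + exp(-y_i * w·x_i)) is - sum_i y_i * x_i / (1 + exp(y_i * w·x_i)) (again a sketch, with a hypothetical helper name):
import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.numerics.exp

object LogisticSketch extends App {
  def logisticGradient(xTr: DenseMatrix[Double], yTr: DenseVector[Double],
                       w: DenseVector[Double]): DenseVector[Double] = {
    val ywx = yTr *:* (xTr * w)            // y_i * (w . x_i)
    -(xTr.t * (yTr /:/ (exp(ywx) + 1.0)))  // - sum_i y_i * x_i / (1 + e^{y_i w.x_i})
  }

  val xTr = DenseMatrix((1.0, 0.0), (0.0, 1.0))
  val yTr = DenseVector(1.0, -1.0)
  println(logisticGradient(xTr, yTr, DenseVector.zeros[Double](2)))  // DenseVector(-0.5, 0.5)
}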
Note: the breeze.linalg.DenseVector examples in this article were collected from GitHub, MSDocs and other source-code and documentation hosting platforms; the snippets are drawn from open-source projects contributed by their authors. Copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing it, and do not republish without permission.