本文整理汇总了Java中org.apache.spark.mllib.linalg.DenseVector类的典型用法代码示例。如果您正苦于以下问题:Java DenseVector类的具体用法?Java DenseVector怎么用?Java DenseVector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
DenseVector类属于org.apache.spark.mllib.linalg包,在下文中一共展示了DenseVector类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: DGER
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * BLAS level-2 DGER operation: A := alpha*x*y^T + A, dispatched on the
 * concrete distributed-matrix representation.
 *
 * @param alpha scalar multiplier
 * @param x     column vector
 * @param y     row vector
 * @param A     distributed matrix to update
 * @param jsc   Spark context used by the distributed kernels
 * @return the updated matrix, or null when the representation is not
 *         supported (the BlockMatrix kernel is not implemented yet)
 */
public static DistributedMatrix DGER(double alpha, DenseVector x, DenseVector y, DistributedMatrix A, JavaSparkContext jsc) {
    Class<?> matrixClass = A.getClass();
    if (matrixClass == IndexedRowMatrix.class) {
        return L2.DGER_IRW((IndexedRowMatrix) A, alpha, x, y, jsc);
    }
    if (matrixClass == CoordinateMatrix.class) {
        return L2.DGER_COORD((CoordinateMatrix) A, alpha, x, y, jsc);
    }
    // BlockMatrix kernel not implemented yet; behaves like any other
    // unsupported representation and yields null.
    //return L2.DGER_BCK((BlockMatrix) A, alpha, x, y, jsc);
    return null;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:19,代码来源:L2.java
示例2: getVectors
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Encodes each raw data row with the model's feature encodings and converts
 * it into an MLlib dense vector. A row that fails to encode is logged and
 * used as-is.
 *
 * @param data raw rows, one String[] per data point
 * @return dense vectors built from the (possibly encoded) rows
 */
private List<Vector> getVectors(List<String[]> data) {
    List<Map<String, Integer>> encodings = model.getEncodings();
    BasicEncoder rowEncoder = new BasicEncoder.Builder().encodings(encodings).build();
    List<Vector> result = new ArrayList<Vector>();
    for (String[] row : data) {
        String[] encoded;
        try {
            encoded = rowEncoder.call(row);
        } catch (Exception e) {
            // Best-effort: fall back to the raw row when encoding fails.
            log.warn("Data encoding failed. Cause: " + e.getMessage());
            encoded = row;
        }
        result.add(new DenseVector(MLUtils.toDoubleArray(encoded)));
    }
    return result;
}
开发者ID:wso2-attic,项目名称:carbon-ml,代码行数:19,代码来源:Predictor.java
示例3: DGEMV
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * BLAS level-2 DGEMV operation: y := alpha*A*x + beta*y, dispatched on the
 * concrete distributed-matrix representation.
 *
 * @param alpha scalar applied to A*x
 * @param A     distributed matrix
 * @param x     input vector
 * @param beta  scalar applied to y before accumulation
 * @param y     input/output vector (also returned)
 * @param jsc   Spark context used by the distributed kernels
 * @return alpha*A*x + beta*y, or null when the matrix representation is
 *         unsupported
 */
public static DenseVector DGEMV(double alpha, DistributedMatrix A, DenseVector x, double beta, DenseVector y, JavaSparkContext jsc){
    // First form y := beta*y.
    if (beta != 1.0) {
        if (beta == 0.0) {
            // Note: reassigns the local only; the caller must use the
            // returned vector rather than rely on in-place mutation here.
            y = Vectors.zeros(y.size()).toDense();
        }
        else {
            BLAS.scal(beta, y);
        }
    }
    if (alpha == 0.0) {
        return y;
    }
    // Form tmp := alpha*A*x with the kernel matching the matrix type.
    DenseVector tmpVector;
    if (A.getClass() == IndexedRowMatrix.class) {
        tmpVector = L2.DGEMV_IRW((IndexedRowMatrix) A, alpha, x, jsc);
    }
    else if (A.getClass() == CoordinateMatrix.class) {
        tmpVector = L2.DGEMV_COORD((CoordinateMatrix) A, alpha, x, jsc);
    }
    else if (A.getClass() == BlockMatrix.class) {
        tmpVector = L2.DGEMV_BCK((BlockMatrix) A, alpha, x, jsc);
    }
    else {
        // Fix: the original fell through with tmpVector == null and then
        // called BLAS.axpy(1.0, null, y), throwing a NullPointerException.
        // Return null explicitly, consistent with DGER's unsupported case.
        return null;
    }
    // y := tmp + y.
    BLAS.axpy(1.0, tmpVector, y);
    return y;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:40,代码来源:L2.java
示例4: DGEMV_COORD
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Computes alpha*matrix*vector for a CoordinateMatrix: multiplies the matrix
 * entries partition-by-partition, then reduces the partial result vectors.
 */
private static DenseVector DGEMV_COORD(CoordinateMatrix matrix, double alpha, DenseVector vector, JavaSparkContext jsc) {
    JavaRDD<MatrixEntry> entries = matrix.entries().toJavaRDD();
    return entries
            .mapPartitions(new MatrixEntriesMultiplication(vector, alpha))
            .reduce(new MatrixEntriesMultiplicationReducer());
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:9,代码来源:L2.java
示例5: testVectorBinarizerDense
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Verifies that an exported VectorBinarizer produces the same binarized
// output as Spark's own transform when applied to dense vectors.
@Test
public void testVectorBinarizerDense() {
// prepare data: three rows of (id, value1, 12-element dense vector)
JavaRDD<Row> jrdd = sc.parallelize(Arrays.asList(
RowFactory.create(0d, 1d, new DenseVector(new double[]{-2d, -3d, -4d, -1d, 6d, -7d, 8d, 0d, 0d, 0d, 0d, 0d})),
RowFactory.create(1d, 2d, new DenseVector(new double[]{4d, -5d, 6d, 7d, -8d, 9d, -10d, 0d, 0d, 0d, 0d, 0d})),
RowFactory.create(2d, 3d, new DenseVector(new double[]{-5d, 6d, -8d, 9d, 10d, 11d, 12d, 0d, 0d, 0d, 0d, 0d}))
));
StructType schema = new StructType(new StructField[]{
new StructField("id", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("value1", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("vector1", new VectorUDT(), false, Metadata.empty())
});
DataFrame df = sqlContext.createDataFrame(jrdd, schema);
// binarize vector1 with threshold 2: entries > 2 become 1, others 0
VectorBinarizer vectorBinarizer = new VectorBinarizer()
.setInputCol("vector1")
.setOutputCol("binarized")
.setThreshold(2d);
//Export this model
byte[] exportedModel = ModelExporter.export(vectorBinarizer, df);
//Import and get Transformer
Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);
//compare predictions
Row[] sparkOutput = vectorBinarizer.transform(df).orderBy("id").select("id", "value1", "vector1", "binarized").collect();
for (Row row : sparkOutput) {
// feed the raw vector (column 2) to the imported transformer
Map<String, Object> data = new HashMap<>();
data.put(vectorBinarizer.getInputCol(), ((DenseVector) row.get(2)).toArray());
transformer.transform(data);
double[] output = (double[]) data.get(vectorBinarizer.getOutputCol());
// the imported transformer must match Spark's binarized column (column 3)
assertArrayEquals(output, ((DenseVector) row.get(3)).toArray(), 0d);
}
}
开发者ID:flipkart-incubator,项目名称:spark-transformers,代码行数:40,代码来源:VectorBinarizerBridgeTest.java
示例6: predict
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Classifies a vessel's status from positional/speed features using the
 * underlying trained model: prediction 1 = MOORED, 2 = ANCHORED,
 * anything else = OTHER.
 */
public Status predict(double lat, double lon, double speedKnots, double courseMinusHeading,
        double preEffectiveSpeedKnots, double preError, double postEffectiveSpeedKnots,
        double postError) {
    double[] featureValues = new double[] { lat, lon, speedKnots, courseMinusHeading,
            preEffectiveSpeedKnots, preError, postEffectiveSpeedKnots, postError };
    double prediction = model.predict(new DenseVector(featureValues));
    if (is(prediction, 1)) {
        return Status.MOORED;
    }
    if (is(prediction, 2)) {
        return Status.ANCHORED;
    }
    return Status.OTHER;
}
开发者ID:amsa-code,项目名称:risky,代码行数:15,代码来源:AnchoredPredictor.java
示例7: convertExecRowToVector
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Converts an ExecRow into an MLlib dense vector.
 *
 * ExecRow columns are one-based, while the target array is zero-based.
 *
 * @param execRow row whose columns are read as doubles
 * @return dense vector holding the row's column values in order
 * @throws StandardException if a column value cannot be read as a double
 */
public static Vector convertExecRowToVector(ExecRow execRow) throws StandardException {
    int length = execRow.nColumns();
    double[] vectorValues = new double[length];
    for (int i = 1; i <= length; i++) {
        // Fix: the original wrote to vectorValues[i], leaving index 0 unset
        // and throwing ArrayIndexOutOfBoundsException on the last column.
        vectorValues[i - 1] = execRow.getColumn(i).getDouble();
    }
    return new DenseVector(vectorValues);
}
开发者ID:splicemachine,项目名称:spliceengine,代码行数:16,代码来源:SparkMLibUtils.java
示例8: call
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
@Override
public IndexedRow call(Tuple2<Long, double[]> longTuple2) throws Exception {
return new IndexedRow(longTuple2._1(), new DenseVector(longTuple2._2()));
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:5,代码来源:Array2IndexedRow.java
示例9: GetD_IRW
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Extracts the diagonal D of matrix A as an IndexedRowMatrix, optionally
 * inverting each diagonal element (1/d). All off-diagonal entries are zeroed.
 *
 * @param inverseValues when true, store 1/d instead of d on the diagonal
 */
private static IndexedRowMatrix GetD_IRW(IndexedRowMatrix A, boolean inverseValues, JavaSparkContext jsc) {
    JavaRDD<IndexedRow> rows = A.rows().toJavaRDD().cache();
    final Broadcast<Boolean> invertBC = jsc.broadcast(inverseValues);
    JavaRDD<IndexedRow> diagRows = rows.map(new Function<IndexedRow, IndexedRow>() {
        @Override
        public IndexedRow call(IndexedRow row) throws Exception {
            long rowIndex = row.index();
            DenseVector values = row.vector().toDense();
            boolean invert = invertBC.getValue().booleanValue();
            double[] diagValues = new double[values.size()];
            for (int j = 0; j < values.size(); j++) {
                if (j == rowIndex) {
                    diagValues[j] = invert ? 1.0 / values.apply(j) : values.apply(j);
                }
                else {
                    diagValues[j] = 0.0;
                }
            }
            return new IndexedRow(rowIndex, new DenseVector(diagValues));
        }
    });
    return new IndexedRowMatrix(diagRows.rdd());
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:45,代码来源:OtherOperations.java
示例10: call
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
@Override
public DenseVector call(DenseVector vector, DenseVector vector2) throws Exception {
BLAS.axpy(1.0, vector, vector2);
return vector2;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:7,代码来源:MatrixEntriesMultiplicationReducer.java
示例11: MatrixEntriesMultiplication
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Creates the per-partition multiplication function.
 *
 * @param vector the dense vector that matrix entries are multiplied against
 * @param alpha  scalar factor applied to each product
 */
public MatrixEntriesMultiplication(DenseVector vector, double alpha) {
this.vector = vector;
this.alpha = alpha;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:6,代码来源:MatrixEntriesMultiplication.java
示例12: readVectorFromFileInHDFS
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Reads a dense vector from a MatrixMarket-style file stored in HDFS.
 *
 * Leading lines starting with '%' are comments; the first non-comment line
 * starts with the vector size; each following line holds one component.
 *
 * On I/O failure the error is logged and the JVM exits with status 1
 * (behavior kept from the original implementation).
 *
 * @param file path of the file inside HDFS
 * @param conf Hadoop configuration used to open the file system
 * @return the vector read from the file
 */
public static DenseVector readVectorFromFileInHDFS(String file, Configuration conf){
    try {
        FileSystem fs = FileSystem.get(conf);
        Path pt = new Path(file);
        // Fix: try-with-resources — the original leaked the reader when an
        // exception was thrown before the final br.close().
        try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)))) {
            String line = br.readLine();
            double vector[] = null;
            boolean headerPending = true;
            int i = 0;
            while (line != null) {
                if (line.isEmpty()) {
                    // Fix: the original called line.charAt(0) unconditionally
                    // and threw StringIndexOutOfBoundsException on blank lines.
                }
                else if (headerPending && line.charAt(0) == '%') {
                    // Skip comment/header lines.
                }
                else if (headerPending) {
                    headerPending = false;
                    String[] matrixInfo = line.split(" ");
                    vector = new double[Integer.parseInt(matrixInfo[0])];
                }
                else {
                    vector[i] = Double.parseDouble(line);
                    i++;
                }
                line = br.readLine();
            }
            return new DenseVector(vector);
        }
    } catch (IOException e) {
        LOG.error("Error in " + IO.class.getName() + ": " + e.getMessage());
        e.printStackTrace();
        System.exit(1);
    }
    return null;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:52,代码来源:IO.java
示例13: format
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
@Override
public Vector format(Object value){
List<String> labels = getLabels();
HasProbability hasProbability = (HasProbability)value;
double[] probabilities = new double[labels.size()];
for(int i = 0; i < labels.size(); i++){
String label = labels.get(i);
probabilities[i] = hasProbability.getProbability(label);
}
return new DenseVector(probabilities);
}
开发者ID:jeremyore,项目名称:spark-pmml-import,代码行数:17,代码来源:ProbabilityColumnProducer.java
示例14: testVectorAssembler
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Verifies that an exported VectorAssembler concatenates a scalar column and
// a vector column into the same feature vector as Spark's own transform.
@Test
public void testVectorAssembler() {
// prepare data: five rows of (id, value1, 2-element dense vector)
JavaRDD<Row> jrdd = sc.parallelize(Arrays.asList(
RowFactory.create(0d, 1d, new DenseVector(new double[]{2d, 3d})),
RowFactory.create(1d, 2d, new DenseVector(new double[]{3d, 4d})),
RowFactory.create(2d, 3d, new DenseVector(new double[]{4d, 5d})),
RowFactory.create(3d, 4d, new DenseVector(new double[]{5d, 6d})),
RowFactory.create(4d, 5d, new DenseVector(new double[]{6d, 7d}))
));
StructType schema = new StructType(new StructField[]{
new StructField("id", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("value1", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("vector1", new VectorUDT(), false, Metadata.empty())
});
DataFrame df = sqlContext.createDataFrame(jrdd, schema);
// assemble value1 and vector1 into a single "feature" column
VectorAssembler vectorAssembler = new VectorAssembler()
.setInputCols(new String[]{"value1", "vector1"})
.setOutputCol("feature");
//Export this model
byte[] exportedModel = ModelExporter.export(vectorAssembler, null);
// NOTE(review): exportedModelJson is unused beyond this point.
String exportedModelJson = new String(exportedModel);
//Import and get Transformer
Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);
//compare predictions
Row[] sparkOutput = vectorAssembler.transform(df).orderBy("id").select("id", "value1", "vector1", "feature").collect();
for (Row row : sparkOutput) {
// feed the scalar (column 1) and vector (column 2) to the imported transformer
Map<String, Object> data = new HashMap<>();
data.put(vectorAssembler.getInputCols()[0], row.get(1));
data.put(vectorAssembler.getInputCols()[1], ((DenseVector) row.get(2)).toArray());
transformer.transform(data);
double[] output = (double[]) data.get(vectorAssembler.getOutputCol());
// must match Spark's assembled feature column (column 3)
assertArrayEquals(output, ((DenseVector) row.get(3)).toArray(), 0d);
}
}
开发者ID:flipkart-incubator,项目名称:spark-transformers,代码行数:43,代码来源:VectorAssemblerBridgeTest.java
示例15: testChiSqSelector
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Verifies that an exported ChiSqSelectorModel (top-1 feature) selects the
// same feature values as Spark's own transform.
@Test
public void testChiSqSelector() {
// prepare data: four rows of (id, label, 3-element feature vector)
JavaRDD<Row> jrdd = sc.parallelize(Arrays.asList(
RowFactory.create(0d, 0d, new DenseVector(new double[]{8d, 7d, 0d})),
RowFactory.create(1d, 1d, new DenseVector(new double[]{0d, 9d, 6d})),
RowFactory.create(2d, 1d, new DenseVector(new double[]{0.0d, 9.0d, 8.0d})),
RowFactory.create(3d, 2d, new DenseVector(new double[]{8.0d, 9.0d, 5.0d}))
));
// NOTE(review): preFilteredData is never used in this test.
double[] preFilteredData = {0.0d, 6.0d, 8.0d, 5.0d};
StructType schema = new StructType(new StructField[]{
new StructField("id", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
new StructField("features", new VectorUDT(), false, Metadata.empty())
});
DataFrame df = sqlContext.createDataFrame(jrdd, schema);
// select the single most label-predictive feature by chi-squared test
ChiSqSelector chiSqSelector = new ChiSqSelector();
chiSqSelector.setNumTopFeatures(1);
chiSqSelector.setFeaturesCol("features");
chiSqSelector.setLabelCol("label");
chiSqSelector.setOutputCol("output");
ChiSqSelectorModel chiSqSelectorModel = chiSqSelector.fit(df);
//Export this model
byte[] exportedModel = ModelExporter.export(chiSqSelectorModel, null);
// NOTE(review): exportedModelJson is unused beyond this point.
String exportedModelJson = new String(exportedModel);
//Import and get Transformer
Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);
//compare predictions
Row[] sparkOutput = chiSqSelectorModel.transform(df).orderBy("id").select("id", "label", "features", "output").collect();
for (Row row : sparkOutput) {
// feed the raw features (column 2) to the imported transformer
Map<String, Object> data = new HashMap<>();
data.put(chiSqSelectorModel.getFeaturesCol(), ((DenseVector) row.get(2)).toArray());
transformer.transform(data);
double[] output = (double[]) data.get(chiSqSelectorModel.getOutputCol());
System.out.println(Arrays.toString(output));
// must match Spark's selected-feature column (column 3)
assertArrayEquals(output, ((DenseVector) row.get(3)).toArray(), 0d);
}
}
开发者ID:flipkart-incubator,项目名称:spark-transformers,代码行数:48,代码来源:ChiSqSelectorBridgeTest.java
示例16: GetLU_IRW
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Returns A with its diagonal zeroed — the strictly lower plus strictly
 * upper triangular part (L+U) of A — as an IndexedRowMatrix.
 */
private static IndexedRowMatrix GetLU_IRW(IndexedRowMatrix A) {
    JavaRDD<IndexedRow> rows = A.rows().toJavaRDD().cache();
    JavaRDD<IndexedRow> offDiagRows = rows.map(new Function<IndexedRow, IndexedRow>() {
        @Override
        public IndexedRow call(IndexedRow row) throws Exception {
            long rowIndex = row.index();
            DenseVector values = row.vector().toDense();
            double[] offDiag = new double[values.size()];
            for (int j = 0; j < values.size(); j++) {
                // Keep every entry except the diagonal one.
                offDiag[j] = (j == rowIndex) ? 0.0 : values.apply(j);
            }
            return new IndexedRow(rowIndex, new DenseVector(offDiag));
        }
    });
    return new IndexedRowMatrix(offDiagRows.rdd());
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:37,代码来源:OtherOperations.java
示例17: DGEMV_IRW
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
private static DenseVector DGEMV_IRW(IndexedRowMatrix matrix, double alpha, DenseVector vector, JavaSparkContext jsc) {
final Broadcast BC = jsc.broadcast(vector);
final Broadcast<Double> AlphaBC = jsc.broadcast(alpha);
//IndexedRowMatrix indexedMatrix = (IndexedRowMatrix) matrix;
JavaRDD<IndexedRow> rows = matrix.rows().toJavaRDD();
List<Tuple2<Long, Double>> returnValues = rows.mapToPair(new PairFunction<IndexedRow, Long, Double>() {
@Override
public Tuple2<Long, Double> call(IndexedRow row) {
DenseVector vect = (DenseVector) BC.getValue();
double alphaBCRec = AlphaBC.getValue().doubleValue();
DenseVector tmp = row.vector().copy().toDense();
BLAS.scal(alphaBCRec, tmp);
return new Tuple2<Long, Double>(row.index(), BLAS.dot(tmp, vect));
}
}).collect();
double[] stockArr = new double[returnValues.size()];
//for(int i = 0; i< returnValues.size(); i++) {
for(Tuple2<Long, Double> item : returnValues) {
stockArr[item._1().intValue()] = item._2();
}
return new DenseVector(stockArr);
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:35,代码来源:L2.java
示例18: DGER_IRW
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Rank-1 update for an IndexedRowMatrix: returns alpha*x*y^T + A.
 * Row i of the result is alpha * x[i] * y + A[i, :].
 */
private static IndexedRowMatrix DGER_IRW(IndexedRowMatrix A, double alpha, DenseVector x, DenseVector y, JavaSparkContext jsc) {
    final Broadcast<Double> alphaBC = jsc.broadcast(alpha);
    final Broadcast<DenseVector> xBC = jsc.broadcast(x);
    final Broadcast<DenseVector> yBC = jsc.broadcast(y);
    JavaRDD<IndexedRow> rows = A.rows().toJavaRDD();
    JavaRDD<IndexedRow> updatedRows = rows.map(new Function<IndexedRow, IndexedRow>() {
        @Override
        public IndexedRow call(IndexedRow indexedRow) throws Exception {
            DenseVector xVec = xBC.getValue();
            DenseVector yVec = yBC.getValue();
            double alphaValue = alphaBC.getValue().doubleValue();
            DenseVector rowVec = indexedRow.vector().toDense();
            long i = indexedRow.index();
            double[] updated = new double[rowVec.size()];
            for (int j = 0; j < yVec.size(); j++) {
                updated[j] = alphaValue * xVec.apply((int) i) * yVec.apply(j) + rowVec.apply(j);
            }
            return new IndexedRow(i, new DenseVector(updated));
        }
    });
    return new IndexedRowMatrix(updatedRows.rdd(), x.size(), y.size());
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:38,代码来源:L2.java
示例19: multiply
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Computes the dot product of two dense vectors.
 * Iterates over v1's length; assumes v2 is at least as long — TODO confirm.
 */
public static double multiply(DenseVector v1, DenseVector v2) {
    double dot = 0.0;
    int n = v1.size();
    for (int i = 0; i < n; i++) {
        dot += v1.apply(i) * v2.apply(i);
    }
    return dot;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:14,代码来源:L1.java
示例20: vectorSumElements
import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Returns the sum of all components of the given dense vector.
 */
public static double vectorSumElements(DenseVector vector) {
    double sum = 0.0;
    int n = vector.size();
    for (int i = 0; i < n; i++) {
        sum += vector.apply(i);
    }
    return sum;
}
开发者ID:jmabuin,项目名称:BLASpark,代码行数:11,代码来源:L1.java
注:本文中的org.apache.spark.mllib.linalg.DenseVector类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论