This article collects typical usage examples of the Java class com.datastax.spark.connector.cql.CassandraConnector. If you are wondering how to use the CassandraConnector class in Java, or are looking for concrete examples of it in action, the curated code samples below should help.
The CassandraConnector class belongs to the com.datastax.spark.connector.cql package. Ten code examples of the class are shown below, sorted by popularity by default.
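Before the individual examples, here is a minimal sketch of the pattern that recurs throughout them: build a SparkConf carrying the spark.cassandra.connection.host setting, obtain a connector with CassandraConnector.apply(conf), and run CQL either through a session opened in a try-with-resources block (as in Examples 2-4, 6, and 7) or through a withSessionDo callback (as in Example 1). The host, app name, and keyspace below are placeholders, not values taken from the examples.

import com.datastax.driver.core.Session;
import com.datastax.spark.connector.cql.CassandraConnector;
import org.apache.spark.SparkConf;

public class ConnectorSketch {
  public static void main(String[] args) {
    // Placeholder configuration; host and app name are illustrative only.
    SparkConf conf = new SparkConf()
        .setAppName("CassandraConnector sketch")
        .setMaster("local[*]")
        .set("spark.cassandra.connection.host", "127.0.0.1");

    // Obtain the connector and open a pooled session explicitly.
    CassandraConnector connector = CassandraConnector.apply(conf);
    try (Session session = connector.openSession()) {
      session.execute("CREATE KEYSPACE IF NOT EXISTS demo WITH replication = "
          + "{'class': 'SimpleStrategy', 'replication_factor': 1}");
    }
  }
}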
Example 1: saveToCassandra
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
void saveToCassandra(List<DependencyLink> links) {
  Dependencies thrift = Dependencies.create(day, day /* ignored */, links);
  ByteBuffer blob = thrift.toThrift();

  log.info("Saving with day={}", dateStamp);
  CassandraConnector.apply(conf).withSessionDo(new AbstractFunction1<Session, Void>() {
    @Override public Void apply(Session session) {
      session.execute(QueryBuilder.insertInto(keyspace, "dependencies")
          .value("day", new Date(day))
          .value("dependencies", blob)
      );
      return null;
    }
  });
  log.info("Done");
}
Author: openzipkin, Project: zipkin-dependencies, Lines of code: 17, Source file: CassandraDependenciesJob.java
Example 2: createSchema
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
private void createSchema(JavaSparkContext sc) {
  CassandraConnector connector = CassandraConnector.apply(sc.getConf());
  try (Session session = connector.openSession()) {
    session.execute(deletekeyspace);
    session.execute(keyspace);
    session.execute("USE todolist");
    session.execute(table);
    session.execute(tableRDD);
  }
}
Author: MammatusTech, Project: Spark-Course, Lines of code: 15, Source file: SparkApp.java
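The statements deletekeyspace, keyspace, table, and tableRDD are string fields defined elsewhere in SparkApp and are not part of this excerpt. As a rough sketch, they presumably look something like the following; every name except the todolist keyspace (used in the USE statement above) is hypothetical.

// Hypothetical CQL strings; the actual definitions live elsewhere in SparkApp.
private static final String deletekeyspace = "DROP KEYSPACE IF EXISTS todolist";
private static final String keyspace =
    "CREATE KEYSPACE todolist WITH replication = "
        + "{'class': 'SimpleStrategy', 'replication_factor': 1}";
private static final String table =
    "CREATE TABLE todolist.todolisttable "
        + "(id text PRIMARY KEY, description text, category text)";
private static final String tableRDD =
    "CREATE TABLE todolist.temp "
        + "(id text PRIMARY KEY, description text, category text)";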
Example 3: main
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
public static void main(String[] args) {
  verifyArgs(args);

  SparkConf conf = new SparkConf()
      .setAppName("Spark Canary (CC) - Test")
      .setMaster(args[0])
      .set("spark.cassandra.connection.host", args[1]);

  JavaSparkContext sc = new JavaSparkContext(conf);
  CassandraConnector connector = CassandraConnector.apply(conf);

  try (Session session = connector.openSession()) {
    session.execute("DROP KEYSPACE IF EXISTS simple_canary_cc");
    session.execute("CREATE KEYSPACE simple_canary_cc WITH " +
        "REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1}");
    session.execute("CREATE TABLE simple_canary_cc.simple_pair " +
        "(key int PRIMARY KEY, value text)");
  }

  List<SimplePair> pairs = Arrays.asList(
      new SimplePair(1, "One"),
      new SimplePair(5, "Five"),
      new SimplePair(6, "Six"),
      new SimplePair(7, "Seven"),
      new SimplePair(9, "Nine"),
      new SimplePair(10, "Ten"),
      new SimplePair(12, "Twelve"),
      new SimplePair(16, "Sixteen"),
      new SimplePair(19, "Nineteen")
  );

  JavaRDD<SimplePair> simplekvRDD = sc.parallelize(pairs);

  javaFunctions(simplekvRDD)
      .writerBuilder("simple_canary_cc", "simple_pair", mapToRow(SimplePair.class))
      .saveToCassandra();

  CassandraJavaRDD<CassandraRow> rdd = javaFunctions(sc)
      .cassandraTable("simple_canary_cc", "simple_pair")
      .select("key", "value");

  long count = rdd.count();
  System.out.format("Count: %d %n", count);

  List<CassandraRow> somePairs = rdd.take(9);
  System.out.println(somePairs);

  sc.stop();
}
Author: lenards, Project: spark-cstar-canaries, Lines of code: 52, Source file: SparkCanaryCC.java
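This example calls javaFunctions and mapToRow without qualification, so the excerpt assumes the static imports of the connector's Java API helpers shown below.

// Static helpers from the Spark Cassandra Connector Java API,
// assumed to be imported at the top of SparkCanaryCC.java.
import static com.datastax.spark.connector.japi.CassandraJavaUtil.javaFunctions;
import static com.datastax.spark.connector.japi.CassandraJavaUtil.mapToRow;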
Example 4: createDdl
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
private void createDdl(SparkConf conf) {
  CassandraConnector connector = CassandraConnector.apply(conf);
  try (Session session = connector.openSession()) {
    session.execute("DROP KEYSPACE IF EXISTS test");
    session.execute("CREATE KEYSPACE test WITH replication = "
        + "{'class': 'SimpleStrategy', 'replication_factor': 1}");
    session.execute("CREATE TABLE test.kafka_logstream (key UUID PRIMARY KEY, value TEXT)");
  }
}
Author: ogidogi, Project: laughing-octo-sansa, Lines of code: 10, Source file: KafkaSparkCassandraFlow.java
Example 5: train
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
public MatrixFactorizationModel train(JavaSparkContext sparkCtx, CassandraConnector cassandraConnector) {
  CassandraJavaRDD<CassandraRow> trainingRdd =
      javaFunctions(sparkCtx).cassandraTable(RatingDO.EMPLOYERRATINGS_KEYSPACE, RatingDO.RATINGS_TABLE);
  JavaRDD<Rating> trainingJavaRdd =
      trainingRdd.map(new org.apache.spark.api.java.function.Function<CassandraRow, Rating>() {
        @Override
        public Rating call(CassandraRow trainingRow) throws Exception {
          return new Rating(trainingRow.getInt(RatingDO.USER_COL),
              trainingRow.getInt(RatingDO.PRODUCT_COL),
              trainingRow.getDouble(RatingDO.RATING_COL));
        }
      });
  MatrixFactorizationModel model = ALS.train(JavaRDD.toRDD(trainingJavaRdd), RANK, ITER, LAMBDA);
  return model;
}
Author: JoshuaFox, Project: spark-cassandra-collabfiltering, Lines of code: 12, Source file: CollabFilterCassandra7.java
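The trained MatrixFactorizationModel can then be queried for predicted ratings. A minimal usage sketch, assuming a hypothetical collabFilter instance and illustrative user/product IDs (none of which appear in the original project):

// Hypothetical usage; collabFilter and the IDs are illustrative only.
MatrixFactorizationModel model = collabFilter.train(sparkCtx, cassandraConnector);
double predictedRating = model.predict(42 /* userId */, 7 /* productId */);
System.out.println("Predicted rating: " + predictedRating);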
Example 6: loadData
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
private void loadData(JavaSparkContext sc) {
  CassandraConnector connector = CassandraConnector.apply(sc.getConf());
  try (Session session = connector.openSession()) {
    session.execute(task1);
    session.execute(task2);
    session.execute(task3);
    session.execute(task4);
    session.execute(task5);
    session.execute(task6);
    session.execute(task7);
  }
}
Author: MammatusTech, Project: Spark-Course, Lines of code: 19, Source file: SparkApp.java
Example 7: queryData
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
private void queryData(JavaSparkContext sc) {
  CassandraConnector connector = CassandraConnector.apply(sc.getConf());
  try (Session session = connector.openSession()) {
    ResultSet results = session.execute(query);
    System.out.println("\nQuery all results from cassandra's todolisttable:\n" + results.all());

    ResultSet results1 = session.execute(query1);
    System.out.println("\nSaving RDD into a temp table in cassandra then query all results from cassandra:\n" + results1.all());
  }
}
Author: MammatusTech, Project: Spark-Course, Lines of code: 18, Source file: SparkApp.java
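As in Example 2, the strings task1 through task7 (Example 6) and query/query1 here are fields defined elsewhere in SparkApp. A plausible sketch, reusing the hypothetical todolisttable and temp tables from the sketch under Example 2:

// Hypothetical CQL statements; the actual values live elsewhere in SparkApp.
private static final String task1 =
    "INSERT INTO todolist.todolisttable (id, description, category) "
        + "VALUES ('1', 'Buy groceries', 'errands')";
// ... task2 through task7 follow the same INSERT pattern with different rows.
private static final String query  = "SELECT * FROM todolist.todolisttable";
private static final String query1 = "SELECT * FROM todolist.temp";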
Example 8: train
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
public MatrixFactorizationModel train(JavaSparkContext sparkCtx, CassandraConnector cassandraConnector) {
  CassandraJavaRDD<CassandraRow> trainingRdd =
      javaFunctions(sparkCtx).cassandraTable(RatingDO.EMPLOYERRATINGS_KEYSPACE, RatingDO.RATINGS_TABLE);
  JavaRDD<Rating> trainingJavaRdd = trainingRdd.map(trainingRow ->
      new Rating(trainingRow.getInt(RatingDO.USER_COL),
          trainingRow.getInt(RatingDO.PRODUCT_COL),
          trainingRow.getDouble(RatingDO.RATING_COL)));
  MatrixFactorizationModel model = ALS.train(JavaRDD.toRDD(trainingJavaRdd), RANK, ITER, LAMBDA);
  return model;
}
Author: JoshuaFox, Project: spark-cassandra-collabfiltering, Lines of code: 7, Source file: CollabFilterCassandra8.java
Example 9: CollabFilterCassandraDriver
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
public CollabFilterCassandraDriver() {
  SparkConf conf = new SparkConf()
      .setAppName("Collaborative Filtering with Cassandra")
      .set("spark.master", SPARK_MASTER)
      .set("spark.cassandra.connection.host", CASSANDRA_HOST);
  this.sparkCtx = new JavaSparkContext(conf);
  this.cassandraConnector = CassandraConnector.apply(this.sparkCtx.getConf());
}
Author: JoshuaFox, Project: spark-cassandra-collabfiltering, Lines of code: 6, Source file: CollabFilterCassandraDriver.java
Example 10: train
import com.datastax.spark.connector.cql.CassandraConnector; // import the required package/class
MatrixFactorizationModel train(JavaSparkContext sparkCtx, CassandraConnector cassandraConnector);
Author: JoshuaFox, Project: spark-cassandra-collabfiltering, Lines of code: 2, Source file: ICollabFilterCassandra.java
Note: The com.datastax.spark.connector.cql.CassandraConnector class examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or reuse should follow each project's license. Do not republish without permission.