本文整理汇总了Scala中java.sql.PreparedStatement类的典型用法代码示例。如果您正苦于以下问题:Scala PreparedStatement类的具体用法?Scala PreparedStatement怎么用?Scala PreparedStatement使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PreparedStatement类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Scala代码示例。
示例1: genericEncoder
//设置package包名称以及导入依赖的类
package io.gustavoamigo.quill.pgsql.encoding.range.numeric
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource
import scala.collection.immutable.NumericRange

/** Encoders that bind numeric tuples / NumericRanges as PostgreSQL range
  * literals. Mixed into a JdbcSource (self-type). */
trait Encoders {
  this: JdbcSource[_, _] =>

  /** Builds an Encoder that renders the value with `valueToString` and binds
    * it with Types.OTHER so the server casts the literal to the range type.
    * JDBC parameters are 1-based, hence `index + 1`. */
  private def genericEncoder[T](valueToString: (T => String)): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        row.setObject(index + 1, valueToString(value), Types.OTHER)
        row
      }
    }

  // Inclusive range literal "[lo, hi]" from a 2-tuple.
  private def tuple[T](t: (T, T)) = s"[${t._1}, ${t._2}]"
  // Inclusive range literal from a NumericRange (head/last endpoints).
  private def range[T](r: NumericRange[T]) = s"[${r.head}, ${r.last}]"

  implicit val intTupleEncoder: Encoder[(Int, Int)] = genericEncoder(tuple)
  implicit val intRangeEncoder: Encoder[NumericRange[Int]] = genericEncoder(range)
  implicit val bigIntTupleEncoder: Encoder[(BigInt, BigInt)] = genericEncoder(tuple)
  implicit val bigIntRangeEncoder: Encoder[NumericRange[BigInt]] = genericEncoder(range)
  implicit val longTupleEncoder: Encoder[(Long, Long)] = genericEncoder(tuple)
  implicit val longRangeEncoder: Encoder[NumericRange[Long]] = genericEncoder(range)
  // NOTE(review): there is a Double tuple encoder but no NumericRange[Double]
  // encoder — possibly intentional; confirm against the library's tests.
  implicit val doubleTupleEncoder: Encoder[(Double, Double)] = genericEncoder(tuple)
  implicit val bigDecimalTupleEncoder: Encoder[(BigDecimal, BigDecimal)] = genericEncoder(tuple)
  implicit val bigDecimalRangeEncoder: Encoder[NumericRange[BigDecimal]] = genericEncoder(range)
}
开发者ID:gustavoamigo,项目名称:quill-pgsql,代码行数:36,代码来源:Encoders.scala
示例2: genericEncoder
//设置package包名称以及导入依赖的类
package io.gustavoamigo.quill.pgsql.encoding.range.datetime
import java.sql.{PreparedStatement, Types}
import java.time.{LocalDate, ZonedDateTime, LocalDateTime}
import java.util.Date
import io.getquill.source.jdbc.JdbcSource

/** Encoders for date/time tuples, bound as PostgreSQL range literals. */
trait Encoders {
  this: JdbcSource[_, _] =>
  import Formatters._

  /** Renders the value via `valueToString` and binds it with Types.OTHER
    * (server-side cast). JDBC parameters are 1-based, hence `index + 1`. */
  private def genericEncoder[T](valueToString: (T => String)): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        row.setObject(index + 1, valueToString(value), Types.OTHER)
        row
      }
    }

  // "[lo, hi]" with both endpoints rendered by valToStr.
  private def tuple[T](t: (T, T))(valToStr: T => String) = s"[${valToStr(t._1)}, ${valToStr(t._2)}]"

  implicit val dateTupleEncoder: Encoder[(Date, Date)] = genericEncoder(tuple(_)(formatDate))
  implicit val localDateTimeTupleEncoder: Encoder[(LocalDateTime, LocalDateTime)] =
    genericEncoder(tuple(_)(formatLocalDateTime))
  implicit val zonedDateTimeTupleEncoder: Encoder[(ZonedDateTime, ZonedDateTime)] =
    genericEncoder(tuple(_)(formatZonedDateTime))
  // NOTE(review): this literal ends with ')' (exclusive upper bound) unlike the
  // tuple helper above — presumably intentional for discrete date ranges; confirm.
  implicit val dateTimeTupleEncoder: Encoder[(LocalDate, LocalDate)] =
    genericEncoder(t => s"[${formatLocalDate(t._1)}, ${formatLocalDate(t._2)})")
}
开发者ID:gustavoamigo,项目名称:quill-pgsql,代码行数:34,代码来源:Encoders.scala
示例3: genericEncoder
//设置package包名称以及导入依赖的类
package io.gustavoamigo.quill.pgsql.encoding.jodatime
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource
import org.joda.time._

/** Joda-Time encoders: values are bound as quoted SQL literals with Types.OTHER. */
trait Encoders {
  this: JdbcSource[_, _] =>
  import Formatters._

  /** Wraps the rendered value in single quotes and binds it with Types.OTHER.
    * Default rendering is `toString`. JDBC parameters are 1-based. */
  private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        val literal = s"'${valueToString(value)}'"
        row.setObject(index + 1, literal, Types.OTHER)
        row
      }
    }

  implicit val jodaDateTimeEncoder: Encoder[DateTime] = genericEncoder(_.toString(jodaTzDateTimeFormatter))
  implicit val jodaLocalDateEncoder: Encoder[LocalDate] = genericEncoder(_.toString(jodaDateFormatter))
  implicit val jodaLocalTimeEncoder: Encoder[LocalTime] = genericEncoder(_.toString(jodaTimeFormatter))
}
开发者ID:gustavoamigo,项目名称:quill-pgsql,代码行数:26,代码来源:Encoders.scala
示例4: genericEncoder
//设置package包名称以及导入依赖的类
package io.gustavoamigo.quill.pgsql.encoding.json.play
import java.sql.{Types, PreparedStatement}
import io.getquill.source.jdbc.JdbcSource

/** Encoder for Play JSON values; the stringified JSON is bound with
  * Types.OTHER so the server performs the cast. */
trait JsonEncoder {
  this: JdbcSource[_, _] =>
  import play.api.libs.json._

  /** Binds the rendered value directly (no quoting). Default rendering is
    * `toString`. JDBC parameters are 1-based, hence `index + 1`. */
  private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        val rendered = valueToString(value)
        row.setObject(index + 1, rendered, Types.OTHER)
        row
      }
    }

  implicit val jsonEncoder: Encoder[JsValue] = genericEncoder(Json.stringify)
}
开发者ID:gustavoamigo,项目名称:quill-pgsql,代码行数:21,代码来源:JsonEncoder.scala
示例5: genericEncoder
//设置package包名称以及导入依赖的类
package io.gustavoamigo.quill.pgsql.encoding.datetime
import java.sql.{PreparedStatement, Timestamp, Types}
import java.time._
import io.getquill.source.jdbc.JdbcSource

/** Encoders for java.time values, mixed into a JdbcSource (self-type). */
trait Encoders {
  this: JdbcSource[_, _] =>
  import Formatters._

  /** Binds the value as a single-quoted SQL literal with Types.OTHER.
    * Default rendering is `toString`. JDBC parameters are 1-based. */
  private def genericEncoder[T](valueToString: (T => String) = (r: T) => r.toString): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        row.setObject(index + 1, s"'${valueToString(value)}'", Types.OTHER)
        row
      }
    }

  /** Binds via a raw PreparedStatement setter instead of a string literal. */
  private def encoder[T](f: PreparedStatement => (Int, T) => Unit): Encoder[T] =
    new Encoder[T] {
      override def apply(index: Int, value: T, row: PreparedStatement) = {
        f(row)(index + 1, value)
        row
      }
    }

  // LocalDateTime goes through java.sql.Timestamp rather than a literal.
  implicit val localDateTimeEncoder: Encoder[LocalDateTime] = encoder { (row: PreparedStatement) =>
    (index: Int, d: LocalDateTime) => row.setObject(index, Timestamp.valueOf(d), Types.TIMESTAMP)
  }
  implicit val zonedDateTimeEncoder: Encoder[ZonedDateTime] = genericEncoder(_.format(bpTzDateTimeFormatter))
  implicit val localDateEncoder: Encoder[LocalDate] = genericEncoder(_.format(bpDateFormatter))
  implicit val localTimeEncoder: Encoder[LocalTime] = genericEncoder(_.format(bpTimeFormatter))
  implicit val offsetTimeEncoder: Encoder[OffsetTime] = genericEncoder(_.format(bpTzTimeFormatter))
}
开发者ID:gustavoamigo,项目名称:quill-pgsql,代码行数:38,代码来源:Encoders.scala
示例6: SqlServerSink
//设置package包名称以及导入依赖的类
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._

/** Sink that batches rows into one multi-row INSERT for SQL Server. */
class SqlServerSink extends DatabaseSink
  with Serializable {

  /** Prepares `INSERT INTO table(cols) VALUES (?,..),(?,..),...` with
    * `rowCount` placeholder groups; parameters are bound by the caller. */
  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columnList = rddSchema.fields.map(field => dialect.quoteIdentifier(field.name)).mkString(",")
    val placeholderGroup = rddSchema.fields.map(_ => "?").mkString("(", ",", ")")
    // One "(?,...,?)" group per row, comma separated — replaces the original
    // foldLeft-with-flag; the generated SQL is identical.
    val valuesClause = Seq.fill(rowCount)(placeholderGroup).mkString(", ")
    conn.prepareStatement(s"INSERT INTO $table($columnList) VALUES $valuesClause")
  }
}
开发者ID:coderdiaries,项目名称:yumi,代码行数:34,代码来源:SqlServerSink.scala
示例7: OracleDatabaseSink
//设置package包名称以及导入依赖的类
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._

/** Sink using Oracle's multi-row `INSERT ALL ... SELECT 1 FROM DUAL` form. */
class OracleDatabaseSink extends DatabaseSink with Serializable {

  /** Prepares an INSERT ALL statement containing `rowCount` INTO clauses;
    * parameters are bound by the caller. */
  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columnList = rddSchema.fields.map(field => dialect.quoteIdentifier(field.name)).mkString(",")
    val intoClause = rddSchema.fields.map(_ => "?").mkString(s"INTO $table($columnList) VALUES (", ",", ")")
    // One INTO clause per row, space separated — replaces the original
    // foldLeft-with-flag; the generated SQL is identical.
    val allInserts = Seq.fill(rowCount)(intoClause).mkString(" ")
    conn.prepareStatement(s"INSERT ALL $allInserts SELECT 1 FROM DUAL")
  }
}
开发者ID:coderdiaries,项目名称:yumi,代码行数:34,代码来源:OracleDatabaseSink.scala
示例8: MySqlSink
//设置package包名称以及导入依赖的类
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._

/** Sink that batches rows into one multi-row INSERT for MySQL. */
class MySqlSink extends DatabaseSink with Serializable {

  /** Prepares `INSERT INTO table(cols) VALUES (?,..),(?,..),...` with
    * `rowCount` placeholder groups; parameters are bound by the caller. */
  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columnList = rddSchema.fields.map(field => dialect.quoteIdentifier(field.name)).mkString(",")
    val placeholderGroup = rddSchema.fields.map(_ => "?").mkString("(", ",", ")")
    // One "(?,...,?)" group per row, comma separated — replaces the original
    // foldLeft-with-flag; the generated SQL is identical.
    val valuesClause = Seq.fill(rowCount)(placeholderGroup).mkString(", ")
    conn.prepareStatement(s"INSERT INTO $table($columnList) VALUES $valuesClause")
  }
}
开发者ID:coderdiaries,项目名称:yumi,代码行数:33,代码来源:MySqlSink.scala
示例9: PostgreSqlSink
//设置package包名称以及导入依赖的类
package yumi.pipeline.sinks
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types._

/** Sink that batches rows into one multi-row INSERT for PostgreSQL. */
class PostgreSqlSink extends DatabaseSink with Serializable {

  /** Prepares `INSERT INTO table(cols) VALUES (?,..),(?,..),...` with
    * `rowCount` placeholder groups; parameters are bound by the caller. */
  override def bulkInsertStatement(conn: Connection,
                                   table: String,
                                   rddSchema: StructType,
                                   dialect: JdbcDialect,
                                   rowCount: Int): PreparedStatement = {
    val columnList = rddSchema.fields.map(field => dialect.quoteIdentifier(field.name)).mkString(",")
    val placeholderGroup = rddSchema.fields.map(_ => "?").mkString("(", ",", ")")
    // One "(?,...,?)" group per row, comma separated — replaces the original
    // foldLeft-with-flag; the generated SQL is identical.
    val valuesClause = Seq.fill(rowCount)(placeholderGroup).mkString(", ")
    conn.prepareStatement(s"INSERT INTO $table($columnList) VALUES $valuesClause")
  }
}
开发者ID:coderdiaries,项目名称:yumi,代码行数:33,代码来源:PostgreSqlSink.scala
示例10: UserQuery
//设置package包名称以及导入依赖的类
package database
import models.{Types, User}
import java.sql.{Connection, PreparedStatement, ResultSet, SQLException, Statement}

/** Queries for the users.user table. */
object UserQuery {

  /** Inserts a new user row and returns the generated primary key.
    *
    * @param connection open JDBC connection (not closed here)
    * @return the generated user id
    * @throws SQLException if the insert fails or no generated key is returned
    */
  def create(
    connection : Connection,
    main_email : String,
    password : String,
    firstname : String,
    lastname : String,
    avatar : String
  ) : Types.userid = {
    val statement : PreparedStatement =
      connection.prepareStatement(
        "INSERT INTO users.user (" +
        "main_email, " +
        "password, " +
        "firstname, " +
        "lastname, " +
        "avatar) " +
        "VALUES(?, ?, ?, ?, ?)",
        Statement.RETURN_GENERATED_KEYS
      )
    try {
      // BUG FIX: JDBC parameter indices are 1-based; the original used 0..4,
      // which throws SQLException on every driver.
      statement.setString(1, main_email)
      statement.setString(2, password)
      statement.setString(3, firstname)
      statement.setString(4, lastname)
      statement.setString(5, avatar)
      // BUG FIX: the statement was never executed, so getGeneratedKeys()
      // could never return a key.
      statement.executeUpdate()
      val generatedKeys : ResultSet = statement.getGeneratedKeys()
      if (generatedKeys.next()) {
        generatedKeys.getLong(1)
      }
      else {
        // BUG FIX: message said "Adding ad" — copy-paste from another query.
        throw new SQLException("Adding user failed. No ID obtained.")
      }
    } finally {
      statement.close() // avoid leaking the statement on success or failure
    }
  }
}
开发者ID:dannywillems,项目名称:scala-micro-service-user,代码行数:43,代码来源:UserQuery.scala
示例11: CatOutput
//设置package包名称以及导入依赖的类
package formatter
import java.sql.{Connection, PreparedStatement}
import com.mysql.jdbc.exceptions.jdbc4.MySQLIntegrityConstraintViolationException
import renderer.CliCommunicator

/** Writes categories to the `category` table, one INSERT per element.
  * Duplicate-key violations are reported to the CLI and skipped, not fatal. */
case class CatOutput(dbCon: Connection, values: Iterable[Category]) extends Output {
  override def insert(): Unit = values.foreach { category =>
    // A fresh statement per row, always closed in the finally block.
    val stmt: PreparedStatement = dbCon.prepareStatement(
      "insert into category (pattern, category) values (?,?)")
    try {
      stmt.setString(1, category.pattern)
      stmt.setString(2, category.category)
      stmt.executeUpdate()
    } catch {
      case e: MySQLIntegrityConstraintViolationException =>
        CliCommunicator().talk(s"${e.getMessage}")
    } finally {
      stmt.close()
    }
  }
}
开发者ID:claudiusbr,项目名称:ExpenditureAnalyser,代码行数:29,代码来源:Output.scala
示例12: OracleJdbcPartition
//设置package包名称以及导入依赖的类
package util
import org.apache.spark.Partition
import java.sql.ResultSet
import java.sql.Connection
import scala.reflect.ClassTag
import org.apache.spark.SparkContext
import java.sql.PreparedStatement
import org.apache.spark.rdd.RDD
import org.apache.spark.Logging
import org.apache.spark.TaskContext
import org.apache.spark.util
/** Spark Partition that carries the query parameters for one Oracle split. */
class OracleJdbcPartition(idx: Int, parameters: Map[String, Object]) extends Partition {
  // Parameters used to bind the PreparedStatement for this partition.
  val partitionParameters: Map[String, Object] = parameters
  override def index: Int = idx
}
// Abstract RDD reading rows from Oracle via JDBC. Callers supply a connection
// factory, the SQL text, a partition generator, a function that binds
// partition parameters onto a PreparedStatement, and a row mapper (defaults
// to OracleJdbcRdd.resultSetToObjectArray, i.e. T = Array[Object]).
// NOTE(review): the parameter name `getOracleJdbcPatition` has a typo
// ("Patition"); renaming would break callers using named arguments, so it is
// left unchanged here.
abstract class OracleJdbcRdd[T:ClassTag] (
sc:SparkContext,
getConnection:() => Connection,
sql:String,
getOracleJdbcPatition :()=>Array[Partition],
preparedStatement :(PreparedStatement,OracleJdbcPartition)=> PreparedStatement,
mapRow :(ResultSet)=> T=OracleJdbcRdd.resultSetToObjectArray _)
extends RDD[T](sc,Nil)with Logging{
// Delegates partition creation entirely to the caller-supplied generator.
def getPartitions: Array[Partition] = {
getOracleJdbcPatition();
}
}
object OracleJdbcRdd {
  /** Maps the current row of `rs` to an Array[Object], one entry per column.
    * JDBC columns are 1-based, hence the `1 to columnCount` range. */
  def resultSetToObjectArray(rs: ResultSet): Array[Object] = {
    val columnCount = rs.getMetaData.getColumnCount
    (1 to columnCount).map(rs.getObject(_)).toArray
  }
  /** Serializable connection factory so connections can be opened on executors. */
  trait ConnectionFactory extends Serializable {
    @throws[Exception]
    def getConnection: Connection
  }
}
开发者ID:LotteryOne,项目名称:ScalaSpark,代码行数:47,代码来源:OracleJdbcRdd.scala
示例13: TaskMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTable
import model.{Task, TaskStatus}
import util.JdbcUtil._
// Converts Task rows between JDBC and the domain model. Column names and
// status literals come from TaskTable.
object TaskMarshaller {
// Builds a Task from the current row of `rs`.
def unmarshalTask(rs: ResultSet): Task = {
import TaskTable._
Task(
id = rs.getObject(COL_ID).asInstanceOf[UUID],
processId = rs.getObject(COL_PROCESS_ID).asInstanceOf[UUID],
processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
previousAttempts = rs.getInt(COL_ATTEMPTS),
// javaDate is provided by util.JdbcUtil._
startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
// No catch-all case: an unknown status literal raises MatchError.
status = rs.getString(COL_STATUS) match {
case STATUS_SUCCEEDED => TaskStatus.Success(javaDate(rs.getTimestamp(COL_ENDED_AT)))
case STATUS_FAILED => TaskStatus.Failure(javaDate(rs.getTimestamp(COL_ENDED_AT)), Option(rs.getString(COL_REASON)))
case STATUS_RUNNING => TaskStatus.Running()
}
)
}
// Binds the listed columns of `task` onto `stmt`, one parameter per column in
// the order of `columns`, starting at `startIndex` (JDBC is 1-based).
def marshalTask(task: Task, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
(implicit conn: Connection) = {
import TaskTable._
var index = startIndex
columns.foreach { col =>
col match {
case COL_ID => stmt.setObject(index, task.id)
case COL_PROCESS_ID => stmt.setObject(index, task.processId)
case COL_PROC_DEF_NAME => stmt.setString(index, task.processDefinitionName)
case COL_TASK_DEF_NAME => stmt.setString(index, task.taskDefinitionName)
case COL_EXECUTABLE => stmt.setString(index, PostgresJsonMarshaller.toJson(task.executable))
case COL_ATTEMPTS => stmt.setInt(index, task.previousAttempts)
case COL_STARTED => stmt.setTimestamp(index, task.startedAt)
case COL_STATUS => stmt.setString(index, task.status match {
case TaskStatus.Success(_) => STATUS_SUCCEEDED
case TaskStatus.Failure(_, _) => STATUS_FAILED
case TaskStatus.Running() => STATUS_RUNNING
})
// NOTE(review): `reasons` is matched from Failure's second field (an Option
// per unmarshalTask above); mkString(",") yields the value or "" for None —
// confirm that empty-string (rather than NULL) is intended for None.
case COL_REASON => stmt.setString(index, task.status match {
case TaskStatus.Failure(_, reasons) => reasons.mkString(",")
case _ => null
})
// NOTE(review): getOrElse(null) presumably relies on an implicit conversion
// from util.JdbcUtil._ to java.sql.Timestamp — confirm.
case COL_ENDED_AT => stmt.setTimestamp(index, task.endedAt.getOrElse(null))
}
index += 1
}
}
}
开发者ID:gilt,项目名称:sundial,代码行数:58,代码来源:TaskMarshaller.scala
示例14: ProcessTriggerRequestMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.{ProcessTriggerRequestTable, TaskTriggerRequestTable}
import model.ProcessTriggerRequest
import util.JdbcUtil._

/** SQL (de)serialization for ProcessTriggerRequest rows. */
object ProcessTriggerRequestMarshaller {

  /** Binds the listed columns of `request` onto `stmt`, one parameter per
    * column in the order of `columns`, starting at `startIndex` (JDBC is 1-based). */
  def marshal(request: ProcessTriggerRequest, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
             (implicit conn: Connection) = {
    import ProcessTriggerRequestTable._
    columns.zipWithIndex.foreach { case (col, offset) =>
      val index = startIndex + offset
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME => stmt.setString(index, request.processDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID => stmt.setObject(index, request.startedProcessId.orNull)
        case COL_TASK_FILTER => stmt.setArray(index, request.taskFilter.map(makeStringArray).orNull)
      }
    }
  }

  /** Reads a ProcessTriggerRequest from the current row of `rs`. */
  def unmarshal(rs: ResultSet): ProcessTriggerRequest = {
    import ProcessTriggerRequestTable._
    // SQL NULL becomes None via Option(...).
    val startedProcess = Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID])
    ProcessTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId = startedProcess,
      taskFilter = getStringArray(rs, COL_TASK_FILTER)
    )
  }
}
开发者ID:gilt,项目名称:sundial,代码行数:40,代码来源:ProcessTriggerRequestMarshaller.scala
示例15: TaskDefinitionMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskDefinitionTable
import model.{TaskBackoff, TaskDefinition, TaskDependencies, TaskLimits}
import util.JdbcUtil._
// Converts TaskDefinition rows between JDBC and the domain model.
// Column names come from TaskDefinitionTable.
object TaskDefinitionMarshaller {
// Binds the listed columns of `definition` onto `stmt`, one parameter per
// column in the order of `columns`, starting at `startIndex` (JDBC is 1-based).
def marshal(definition: TaskDefinition, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
(implicit conn: Connection) = {
import TaskDefinitionTable._
var index = startIndex
columns.foreach { col =>
col match {
case COL_NAME => stmt.setString(index, definition.name)
case COL_PROC_ID => stmt.setObject(index, definition.processId)
case COL_EXECUTABLE => stmt.setString(index, PostgresJsonMarshaller.toJson(definition.executable))
case COL_MAX_ATTEMPTS => stmt.setInt(index, definition.limits.maxAttempts)
// setObject + orNull writes SQL NULL when no limit is set.
case COL_MAX_EXECUTION_TIME => stmt.setObject(index, definition.limits.maxExecutionTimeSeconds.orNull)
case COL_BACKOFF_SECONDS => stmt.setInt(index, definition.backoff.seconds)
case COL_BACKOFF_EXPONENT => stmt.setDouble(index, definition.backoff.exponent)
// makeStringArray comes from util.JdbcUtil._ (needs the implicit Connection).
case COL_REQUIRED_DEPS => stmt.setArray(index, makeStringArray(definition.dependencies.required))
case COL_OPTIONAL_DEPS => stmt.setArray(index, makeStringArray(definition.dependencies.optional))
case COL_REQUIRE_EXPLICIT_SUCCESS => stmt.setBoolean(index, definition.requireExplicitSuccess)
}
index += 1
}
}
// Reads a TaskDefinition from the current row of `rs`.
def unmarshal(rs: ResultSet): TaskDefinition = {
import TaskDefinitionTable._
TaskDefinition(
name = rs.getString(COL_NAME),
processId = rs.getObject(COL_PROC_ID).asInstanceOf[UUID],
executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
limits = TaskLimits(
maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
// getIntOption (JdbcUtil) distinguishes SQL NULL from 0.
maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
),
backoff = TaskBackoff(
seconds = rs.getInt(COL_BACKOFF_SECONDS),
exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
),
dependencies = TaskDependencies(
// NULL arrays unmarshal to empty dependency lists.
required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
),
requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
)
}
}
开发者ID:gilt,项目名称:sundial,代码行数:55,代码来源:TaskDefinitionMarshaller.scala
示例16: TaskDefinitionTemplateMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import dao.postgres.common.TaskDefinitionTemplateTable
import model._
import util.JdbcUtil._
// Converts TaskDefinitionTemplate rows between JDBC and the domain model.
// Mirrors TaskDefinitionMarshaller but keyed by process definition name
// instead of process id; column names come from TaskDefinitionTemplateTable.
object TaskDefinitionTemplateMarshaller {
// Binds the listed columns of `definition` onto `stmt`, one parameter per
// column in the order of `columns`, starting at `startIndex` (JDBC is 1-based).
def marshal(definition: TaskDefinitionTemplate, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
(implicit conn: Connection) = {
import TaskDefinitionTemplateTable._
var index = startIndex
columns.foreach { col =>
col match {
case COL_NAME => stmt.setString(index, definition.name)
case COL_PROC_DEF_NAME => stmt.setString(index, definition.processDefinitionName)
case COL_EXECUTABLE => stmt.setString(index, PostgresJsonMarshaller.toJson(definition.executable))
case COL_MAX_ATTEMPTS => stmt.setInt(index, definition.limits.maxAttempts)
// setObject + orNull writes SQL NULL when no limit is set.
case COL_MAX_EXECUTION_TIME => stmt.setObject(index, definition.limits.maxExecutionTimeSeconds.orNull)
case COL_BACKOFF_SECONDS => stmt.setInt(index, definition.backoff.seconds)
case COL_BACKOFF_EXPONENT => stmt.setDouble(index, definition.backoff.exponent)
// makeStringArray comes from util.JdbcUtil._ (needs the implicit Connection).
case COL_REQUIRED_DEPS => stmt.setArray(index, makeStringArray(definition.dependencies.required))
case COL_OPTIONAL_DEPS => stmt.setArray(index, makeStringArray(definition.dependencies.optional))
case COL_REQUIRE_EXPLICIT_SUCCESS => stmt.setBoolean(index, definition.requireExplicitSuccess)
}
index += 1
}
}
// Reads a TaskDefinitionTemplate from the current row of `rs`.
def unmarshal(rs: ResultSet): TaskDefinitionTemplate = {
import TaskDefinitionTemplateTable._
TaskDefinitionTemplate(
name = rs.getString(COL_NAME),
processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
limits = TaskLimits(
maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
// getIntOption (JdbcUtil) distinguishes SQL NULL from 0.
maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
),
backoff = TaskBackoff(
seconds = rs.getInt(COL_BACKOFF_SECONDS),
exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
),
dependencies = TaskDependencies(
// NULL arrays unmarshal to empty dependency lists.
required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
),
requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
)
}
}
开发者ID:gilt,项目名称:sundial,代码行数:55,代码来源:TaskDefinitionTemplateMarshaller.scala
示例17: TaskTriggerRequestMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTriggerRequestTable
import model.TaskTriggerRequest
import util.JdbcUtil._

/** SQL (de)serialization for TaskTriggerRequest rows. */
object TaskTriggerRequestMarshaller {

  /** Binds the listed columns of `request` onto `stmt`, one parameter per
    * column in the order of `columns`, starting at `startIndex` (JDBC is 1-based). */
  def marshal(request: TaskTriggerRequest, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
             (implicit conn: Connection) = {
    import TaskTriggerRequestTable._
    columns.zipWithIndex.foreach { case (col, offset) =>
      val index = startIndex + offset
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME => stmt.setString(index, request.processDefinitionName)
        case COL_TASK_DEF_NAME => stmt.setString(index, request.taskDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID => stmt.setObject(index, request.startedProcessId.orNull)
      }
    }
  }

  /** Reads a TaskTriggerRequest from the current row of `rs`. */
  def unmarshal(rs: ResultSet): TaskTriggerRequest = {
    import TaskTriggerRequestTable._
    // SQL NULL becomes None via Option(...).
    val startedProcess = Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID])
    TaskTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId = startedProcess
    )
  }
}
开发者ID:gilt,项目名称:sundial,代码行数:39,代码来源:TaskTriggerRequestMarshaller.scala
示例18: ProcessMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet, Timestamp}
import java.util.UUID
import dao.postgres.common.ProcessTable
import model.{Process, ProcessStatus}
import util.JdbcUtil._
// Converts Process rows between JDBC and the domain model. Column names and
// status literals come from ProcessTable.
object ProcessMarshaller {
// Builds a Process from the current row of `rs`.
def unmarshalProcess(rs: ResultSet): Process = {
import ProcessTable._
Process(
id = rs.getObject(COL_ID).asInstanceOf[UUID],
processDefinitionName = rs.getString(COL_DEF_NAME),
// javaDate is provided by util.JdbcUtil._
startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
// No catch-all case: an unknown status literal raises MatchError.
status = rs.getString(COL_STATUS) match {
case STATUS_SUCCEEDED => ProcessStatus.Succeeded(javaDate(rs.getTimestamp(COL_ENDED_AT)))
case STATUS_FAILED => ProcessStatus.Failed(javaDate(rs.getTimestamp(COL_ENDED_AT)))
case STATUS_RUNNING => ProcessStatus.Running()
},
taskFilter = getStringArray(rs, COL_TASK_FILTER)
)
}
// Binds the listed columns of `process` onto `stmt`, one parameter per column
// in the order of `columns`, starting at `startIndex` (JDBC is 1-based).
def marshalProcess(process: Process, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
(implicit conn: Connection) = {
import ProcessTable._
var index = startIndex
columns.foreach { col =>
col match {
case COL_ID => stmt.setObject(index, process.id)
case COL_DEF_NAME => stmt.setString(index, process.processDefinitionName)
case COL_STARTED => stmt.setTimestamp(index, new Timestamp(process.startedAt.getTime()))
// NOTE(review): getOrElse(null) presumably relies on an implicit conversion
// from util.JdbcUtil._ to java.sql.Timestamp — confirm.
case COL_ENDED_AT => stmt.setTimestamp(index, process.endedAt.getOrElse(null))
case COL_STATUS => stmt.setString(index, process.status match {
case ProcessStatus.Succeeded(_) => STATUS_SUCCEEDED
case ProcessStatus.Failed(_) => STATUS_FAILED
case ProcessStatus.Running() => STATUS_RUNNING
})
// makeStringArray comes from util.JdbcUtil._ (needs the implicit Connection).
case COL_TASK_FILTER => stmt.setArray(index, process.taskFilter.map(makeStringArray).getOrElse(null))
}
index += 1
}
}
}
开发者ID:gilt,项目名称:sundial,代码行数:48,代码来源:ProcessMarshaller.scala
示例19: ProcessDefinitionMarshaller
//设置package包名称以及导入依赖的类
package dao.postgres.marshalling
import java.sql.{Connection, PreparedStatement, ResultSet}
import dao.postgres.common.ProcessDefinitionTable
import model.{EmailNotification, Notification, ProcessDefinition, ProcessOverlapAction}
import util.JdbcUtil._
// Converts ProcessDefinition rows between JDBC and the domain model.
// Note the mix of the static PostgresJsonMarshaller (schedule/teams) and this
// instance (notifications) — both are used below.
object ProcessDefinitionMarshaller {
private val postgresJsonMarshaller = new PostgresJsonMarshaller
// Binds the listed columns of `definition` onto `stmt`, one parameter per
// column in the order of `columns`, starting at `startIndex` (JDBC is 1-based).
def marshal(definition: ProcessDefinition, stmt: PreparedStatement, columns: Seq[String], startIndex: Int = 1)
(implicit conn: Connection) = {
import ProcessDefinitionTable._
var index = startIndex
columns.foreach { col =>
col match {
case COL_NAME => stmt.setString(index, definition.name)
case COL_DESCRIPTION => stmt.setString(index, definition.description.orNull)
case COL_SCHEDULE => stmt.setString(index, definition.schedule.map(PostgresJsonMarshaller.toJson).orNull)
case COL_OVERLAP_ACTION => stmt.setString(index, definition.overlapAction match {
case ProcessOverlapAction.Wait => OVERLAP_WAIT
case ProcessOverlapAction.Terminate => OVERLAP_TERMINATE
})
// Teams are always written as an empty JSON array; existing team data is
// only ever read (see getNotifications below), never written back.
case COL_TEAMS => stmt.setString(index, "[]")
case COL_NOTIFICATIONS => stmt.setString(index, postgresJsonMarshaller.toJson(definition.notifications))
// The column stores "disabled", the model calls it "paused".
case COL_DISABLED => stmt.setBoolean(index, definition.isPaused)
case COL_CREATED_AT => stmt.setTimestamp(index, definition.createdAt)
}
index += 1
}
}
// Reads a ProcessDefinition from the current row of `rs`.
def unmarshal(rs: ResultSet): ProcessDefinition = {
import ProcessDefinitionTable._
ProcessDefinition(
name = rs.getString(COL_NAME),
description = Option(rs.getString(COL_DESCRIPTION)),
schedule = Option(rs.getString(COL_SCHEDULE)).map(PostgresJsonMarshaller.toSchedule),
// No catch-all case: an unknown overlap-action literal raises MatchError.
overlapAction = rs.getString(COL_OVERLAP_ACTION) match {
case OVERLAP_WAIT => ProcessOverlapAction.Wait
case OVERLAP_TERMINATE => ProcessOverlapAction.Terminate
},
notifications = this.getNotifications(rs),
isPaused = rs.getBoolean(COL_DISABLED),
createdAt = javaDate(rs.getTimestamp(COL_CREATED_AT))
)
}
// Merges notifications from the dedicated column with legacy per-team email
// notifications derived from the teams column.
private def getNotifications(rs: ResultSet): Seq[Notification] = {
import ProcessDefinitionTable._
val teams = PostgresJsonMarshaller.toTeams(rs.getString(COL_TEAMS)).map(team => EmailNotification(team.name, team.email, team.notifyAction))
val notifications = postgresJsonMarshaller.toNotifications(rs.getString(COL_NOTIFICATIONS))
notifications ++ teams
}
}
开发者ID:gilt,项目名称:sundial,代码行数:59,代码来源:ProcessDefinitionMarshaller.scala
示例20: HashPartitionStrategy
//设置package包名称以及导入依赖的类
package io.eels.component.jdbc
import java.sql.{Connection, PreparedStatement}
import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.Publisher

/** Partitions a JDBC query into `numberOfPartitions` buckets by evaluating
  * `hashExpression` server-side and filtering on its value.
  *
  * @param hashExpression SQL expression expected to yield 0 until numberOfPartitions
  * @param numberOfPartitions number of parallel publishers to create
  */
case class HashPartitionStrategy(hashExpression: String,
                                 numberOfPartitions: Int) extends JdbcPartitionStrategy {

  /** Wraps `query` so only rows whose hash bucket equals `partNum` are returned.
    * Fixed: dropped the no-op `.stripMargin` (the string is a single line with
    * no margin characters) and the unnecessary triple quotes; output unchanged.
    * NOTE(review): the derived table has no alias — some databases (e.g. MySQL)
    * require one; confirm against the supported dialects. */
  def partitionedQuery(partNum: Int, query: String): String =
    s"SELECT * from ($query) WHERE $hashExpression = $partNum"

  /** Creates one publisher per bucket, each issuing its own filtered query. */
  override def parts(connFn: () => Connection,
                     query: String,
                     bindFn: (PreparedStatement) => Unit,
                     fetchSize: Int,
                     dialect: JdbcDialect): Seq[Publisher[Seq[Row]]] = {
    for (k <- 0 until numberOfPartitions) yield {
      new JdbcPublisher(connFn, partitionedQuery(k, query), bindFn, fetchSize, dialect)
    }
  }
}
开发者ID:51zero,项目名称:eel-sdk,代码行数:26,代码来源:HashPartitionStrategy.scala
注:本文中的java.sql.PreparedStatement类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论