java.sql.PreparedStatement Scala Examples
The following examples show how to use java.sql.PreparedStatement from Scala.
Each example is taken from an open-source project; the project name and license are noted above each listing, and the surrounding code can be found in the original source file.
Example 1
Source File: Queries.scala from daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.sql.queries

import java.io.InputStream
import java.sql.{Blob, Connection, PreparedStatement}

import anorm.{
  BatchSql,
  Column,
  MetaDataItem,
  NamedParameter,
  RowParser,
  SqlMappingError,
  SqlParser,
  SqlRequestError,
  ToStatement
}
import com.google.protobuf.ByteString

trait Queries extends ReadQueries with WriteQueries

object Queries {
  val TablePrefix = "ledger"
  val LogTable = s"${TablePrefix}_log"
  val MetaTable = s"${TablePrefix}_meta"
  val StateTable = s"${TablePrefix}_state"

  // By explicitly writing a value to a "table_key" column, we ensure we only ever have one row in
  // the meta table. An attempt to write a second row will result in a key conflict.
  private[queries] val MetaTableKey = 0

  def executeBatchSql(
      query: String,
      params: Iterable[Seq[NamedParameter]],
  )(implicit connection: Connection): Unit = {
    if (params.nonEmpty)
      BatchSql(query, params.head, params.drop(1).toArray: _*).execute()
    ()
  }

  implicit def byteStringToStatement: ToStatement[ByteString] = new ToStatement[ByteString] {
    override def set(s: PreparedStatement, index: Int, v: ByteString): Unit =
      s.setBinaryStream(index, v.newInput(), v.size())
  }

  implicit def columnToByteString: Column[ByteString] =
    Column.nonNull { (value: Any, meta: MetaDataItem) =>
      value match {
        case blob: Blob => Right(ByteString.readFrom(blob.getBinaryStream))
        case byteArray: Array[Byte] => Right(ByteString.copyFrom(byteArray))
        case inputStream: InputStream => Right(ByteString.readFrom(inputStream))
        case _ =>
          Left[SqlRequestError, ByteString](
            SqlMappingError(s"Cannot convert value of column ${meta.column} to ByteString"))
      }
    }

  def getBytes(columnName: String): RowParser[ByteString] =
    SqlParser.get(columnName)(columnToByteString)
}
Example 2
Source File: ProcessMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet, Timestamp}
import java.util.UUID
import dao.postgres.common.ProcessTable
import model.{Process, ProcessStatus}
import util.JdbcUtil._

object ProcessMarshaller {

  def unmarshalProcess(rs: ResultSet): Process = {
    import ProcessTable._
    Process(
      id = rs.getObject(COL_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_DEF_NAME),
      startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
      status = rs.getString(COL_STATUS) match {
        case STATUS_SUCCEEDED =>
          ProcessStatus.Succeeded(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_FAILED =>
          ProcessStatus.Failed(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_RUNNING => ProcessStatus.Running()
      },
      taskFilter = getStringArray(rs, COL_TASK_FILTER)
    )
  }

  def marshalProcess(process: Process,
                     stmt: PreparedStatement,
                     columns: Seq[String],
                     startIndex: Int = 1)(implicit conn: Connection) = {
    import ProcessTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_ID => stmt.setObject(index, process.id)
        case COL_DEF_NAME => stmt.setString(index, process.processDefinitionName)
        case COL_STARTED =>
          stmt.setTimestamp(index, new Timestamp(process.startedAt.getTime()))
        case COL_ENDED_AT =>
          stmt.setTimestamp(index, process.endedAt.getOrElse(null))
        case COL_STATUS =>
          stmt.setString(
            index,
            process.status match {
              case ProcessStatus.Succeeded(_) => STATUS_SUCCEEDED
              case ProcessStatus.Failed(_) => STATUS_FAILED
              case ProcessStatus.Running() => STATUS_RUNNING
            }
          )
        case COL_TASK_FILTER =>
          stmt.setArray(index, process.taskFilter.map(makeStringArray).getOrElse(null))
      }
      index += 1
    }
  }
}
Example 3
Source File: DataServiceTest.scala from kafka-jdbc-connector with Apache License 2.0
package com.agoda.kafka.connector.jdbc.services

import java.sql.{Connection, PreparedStatement, ResultSet, ResultSetMetaData}

import com.agoda.kafka.connector.jdbc.utils.DataConverter
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord
import org.scalatest.mockito.MockitoSugar
import org.mockito.Mockito._
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.duration._
import scala.util.Success

class DataServiceTest extends WordSpec with Matchers with MockitoSugar {

  "Data Service" should {

    val spName = "stored-procedure"
    val connection = mock[Connection]
    val converter = mock[DataConverter]
    val sourceRecord1 = mock[SourceRecord]
    val sourceRecord2 = mock[SourceRecord]
    val resultSet = mock[ResultSet]
    val resultSetMetadata = mock[ResultSetMetaData]
    val preparedStatement = mock[PreparedStatement]
    val schema = mock[Schema]

    val dataService = new DataService {
      override def storedProcedureName: String = spName

      override protected def createPreparedStatement(connection: Connection) =
        Success(preparedStatement)

      override protected def extractRecords(resultSet: ResultSet, schema: Schema) =
        Success(Seq(sourceRecord1, sourceRecord2))

      override def dataConverter: DataConverter = converter
    }

    "get records" in {
      doNothing().when(preparedStatement).setQueryTimeout(1)
      when(preparedStatement.executeQuery).thenReturn(resultSet)
      when(resultSet.getMetaData).thenReturn(resultSetMetadata)
      when(converter.convertSchema(spName, resultSetMetadata)).thenReturn(Success(schema))

      dataService.getRecords(connection, 1.second) shouldBe Success(Seq(sourceRecord1, sourceRecord2))

      verify(preparedStatement).setQueryTimeout(1)
      verify(preparedStatement).executeQuery
      verify(resultSet).getMetaData
      verify(converter).convertSchema(spName, resultSetMetadata)
    }
  }
}
Example 4
Source File: PrepareMonixJdbcSpecBase.scala from quill with Apache License 2.0
package io.getquill

import java.sql.{ Connection, PreparedStatement, ResultSet }

import io.getquill.context.jdbc.ResultSetExtractor
import io.getquill.context.sql.ProductSpec
import monix.eval.Task
import org.scalactic.Equality

trait PrepareMonixJdbcSpecBase extends ProductSpec {

  implicit val productEq = new Equality[Product] {
    override def areEqual(a: Product, b: Any): Boolean = b match {
      case Product(_, desc, sku) => desc == a.description && sku == a.sku
      case _ => false
    }
  }

  def productExtractor: ResultSet => Product

  def withOrderedIds(products: List[Product]) =
    products.zipWithIndex.map { case (product, id) => product.copy(id = id.toLong + 1) }

  def singleInsert(conn: => Connection)(prep: Connection => Task[PreparedStatement]) = {
    Task(conn).bracket { conn =>
      prep(conn).bracket { stmt =>
        Task(stmt.execute())
      }(stmt => Task(stmt.close()))
    }(conn => Task(conn.close()))
  }

  def batchInsert(conn: => Connection)(prep: Connection => Task[List[PreparedStatement]]) = {
    Task(conn).bracket { conn =>
      prep(conn).flatMap(stmts =>
        Task.sequence(
          stmts.map(stmt =>
            Task(stmt).bracket { stmt =>
              Task(stmt.execute())
            }(stmt => Task(stmt.close())))
        ))
    }(conn => Task(conn.close()))
  }

  def extractResults[T](conn: => Connection)(prep: Connection => Task[PreparedStatement])(extractor: ResultSet => T) = {
    Task(conn).bracket { conn =>
      prep(conn).bracket { stmt =>
        Task(stmt.executeQuery()).bracket { rs =>
          Task(ResultSetExtractor(rs, extractor))
        }(rs => Task(rs.close()))
      }(stmt => Task(stmt.close()))
    }(conn => Task(conn.close()))
  }

  def extractProducts(conn: => Connection)(prep: Connection => Task[PreparedStatement]) =
    extractResults(conn)(prep)(productExtractor)
}
Example 5
Source File: PrepareJdbcSpecBase.scala from quill with Apache License 2.0
package io.getquill.context.jdbc

import java.sql.{ Connection, PreparedStatement, ResultSet }

import io.getquill.context.sql.ProductSpec
import io.getquill.util.Using.Manager
import org.scalactic.Equality

import scala.util.{ Success, Failure }

trait PrepareJdbcSpecBase extends ProductSpec {

  implicit val productEq = new Equality[Product] {
    override def areEqual(a: Product, b: Any): Boolean = b match {
      case Product(_, desc, sku) => desc == a.description && sku == a.sku
      case _ => false
    }
  }

  def productExtractor: ResultSet => Product

  def withOrderedIds(products: List[Product]) =
    products.zipWithIndex.map { case (product, id) => product.copy(id = id.toLong + 1) }

  def singleInsert(conn: => Connection)(prep: Connection => PreparedStatement) = {
    val flag = Manager { use =>
      val c = use(conn)
      val s = use(prep(c))
      s.execute()
    }
    flag match {
      case Success(value) => value
      case Failure(e) => throw e
    }
  }

  def batchInsert(conn: => Connection)(prep: Connection => List[PreparedStatement]) = {
    val r = Manager { use =>
      val c = use(conn)
      val st = prep(c)
      appendExecuteSequence(st)
    }
    r.flatten match {
      case Success(value) => value
      case Failure(e) => throw e
    }
  }

  def extractResults[T](conn: => Connection)(prep: Connection => PreparedStatement)(extractor: ResultSet => T) = {
    val r = Manager { use =>
      val c = use(conn)
      val st = use(prep(c))
      val rs = st.executeQuery()
      ResultSetExtractor(rs, extractor)
    }
    r match {
      case Success(v) => v
      case Failure(e) => throw e
    }
  }

  def extractProducts(conn: => Connection)(prep: Connection => PreparedStatement): List[Product] =
    extractResults(conn)(prep)(productExtractor)

  def appendExecuteSequence(actions: => List[PreparedStatement]) = {
    Manager { use =>
      actions.map { stmt =>
        val s = use(stmt)
        s.execute()
      }
    }
  }
}
Example 6
Source File: JdbcSourceStage.scala from fusion-data with Apache License 2.0
package mass.connector.sql

import java.sql.{ Connection, PreparedStatement, ResultSet }

import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler }
import akka.stream.{ Attributes, Outlet, SourceShape }
import javax.sql.DataSource
import fusion.jdbc.ConnectionPreparedStatementCreator
import fusion.jdbc.util.JdbcUtils

import scala.util.control.NonFatal

class JdbcSourceStage(dataSource: DataSource, creator: ConnectionPreparedStatementCreator, fetchRowSize: Int)
    extends GraphStage[SourceShape[ResultSet]] {
  private val out: Outlet[ResultSet] = Outlet("JdbcSource.out")

  override def shape: SourceShape[ResultSet] = SourceShape(out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with OutHandler {
      var maybeConn = Option.empty[(Connection, Boolean, PreparedStatement, ResultSet)]

      setHandler(out, this)

      override def onPull(): Unit =
        maybeConn match {
          case Some((_, _, _, rs)) if rs.next() =>
            push(out, rs)
          case Some(_) =>
            completeStage()
          case None =>
            () // do nothing; wait for preStart() to complete
        }

      override def preStart(): Unit =
        try {
          val conn = dataSource.getConnection
          val autoCommit = conn.getAutoCommit
          conn.setAutoCommit(false)
          val stmt = creator(conn)
          val rs = stmt.executeQuery()
          // rs.setFetchDirection(ResultSet.TYPE_FORWARD_ONLY)
          rs.setFetchSize(fetchRowSize)
          maybeConn = Option((conn, autoCommit, stmt, rs))
        } catch {
          case NonFatal(e) => failStage(e)
        }

      override def postStop(): Unit =
        for {
          (conn, autoCommit, stmt, rs) <- maybeConn
        } {
          JdbcUtils.closeResultSet(rs)
          JdbcUtils.closeStatement(stmt)
          conn.setAutoCommit(autoCommit)
          JdbcUtils.closeConnection(conn)
        }
    }
}
Example 7
Source File: EtlStreamFactory.scala from fusion-data with Apache License 2.0
package mass.rdp.etl.graph

import java.sql.PreparedStatement

import akka.NotUsed
import akka.stream.scaladsl.{ Sink, Source }
import fusion.jdbc.util.JdbcUtils
import mass.connector.Connector
import mass.connector.sql._
import mass.core.event.{ EventData, EventDataSimple }

import scala.concurrent.Future

trait EtlStreamFactory {
  def `type`: String
  def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed]
  def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]]
}

class EtlStreamJdbcFactory extends EtlStreamFactory {
  override def `type`: String = "jdbc"

  override def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed] =
    JdbcSource(s.script.content.get, Nil, 1000)(c.asInstanceOf[SQLConnector].dataSource)
      .via(JdbcFlow.flowJdbcResultSet)
      .map(jrs => EventDataSql(jrs))

  def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]] = {
    def action(event: EventData, stmt: PreparedStatement): Unit = {
      val args: Iterable[Any] = event match {
        case _: EventDataSimple => event.data.asInstanceOf[Iterable[Any]]
        case eventDataSql: EventDataSql => eventDataSql.data.values
        case _ => throw new EtlGraphException(s"Invalid EventData: $event.")
      }
      JdbcUtils.setStatementParameters(stmt, args)
    }

    JdbcSink[EventData](conn => conn.prepareStatement(s.script.content.get), action, 1000)(
      c.asInstanceOf[SQLConnector].dataSource)
  }
}
Example 8
Source File: ProcessDefinitionMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import dao.postgres.common.ProcessDefinitionTable
import model.{
  EmailNotification,
  Notification,
  ProcessDefinition,
  ProcessOverlapAction
}
import util.JdbcUtil._

object ProcessDefinitionMarshaller {

  private val postgresJsonMarshaller = new PostgresJsonMarshaller

  def marshal(definition: ProcessDefinition,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import ProcessDefinitionTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME => stmt.setString(index, definition.name)
        case COL_DESCRIPTION =>
          stmt.setString(index, definition.description.orNull)
        case COL_SCHEDULE =>
          stmt.setString(
            index,
            definition.schedule.map(PostgresJsonMarshaller.toJson).orNull)
        case COL_OVERLAP_ACTION =>
          stmt.setString(index, definition.overlapAction match {
            case ProcessOverlapAction.Wait => OVERLAP_WAIT
            case ProcessOverlapAction.Terminate => OVERLAP_TERMINATE
          })
        case COL_TEAMS => stmt.setString(index, "[]")
        case COL_NOTIFICATIONS =>
          stmt.setString(
            index,
            postgresJsonMarshaller.toJson(definition.notifications))
        case COL_DISABLED => stmt.setBoolean(index, definition.isPaused)
        case COL_CREATED_AT => stmt.setTimestamp(index, definition.createdAt)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): ProcessDefinition = {
    import ProcessDefinitionTable._
    ProcessDefinition(
      name = rs.getString(COL_NAME),
      description = Option(rs.getString(COL_DESCRIPTION)),
      schedule = Option(rs.getString(COL_SCHEDULE))
        .map(PostgresJsonMarshaller.toSchedule),
      overlapAction = rs.getString(COL_OVERLAP_ACTION) match {
        case OVERLAP_WAIT => ProcessOverlapAction.Wait
        case OVERLAP_TERMINATE => ProcessOverlapAction.Terminate
      },
      notifications = this.getNotifications(rs),
      isPaused = rs.getBoolean(COL_DISABLED),
      createdAt = javaDate(rs.getTimestamp(COL_CREATED_AT))
    )
  }

  private def getNotifications(rs: ResultSet): Seq[Notification] = {
    import ProcessDefinitionTable._
    val teams = PostgresJsonMarshaller
      .toTeams(rs.getString(COL_TEAMS))
      .map(team => EmailNotification(team.name, team.email, team.notifyAction))
    val notifications =
      postgresJsonMarshaller.toNotifications(rs.getString(COL_NOTIFICATIONS))
    notifications ++ teams
  }
}
Example 9
Source File: TaskDefinitionMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskDefinitionTable
import model.{TaskBackoff, TaskDefinition, TaskDependencies, TaskLimits}
import util.JdbcUtil._

object TaskDefinitionMarshaller {

  def marshal(definition: TaskDefinition,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskDefinitionTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME => stmt.setString(index, definition.name)
        case COL_PROC_ID => stmt.setObject(index, definition.processId)
        case COL_EXECUTABLE =>
          stmt.setString(index, PostgresJsonMarshaller.toJson(definition.executable))
        case COL_MAX_ATTEMPTS => stmt.setInt(index, definition.limits.maxAttempts)
        case COL_MAX_EXECUTION_TIME =>
          stmt.setObject(index, definition.limits.maxExecutionTimeSeconds.orNull)
        case COL_BACKOFF_SECONDS => stmt.setInt(index, definition.backoff.seconds)
        case COL_BACKOFF_EXPONENT => stmt.setDouble(index, definition.backoff.exponent)
        case COL_REQUIRED_DEPS =>
          stmt.setArray(index, makeStringArray(definition.dependencies.required))
        case COL_OPTIONAL_DEPS =>
          stmt.setArray(index, makeStringArray(definition.dependencies.optional))
        case COL_REQUIRE_EXPLICIT_SUCCESS =>
          stmt.setBoolean(index, definition.requireExplicitSuccess)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskDefinition = {
    import TaskDefinitionTable._
    TaskDefinition(
      name = rs.getString(COL_NAME),
      processId = rs.getObject(COL_PROC_ID).asInstanceOf[UUID],
      executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      limits = TaskLimits(
        maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
        maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
      ),
      backoff = TaskBackoff(
        seconds = rs.getInt(COL_BACKOFF_SECONDS),
        exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
      ),
      dependencies = TaskDependencies(
        required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
        optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
      ),
      requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
    )
  }
}
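A hedged usage sketch for the marshaller above. The table name "task_definition" and the surrounding setup are illustrative (not part of the sundial source); the key point is that the column list passed to marshal must match the order of the ? placeholders in the SQL:

// Illustrative only: assumes an open java.sql.Connection and a "task_definition" table
// whose columns match the constants in TaskDefinitionTable.
import dao.postgres.common.TaskDefinitionTable._

implicit val conn: java.sql.Connection = dataSource.getConnection // dataSource is assumed
val stmt = conn.prepareStatement(
  s"UPDATE task_definition SET $COL_BACKOFF_SECONDS = ?, $COL_BACKOFF_EXPONENT = ? WHERE $COL_NAME = ?")
// Bind the three placeholders in order, reusing the marshaller's column-driven dispatch.
TaskDefinitionMarshaller.marshal(definition, stmt,
  Seq(COL_BACKOFF_SECONDS, COL_BACKOFF_EXPONENT, COL_NAME))
stmt.executeUpdate()
stmt.close()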
Example 10
Source File: TimeBasedDataService.scala from kafka-jdbc-connector with Apache License 2.0
package com.agoda.kafka.connector.jdbc.services

import java.sql.{Connection, PreparedStatement, ResultSet, Timestamp}
import java.util.{Date, GregorianCalendar, TimeZone}

import com.agoda.kafka.connector.jdbc.JdbcSourceConnectorConstants
import com.agoda.kafka.connector.jdbc.models.DatabaseProduct
import com.agoda.kafka.connector.jdbc.models.DatabaseProduct.{MsSQL, MySQL}
import com.agoda.kafka.connector.jdbc.models.Mode.TimestampMode
import com.agoda.kafka.connector.jdbc.utils.DataConverter
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.util.Try

case class TimeBasedDataService(databaseProduct: DatabaseProduct,
                                storedProcedureName: String,
                                batchSize: Int,
                                batchSizeVariableName: String,
                                timestampVariableName: String,
                                var timestampOffset: Long,
                                timestampFieldName: String,
                                topic: String,
                                keyFieldOpt: Option[String],
                                dataConverter: DataConverter,
                                calendar: GregorianCalendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"))
                               ) extends DataService {

  override def createPreparedStatement(connection: Connection): Try[PreparedStatement] = Try {
    val preparedStatement = databaseProduct match {
      case MsSQL =>
        connection.prepareStatement(s"EXECUTE $storedProcedureName @$timestampVariableName = ?, @$batchSizeVariableName = ?")
      case MySQL =>
        connection.prepareStatement(s"CALL $storedProcedureName (@$timestampVariableName := ?, @$batchSizeVariableName := ?)")
    }
    preparedStatement.setTimestamp(1, new Timestamp(timestampOffset), calendar)
    preparedStatement.setObject(2, batchSize)
    preparedStatement
  }

  override def extractRecords(resultSet: ResultSet, schema: Schema): Try[Seq[SourceRecord]] = Try {
    val sourceRecords = ListBuffer.empty[SourceRecord]
    var max = timestampOffset
    while (resultSet.next()) {
      dataConverter.convertRecord(schema, resultSet) map { record =>
        val time = record.get(timestampFieldName).asInstanceOf[Date].getTime
        max = if (time > max) {
          keyFieldOpt match {
            case Some(keyField) =>
              sourceRecords += new SourceRecord(
                Map(JdbcSourceConnectorConstants.STORED_PROCEDURE_NAME_KEY -> storedProcedureName).asJava,
                Map(TimestampMode.entryName -> time).asJava,
                topic, null, schema, record.get(keyField), schema, record
              )
            case None =>
              sourceRecords += new SourceRecord(
                Map(JdbcSourceConnectorConstants.STORED_PROCEDURE_NAME_KEY -> storedProcedureName).asJava,
                Map(TimestampMode.entryName -> time).asJava,
                topic, schema, record
              )
          }
          time
        } else max
      }
    }
    timestampOffset = max
    sourceRecords
  }

  override def toString: String = {
    s"""
       |{
       |  "name" : "${this.getClass.getSimpleName}"
       |  "mode" : "${TimestampMode.entryName}"
       |  "stored-procedure.name" : "$storedProcedureName"
       |}
     """.stripMargin
  }
}
Example 11
Source File: TaskDefinitionTemplateMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import dao.postgres.common.TaskDefinitionTemplateTable
import model._
import util.JdbcUtil._

object TaskDefinitionTemplateMarshaller {

  def marshal(definition: TaskDefinitionTemplate,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskDefinitionTemplateTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME => stmt.setString(index, definition.name)
        case COL_PROC_DEF_NAME => stmt.setString(index, definition.processDefinitionName)
        case COL_EXECUTABLE =>
          stmt.setString(index, PostgresJsonMarshaller.toJson(definition.executable))
        case COL_MAX_ATTEMPTS => stmt.setInt(index, definition.limits.maxAttempts)
        case COL_MAX_EXECUTION_TIME =>
          stmt.setObject(index, definition.limits.maxExecutionTimeSeconds.orNull)
        case COL_BACKOFF_SECONDS => stmt.setInt(index, definition.backoff.seconds)
        case COL_BACKOFF_EXPONENT => stmt.setDouble(index, definition.backoff.exponent)
        case COL_REQUIRED_DEPS =>
          stmt.setArray(index, makeStringArray(definition.dependencies.required))
        case COL_OPTIONAL_DEPS =>
          stmt.setArray(index, makeStringArray(definition.dependencies.optional))
        case COL_REQUIRE_EXPLICIT_SUCCESS =>
          stmt.setBoolean(index, definition.requireExplicitSuccess)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskDefinitionTemplate = {
    import TaskDefinitionTemplateTable._
    TaskDefinitionTemplate(
      name = rs.getString(COL_NAME),
      processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
      executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      limits = TaskLimits(
        maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
        maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
      ),
      backoff = TaskBackoff(
        seconds = rs.getInt(COL_BACKOFF_SECONDS),
        exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
      ),
      dependencies = TaskDependencies(
        required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
        optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
      ),
      requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
    )
  }
}
Example 12
Source File: ProcessTriggerRequestMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.{ProcessTriggerRequestTable, TaskTriggerRequestTable}
import model.ProcessTriggerRequest
import util.JdbcUtil._

object ProcessTriggerRequestMarshaller {

  def marshal(request: ProcessTriggerRequest,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import ProcessTriggerRequestTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME => stmt.setString(index, request.processDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID => stmt.setObject(index, request.startedProcessId.orNull)
        case COL_TASK_FILTER => stmt.setArray(index, request.taskFilter.map(makeStringArray).orNull)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): ProcessTriggerRequest = {
    import ProcessTriggerRequestTable._
    ProcessTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId = Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID]),
      taskFilter = getStringArray(rs, COL_TASK_FILTER)
    )
  }
}
Example 13
Source File: TaskMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTable
import model.{Task, TaskStatus}
import util.JdbcUtil._

object TaskMarshaller {

  def unmarshalTask(rs: ResultSet): Task = {
    import TaskTable._
    Task(
      id = rs.getObject(COL_ID).asInstanceOf[UUID],
      processId = rs.getObject(COL_PROCESS_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
      taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
      executable = PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      previousAttempts = rs.getInt(COL_ATTEMPTS),
      startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
      status = rs.getString(COL_STATUS) match {
        case STATUS_SUCCEEDED =>
          TaskStatus.Success(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_FAILED =>
          TaskStatus.Failure(javaDate(rs.getTimestamp(COL_ENDED_AT)), Option(rs.getString(COL_REASON)))
        case STATUS_RUNNING => TaskStatus.Running()
      }
    )
  }

  def marshalTask(task: Task,
                  stmt: PreparedStatement,
                  columns: Seq[String],
                  startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_ID => stmt.setObject(index, task.id)
        case COL_PROCESS_ID => stmt.setObject(index, task.processId)
        case COL_PROC_DEF_NAME => stmt.setString(index, task.processDefinitionName)
        case COL_TASK_DEF_NAME => stmt.setString(index, task.taskDefinitionName)
        case COL_EXECUTABLE => stmt.setString(index, PostgresJsonMarshaller.toJson(task.executable))
        case COL_ATTEMPTS => stmt.setInt(index, task.previousAttempts)
        case COL_STARTED => stmt.setTimestamp(index, task.startedAt)
        case COL_STATUS =>
          stmt.setString(index, task.status match {
            case TaskStatus.Success(_) => STATUS_SUCCEEDED
            case TaskStatus.Failure(_, _) => STATUS_FAILED
            case TaskStatus.Running() => STATUS_RUNNING
          })
        case COL_REASON =>
          stmt.setString(index, task.status match {
            case TaskStatus.Failure(_, reasons) => reasons.mkString(",")
            case _ => null
          })
        case COL_ENDED_AT => stmt.setTimestamp(index, task.endedAt.getOrElse(null))
      }
      index += 1
    }
  }
}
Example 14
Source File: TaskTriggerRequestMarshaller.scala from sundial with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTriggerRequestTable
import model.TaskTriggerRequest
import util.JdbcUtil._

object TaskTriggerRequestMarshaller {

  def marshal(request: TaskTriggerRequest,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskTriggerRequestTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME => stmt.setString(index, request.processDefinitionName)
        case COL_TASK_DEF_NAME => stmt.setString(index, request.taskDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID => stmt.setObject(index, request.startedProcessId.orNull)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskTriggerRequest = {
    import TaskTriggerRequestTable._
    TaskTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId = Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID])
    )
  }
}
Example 15
Source File: MysqlSink.scala from flink-rookie with Apache License 2.0
package com.venn.stream.api.jdbcOutput

import java.sql.{Connection, DriverManager, PreparedStatement, SQLException}
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.slf4j.{Logger, LoggerFactory}

class MysqlSink extends RichSinkFunction[User] {

  val logger: Logger = LoggerFactory.getLogger("MysqlSink")
  var conn: Connection = _
  var ps: PreparedStatement = _
  val jdbcUrl = "jdbc:mysql://192.168.229.128:3306?useSSL=false&allowPublicKeyRetrieval=true"
  val username = "root"
  val password = "123456"
  val driverName = "com.mysql.jdbc.Driver"

  override def open(parameters: Configuration): Unit = {
    try {
      Class.forName(driverName)
      conn = DriverManager.getConnection(jdbcUrl, username, password)
      // disable auto commit
      conn.setAutoCommit(false)
    } catch {
      case e @ (_: ClassNotFoundException | _: SQLException) =>
        logger.error("init mysql error")
        e.printStackTrace()
        System.exit(-1)
    }
  }

  override def invoke(user: User, context: SinkFunction.Context[_]): Unit = {
    println("get user : " + user.toString)
    ps = conn.prepareStatement("insert into async.user(username, password, sex, phone) values(?,?,?,?)")
    ps.setString(1, user.username)
    ps.setString(2, user.password)
    ps.setInt(3, user.sex)
    ps.setString(4, user.phone)
    ps.execute()
    conn.commit()
  }

  override def close(): Unit = {
    if (conn != null) {
      conn.commit()
      conn.close()
    }
  }
}
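Note that the sink above re-prepares the INSERT statement on every invoke(). A common variation, sketched below using the same field names but not part of the original example, prepares the statement once in open() and only binds parameters per record:

  // Hedged sketch of an alternative open()/invoke() pair (same fields as above).
  override def open(parameters: Configuration): Unit = {
    // ... driver and connection setup exactly as in the original open() ...
    ps = conn.prepareStatement("insert into async.user(username, password, sex, phone) values(?,?,?,?)")
  }

  override def invoke(user: User, context: SinkFunction.Context[_]): Unit = {
    // Reuse the statement prepared in open(); only bind and execute here.
    ps.setString(1, user.username)
    ps.setString(2, user.password)
    ps.setInt(3, user.sex)
    ps.setString(4, user.phone)
    ps.execute()
    conn.commit()
  }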
Example 16
Source File: MysqlSink1.scala from flink-rookie with Apache License 2.0
package com.venn.stream.api.jdbcOutput

import java.sql.{Connection, DriverManager, PreparedStatement, SQLException}
import org.apache.flink.api.common.io.OutputFormat
import org.apache.flink.configuration.Configuration
import org.slf4j.{Logger, LoggerFactory}

class MysqlSink1 extends OutputFormat[User] {

  val logger: Logger = LoggerFactory.getLogger("MysqlSink1")
  var conn: Connection = _
  var ps: PreparedStatement = _
  val jdbcUrl = "jdbc:mysql://192.168.229.128:3306?useSSL=false&allowPublicKeyRetrieval=true"
  val username = "root"
  val password = "123456"
  val driverName = "com.mysql.jdbc.Driver"

  override def configure(parameters: Configuration): Unit = {
    // not needed
  }

  override def open(taskNumber: Int, numTasks: Int): Unit = {
    try {
      Class.forName(driverName)
      conn = DriverManager.getConnection(jdbcUrl, username, password)
      // disable auto commit
      conn.setAutoCommit(false)
    } catch {
      case e @ (_: ClassNotFoundException | _: SQLException) =>
        logger.error("init mysql error")
        e.printStackTrace()
        System.exit(-1)
    }
  }

  override def writeRecord(user: User): Unit = {
    println("get user : " + user.toString)
    ps = conn.prepareStatement("insert into async.user(username, password, sex, phone) values(?,?,?,?)")
    ps.setString(1, user.username)
    ps.setString(2, user.password)
    ps.setInt(3, user.sex)
    ps.setString(4, user.phone)
    ps.execute()
    conn.commit()
  }

  override def close(): Unit = {
    if (conn != null) {
      conn.commit()
      conn.close()
    }
  }
}
Example 17
Source File: CustomerTimerDemo.scala from flink-rookie with Apache License 2.0
package com.venn.stream.api.timer

import java.io.File
import java.sql.{Connection, DriverManager, PreparedStatement, SQLException}
import java.util
import java.util.{Timer, TimerTask}
import org.apache.flink.api.scala._
import com.venn.common.Common
import com.venn.util.TwoStringSource
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer
import org.slf4j.LoggerFactory

// Note: this listing is an excerpt of the original demo; the enclosing object, the
// source stream, the RichMapFunction and the timer setup are omitted in the source page.

    def query() = {
      logger.info("query mysql")
      try {
        Class.forName(driverName)
        conn = DriverManager.getConnection(jdbcUrl, username, password)
        ps = conn.prepareStatement("select id,name from venn.timer")
        val rs = ps.executeQuery
        while (!rs.isClosed && rs.next) {
          val id = rs.getString(1)
          val name = rs.getString(2)
          map.put(id, name)
        }
        logger.info("get config from db size : {}", map.size())
      } catch {
        case e @ (_: ClassNotFoundException | _: SQLException) =>
          e.printStackTrace()
      } finally {
        if (conn != null) {
          conn.close()
        }
      }
    }
  })

  // .print()
  val sink = new FlinkKafkaProducer[String]("timer_out", new SimpleStringSchema(), Common.getProp)
  stream.addSink(sink)
  env.execute(this.getClass.getName)
  }
}
Example 18
Source File: DataService.scala from kafka-jdbc-connector with Apache License 2.0
package com.agoda.kafka.connector.jdbc.services

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.agoda.kafka.connector.jdbc.utils.DataConverter
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord

import scala.concurrent.duration.Duration
import scala.util.Try

trait DataService {

  // Abstract members referenced below and overridden by implementations such as
  // TimeBasedDataService and the anonymous DataService in DataServiceTest.
  def storedProcedureName: String
  def dataConverter: DataConverter

  def getRecords(connection: Connection, timeout: Duration): Try[Seq[SourceRecord]] = {
    for {
      preparedStatement <- createPreparedStatement(connection)
      resultSet         <- executeStoredProcedure(preparedStatement, timeout)
      schema            <- dataConverter.convertSchema(storedProcedureName, resultSet.getMetaData)
      records           <- extractRecords(resultSet, schema)
    } yield records
  }

  protected def createPreparedStatement(connection: Connection): Try[PreparedStatement]

  protected def extractRecords(resultSet: ResultSet, schema: Schema): Try[Seq[SourceRecord]]

  private def executeStoredProcedure(preparedStatement: PreparedStatement, timeout: Duration): Try[ResultSet] = Try {
    preparedStatement.setQueryTimeout(timeout.toSeconds.toInt)
    preparedStatement.executeQuery
  }
}
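For orientation, a minimal sketch of driving this trait. It assumes dataService is a concrete DataService (for example the TimeBasedDataService shown earlier) and connection is a live JDBC Connection; neither is created here:

// Illustrative only.
import scala.concurrent.duration._
import scala.util.{Failure, Success}

dataService.getRecords(connection, 5.seconds) match {
  case Success(records) => records.foreach(r => println(s"fetched record for topic ${r.topic()}"))
  case Failure(error)   => println(s"stored procedure call failed: $error")
}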
Example 19
Source File: JdbcUtil.scala from bahir with Apache License 2.0
package org.apache.bahir.sql.streaming.jdbc

import java.sql.{Connection, PreparedStatement}
import java.util.Locale

import org.apache.spark.sql.Row
import org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils
import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcType}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String

object JdbcUtil {

  def getJdbcType(dt: DataType, dialect: JdbcDialect): JdbcType = {
    dialect.getJDBCType(dt).orElse(JdbcUtils.getCommonJDBCType(dt)).getOrElse(
      throw new IllegalArgumentException(s"Can't get JDBC type for ${dt.simpleString}"))
  }

  // A `JDBCValueSetter` is responsible for setting a value from `Row` into a field for
  // `PreparedStatement`. The last argument `Int` means the index for the value to be set
  // in the SQL statement and also used for the value in `Row`.
  type JDBCValueSetter = (PreparedStatement, Row, Int) => Unit

  def makeSetter(
      conn: Connection,
      dialect: JdbcDialect,
      dataType: DataType): JDBCValueSetter = dataType match {
    case IntegerType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setInt(pos + 1, row.getInt(pos))
    case LongType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setLong(pos + 1, row.getLong(pos))
    case DoubleType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setDouble(pos + 1, row.getDouble(pos))
    case FloatType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setFloat(pos + 1, row.getFloat(pos))
    case ShortType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setInt(pos + 1, row.getShort(pos))
    case ByteType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setInt(pos + 1, row.getByte(pos))
    case BooleanType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setBoolean(pos + 1, row.getBoolean(pos))
    case StringType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        val strValue = row.get(pos) match {
          case str: UTF8String => str.toString
          case str: String => str
        }
        stmt.setString(pos + 1, strValue)
    case BinaryType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setBytes(pos + 1, row.getAs[Array[Byte]](pos))
    case TimestampType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setTimestamp(pos + 1, row.getAs[java.sql.Timestamp](pos))
    case DateType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setDate(pos + 1, row.getAs[java.sql.Date](pos))
    case t: DecimalType =>
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        stmt.setBigDecimal(pos + 1, row.getDecimal(pos))
    case ArrayType(et, _) =>
      // remove type length parameters from end of type name
      val typeName = getJdbcType(et, dialect).databaseTypeDefinition
        .toLowerCase(Locale.ROOT).split("\\(")(0)
      (stmt: PreparedStatement, row: Row, pos: Int) =>
        val array = conn.createArrayOf(typeName, row.getSeq[AnyRef](pos).toArray)
        stmt.setArray(pos + 1, array)
    case _ =>
      (_: PreparedStatement, _: Row, pos: Int) =>
        throw new IllegalArgumentException(s"Can't translate non-null value for field $pos")
  }
}
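A hedged sketch of how the setters produced by makeSetter might be combined to bind an entire Row into a statement. The helper name bindRow and the null handling via setNull are assumptions for illustration; they are not part of the Bahir source:

// Illustrative only.
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.sql.Row
import org.apache.spark.sql.jdbc.JdbcDialect
import org.apache.spark.sql.types.StructType

def bindRow(conn: Connection, dialect: JdbcDialect, schema: StructType,
            stmt: PreparedStatement, row: Row): Unit = {
  // Build one setter per column, then apply each to its 1-based position in the statement.
  val setters = schema.fields.map(f => JdbcUtil.makeSetter(conn, dialect, f.dataType))
  setters.zipWithIndex.foreach { case (setter, i) =>
    if (row.isNullAt(i)) stmt.setNull(i + 1, java.sql.Types.NULL)
    else setter(stmt, row, i)
  }
}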
Example 20
Source File: DaoComponent.scala from Scala-Design-Patterns-Second-Edition with MIT License
package com.ivan.nikolov.cake

import java.sql.{ResultSet, PreparedStatement}

import com.ivan.nikolov.cake.model.{Person, Class}

trait DaoComponent {
  this: DatabaseComponent =>

  val dao: Dao

  class Dao() {

    def getPeople: List[Person] = {
      val connection = databaseService.getConnection
      try {
        executeSelect(
          connection.prepareStatement("SELECT id, name, age FROM people")
        ) { rs =>
          readResultSet(rs) { row =>
            Person(row.getInt(1), row.getString(2), row.getInt(3))
          }
        }
      } finally {
        connection.close()
      }
    }

    def getClasses: List[Class] = {
      val connection = databaseService.getConnection
      try {
        executeSelect(
          connection.prepareStatement("SELECT id, name FROM classes")
        ) { rs =>
          readResultSet(rs) { row =>
            Class(row.getInt(1), row.getString(2))
          }
        }
      } finally {
        connection.close()
      }
    }

    def getPeopleInClass(className: String): List[Person] = {
      val connection = databaseService.getConnection
      try {
        val statement = connection.prepareStatement(
          """
            |SELECT p.id, p.name, p.age
            |FROM people p
            |  JOIN people_classes pc ON p.id = pc.person_id
            |  JOIN classes c ON c.id = pc.class_id
            |WHERE c.name = ?
          """.stripMargin
        )
        statement.setString(1, className)
        executeSelect(statement) { rs =>
          readResultSet(rs) { row =>
            Person(row.getInt(1), row.getString(2), row.getInt(3))
          }
        }
      } finally {
        connection.close()
      }
    }

    private def executeSelect[T](preparedStatement: PreparedStatement)(f: (ResultSet) => List[T]): List[T] =
      try {
        f(preparedStatement.executeQuery())
      } finally {
        preparedStatement.close()
      }

    private def readResultSet[T](rs: ResultSet)(f: ResultSet => T): List[T] =
      Iterator.continually((rs.next(), rs)).takeWhile(_._1).map {
        case (_, row) => f(rs)
      }.toList
  }
}
Example 21
Source File: DaoServiceComponent.scala from Scala-Design-Patterns-Second-Edition with MIT License
package com.ivan.nikolov.scheduler.dao

import java.sql.{Connection, ResultSet, PreparedStatement}

trait DaoService {
  def getConnection(): Connection

  def executeSelect[T](preparedStatement: PreparedStatement)(f: (ResultSet) => List[T]): List[T] =
    try {
      f(preparedStatement.executeQuery())
    } finally {
      preparedStatement.close()
    }

  def readResultSet[T](rs: ResultSet)(f: ResultSet => T): List[T] =
    Iterator.continually((rs.next(), rs)).takeWhile(_._1).map {
      case (_, row) => f(rs)
    }.toList
}

trait DaoServiceComponent {
  this: DatabaseServiceComponent =>

  val daoService: DaoService

  class DaoServiceImpl extends DaoService {
    override def getConnection(): Connection = databaseService.getConnection
  }
}
Example 22
Source File: MemsqlRDD.scala from memsql-spark-connector with Apache License 2.0
package com.memsql.spark

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.memsql.spark.SQLGen.VariableList
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
import org.apache.spark.sql.types._
import org.apache.spark.{InterruptibleIterator, Partition, SparkContext, TaskContext}

case class MemsqlRDD(query: String,
                     variables: VariableList,
                     options: MemsqlOptions,
                     schema: StructType,
                     expectedOutput: Seq[Attribute],
                     @transient val sc: SparkContext)
    extends RDD[Row](sc, Nil) {

  override protected def getPartitions: Array[Partition] =
    MemsqlQueryHelpers.GetPartitions(options, query, variables)

  override def compute(rawPartition: Partition, context: TaskContext): Iterator[Row] = {
    var closed = false
    var rs: ResultSet = null
    var stmt: PreparedStatement = null
    var conn: Connection = null
    var partition: MemsqlPartition = rawPartition.asInstanceOf[MemsqlPartition]

    def tryClose(name: String, what: AutoCloseable): Unit = {
      try {
        if (what != null) { what.close() }
      } catch {
        case e: Exception => logWarning(s"Exception closing $name", e)
      }
    }

    def close(): Unit = {
      if (closed) { return }
      tryClose("resultset", rs)
      tryClose("statement", stmt)
      tryClose("connection", conn)
      closed = true
    }

    context.addTaskCompletionListener { context =>
      close()
    }

    conn = JdbcUtils.createConnectionFactory(partition.connectionInfo)()
    stmt = conn.prepareStatement(partition.query)
    JdbcHelpers.fillStatement(stmt, partition.variables)
    rs = stmt.executeQuery()

    var rowsIter = JdbcUtils.resultSetToRows(rs, schema)

    if (expectedOutput.nonEmpty) {
      val schemaDatatypes = schema.map(_.dataType)
      val expectedDatatypes = expectedOutput.map(_.dataType)

      if (schemaDatatypes != expectedDatatypes) {
        val columnEncoders = schemaDatatypes.zip(expectedDatatypes).zipWithIndex.map {
          case ((_: StringType, _: NullType), _) => ((_: Row) => null)
          case ((_: ShortType, _: BooleanType), i) => ((r: Row) => r.getShort(i) != 0)
          case ((_: IntegerType, _: BooleanType), i) => ((r: Row) => r.getInt(i) != 0)
          case ((_: LongType, _: BooleanType), i) => ((r: Row) => r.getLong(i) != 0)
          case ((l, r), i) => {
            options.assert(l == r, s"MemsqlRDD: unable to encode ${l} into ${r}")
            ((r: Row) => r.get(i))
          }
        }

        rowsIter = rowsIter.map(row => Row.fromSeq(columnEncoders.map(_(row))))
      }
    }

    CompletionIterator[Row, Iterator[Row]](new InterruptibleIterator[Row](context, rowsIter), close)
  }
}
Example 23
Source File: SqlStatement.scala from gatling-sql with Apache License 2.0
package io.github.gatling.sql

import java.sql.{Connection, PreparedStatement}

import com.typesafe.scalalogging.StrictLogging
import io.github.gatling.sql.db.ConnectionPool
import io.gatling.commons.validation.Validation
import io.gatling.core.session.{Expression, Session}
import io.gatling.commons.validation._

trait SqlStatement extends StrictLogging {
  def apply(session: Session): Validation[PreparedStatement]

  def connection = ConnectionPool.connection
}

case class SimpleSqlStatement(statement: Expression[String]) extends SqlStatement {
  def apply(session: Session): Validation[PreparedStatement] =
    statement(session).flatMap { stmt =>
      logger.debug(s"STMT: ${stmt}")
      connection.prepareStatement(stmt).success
    }
}
Example 24
Source File: SqlActionBuilder.scala from gatling-sql with Apache License 2.0
package io.github.gatling.sql.action

import io.github.gatling.sql.protocol.SqlProtocol
import io.github.gatling.sql.request.SqlAttributes
import io.gatling.commons.stats.{KO, OK}
import io.gatling.commons.util.ClockSingleton.nowMillis
import io.gatling.commons.validation.Validation
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.action.{Action, ExitableAction}
import io.gatling.core.protocol.ProtocolComponentsRegistry
import io.gatling.core.session.Session
import io.gatling.core.stats.StatsEngine
import io.gatling.core.stats.message.ResponseTimings
import io.gatling.core.structure.ScenarioContext
import io.gatling.core.util.NameGen
import java.sql.PreparedStatement

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class SqlActionBuilder(attr: SqlAttributes) extends ActionBuilder with NameGen {

  private def components(protocolComponentsRegistry: ProtocolComponentsRegistry) =
    protocolComponentsRegistry.components(SqlProtocol.SqlProtocolKey)

  override def build(ctx: ScenarioContext, next: Action): Action = {
    import ctx._
    val statsEngine = coreComponents.statsEngine
    val sqlComponents = components(protocolComponentsRegistry)
    new SqlAction(genName(s"SQL: ${attr.tag}"), sqlComponents.sqlProtocol, statsEngine, next, attr)
  }
}

class SqlAction(val name: String,
                protocol: SqlProtocol,
                val statsEngine: StatsEngine,
                val next: Action,
                val attr: SqlAttributes) extends ExitableAction {

  def execute(session: Session): Unit = {
    val stmt: Validation[PreparedStatement] = attr.statement(session)

    stmt.onFailure(err => {
      statsEngine.logResponse(session, name, ResponseTimings(nowMillis, nowMillis), KO, None,
        Some("Error setting up statement: " + err), Nil)
      next ! session.markAsFailed
    })

    stmt.onSuccess({ stmt =>
      val start = nowMillis
      val result = Future {
        stmt.execute()
      }

      result.onFailure {
        case t => statsEngine.reportUnbuildableRequest(session, name, t.getMessage)
      }

      result.onSuccess {
        case result =>
          val requestEndDate = nowMillis
          statsEngine.logResponse(
            session, name,
            ResponseTimings(startTimestamp = start, endTimestamp = requestEndDate),
            if (result) OK else KO,
            None,
            if (result) None else Some("Failed... TBD")
          )
          next ! session.markAsSucceeded
      }
    })
  }
}
Example 25
Source File: ConnectionPool.scala from airframe with Apache License 2.0
package wvlet.airframe.jdbc

import java.sql.{Connection, PreparedStatement, ResultSet}

import wvlet.log.LogSupport
import wvlet.log.io.IOUtil.withResource

object ConnectionPool {
  def apply(config: DbConfig): ConnectionPool = {
    val pool: ConnectionPool = config.`type` match {
      case "sqlite" => new SQLiteConnectionPool(config)
      case other => new GenericConnectionPool(config)
    }
    pool
  }

  def newFactory: ConnectionPoolFactory = new ConnectionPoolFactory()
}

trait ConnectionPool extends LogSupport with AutoCloseable {
  def config: DbConfig

  def withConnection[U](body: Connection => U): U

  def withTransaction[U](body: Connection => U): U = {
    withConnection { conn =>
      conn.setAutoCommit(false)
      var failed = false
      try {
        body(conn)
      } catch {
        case e: Throwable =>
          // Need to set the failed flag first because the rollback might fail
          failed = true
          conn.rollback()
          throw e
      } finally {
        if (failed == false) {
          conn.commit()
        }
      }
    }
  }

  def stop: Unit

  override def close(): Unit = stop

  def executeQuery[U](sql: String)(handler: ResultSet => U): U = {
    withConnection { conn =>
      withResource(conn.createStatement()) { stmt =>
        debug(s"execute query: ${sql}")
        withResource(stmt.executeQuery(sql)) { rs =>
          handler(rs)
        }
      }
    }
  }

  def executeUpdate(sql: String): Int = {
    // TODO Add update retry
    withConnection { conn =>
      withResource(conn.createStatement()) { stmt =>
        debug(s"execute update: ${sql}")
        stmt.executeUpdate(sql)
      }
    }
  }

  def queryWith[U](preparedStatement: String)(body: PreparedStatement => Unit)(handler: ResultSet => U): U = {
    withConnection { conn =>
      withResource(conn.prepareStatement(preparedStatement)) { stmt =>
        body(stmt)
        debug(s"execute query: ${preparedStatement}")
        withResource(stmt.executeQuery) { rs =>
          handler(rs)
        }
      }
    }
  }

  def updateWith(preparedStatement: String)(body: PreparedStatement => Unit): Unit = {
    withConnection { conn =>
      withResource(conn.prepareStatement(preparedStatement)) { stmt =>
        body(stmt)
        stmt.executeUpdate()
      }
    }
  }
}
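A minimal usage sketch of the prepared-statement helpers above. The pool construction, table and column names are illustrative; only the queryWith/updateWith signatures come from the trait shown here:

// Illustrative only: assumes `pool` is a ConnectionPool built from a DbConfig
// and a `users(id INT, name VARCHAR)` table exists.
pool.updateWith("INSERT INTO users(id, name) VALUES(?, ?)") { stmt =>
  stmt.setInt(1, 1)
  stmt.setString(2, "alice")
}

val name: Option[String] =
  pool.queryWith("SELECT name FROM users WHERE id = ?")(_.setInt(1, 1)) { rs =>
    if (rs.next()) Some(rs.getString(1)) else None
  }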
Example 26
Source File: JdbcPublisher.scala from eel-sdk with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, PreparedStatement}
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.io.Using
import com.sksamuel.exts.metrics.Timed
import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.{Publisher, Subscriber, Subscription}

import scala.collection.mutable.ArrayBuffer

class JdbcPublisher(connFn: () => Connection,
                    query: String,
                    bindFn: (PreparedStatement) => Unit,
                    fetchSize: Int,
                    dialect: JdbcDialect
                   ) extends Publisher[Seq[Row]] with Timed with JdbcPrimitives with Using {

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      using(connFn()) { conn =>
        logger.debug(s"Preparing query $query")
        using(conn.prepareStatement(query)) { stmt =>
          stmt.setFetchSize(fetchSize)
          bindFn(stmt)

          logger.debug(s"Executing query $query")
          using(stmt.executeQuery()) { rs =>
            val schema = schemaFor(dialect, rs)

            val running = new AtomicBoolean(true)
            subscriber.subscribed(Subscription.fromRunning(running))

            val buffer = new ArrayBuffer[Row](fetchSize)
            while (rs.next && running.get) {
              val values = schema.fieldNames().map { name =>
                val raw = rs.getObject(name)
                dialect.sanitize(raw)
              }
              buffer append Row(schema, values)
              if (buffer.size == fetchSize) {
                subscriber.next(buffer.toVector)
                buffer.clear()
              }
            }

            if (buffer.nonEmpty)
              subscriber.next(buffer.toVector)

            subscriber.completed()
          }
        }
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }
}
Example 27
Source File: BucketPartitionStrategy.scala from eel-sdk with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, PreparedStatement}

import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.Publisher

case class BucketPartitionStrategy(columnName: String,
                                   values: Set[String]) extends JdbcPartitionStrategy {

  override def parts(connFn: () => Connection,
                     query: String,
                     bindFn: (PreparedStatement) => Unit,
                     fetchSize: Int,
                     dialect: JdbcDialect): Seq[Publisher[Seq[Row]]] = {
    values.map { value =>
      val partitionedQuery = s""" SELECT * FROM ( $query ) WHERE $columnName = '$value' """
      new JdbcPublisher(connFn, partitionedQuery, bindFn, fetchSize, dialect)
    }.toSeq
  }
}
Example 28
Source File: JdbcSource.scala from eel-sdk with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, DriverManager, PreparedStatement}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import com.sksamuel.exts.metrics.Timed
import io.eels.{Row, Source}
import io.eels.component.jdbc.dialect.{GenericJdbcDialect, JdbcDialect}
import io.eels.datastream.Publisher
import io.eels.schema.StructType

object JdbcSource {
  def apply(url: String, query: String): JdbcSource = JdbcSource(() => DriverManager.getConnection(url), query)
}

case class JdbcSource(connFn: () => Connection,
                      query: String,
                      bindFn: (PreparedStatement) => Unit = stmt => (),
                      fetchSize: Int = 200,
                      providedSchema: Option[StructType] = None,
                      providedDialect: Option[JdbcDialect] = None,
                      partitionStrategy: JdbcPartitionStrategy = SinglePartitionStrategy)
  extends Source with JdbcPrimitives with Logging with Using with Timed {

  override lazy val schema: StructType = providedSchema.getOrElse(fetchSchema())

  def withBind(bind: (PreparedStatement) => Unit): JdbcSource = copy(bindFn = bind)
  def withFetchSize(fetchSize: Int): JdbcSource = copy(fetchSize = fetchSize)
  def withProvidedSchema(schema: StructType): JdbcSource = copy(providedSchema = Option(schema))
  def withProvidedDialect(dialect: JdbcDialect): JdbcSource = copy(providedDialect = Option(dialect))
  def withPartitionStrategy(strategy: JdbcPartitionStrategy): JdbcSource = copy(partitionStrategy = strategy)

  private def dialect(): JdbcDialect = providedDialect.getOrElse(new GenericJdbcDialect())

  override def parts(): Seq[Publisher[Seq[Row]]] = partitionStrategy.parts(connFn, query, bindFn, fetchSize, dialect())

  def fetchSchema(): StructType = {
    using(connFn()) { conn =>
      val schemaQuery = s"SELECT * FROM ($query) tmp WHERE 1=0"
      using(conn.prepareStatement(schemaQuery)) { stmt =>
        stmt.setFetchSize(fetchSize)
        bindFn(stmt)

        val rs = timed(s"Executing query $query") {
          stmt.executeQuery()
        }

        val schema = schemaFor(dialect(), rs)
        rs.close()
        schema
      }
    }
  }
}
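A short usage sketch of the source above, combining it with the RangePartitionStrategy shown later in this listing. The JDBC URL, table and column names are placeholders:

// Illustrative only.
val source = JdbcSource("jdbc:h2:mem:testdb", "SELECT * FROM products")
  .withFetchSize(500)
  .withPartitionStrategy(RangePartitionStrategy("id", numberOfPartitions = 4, min = 1, max = 1000000))

println(source.schema)          // derived via fetchSchema(), which probes with a "WHERE 1=0" query
val publishers = source.parts() // one Publisher[Seq[Row]] per partition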
Example 29
Source File: HashPartitionStrategy.scala from eel-sdk with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, PreparedStatement}

import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.Publisher

case class HashPartitionStrategy(hashExpression: String,
                                 numberOfPartitions: Int) extends JdbcPartitionStrategy {

  def partitionedQuery(partNum: Int, query: String): String =
    s"""SELECT * from ($query) WHERE $hashExpression = $partNum""".stripMargin

  override def parts(connFn: () => Connection,
                     query: String,
                     bindFn: (PreparedStatement) => Unit,
                     fetchSize: Int,
                     dialect: JdbcDialect): Seq[Publisher[Seq[Row]]] = {
    for (k <- 0 until numberOfPartitions) yield {
      new JdbcPublisher(connFn, partitionedQuery(k, query), bindFn, fetchSize, dialect)
    }
  }
}
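To make the generated SQL concrete: with hashExpression = "MOD(id, 4)" and numberOfPartitions = 4, the strategy issues one wrapped query per partition of the following shape (the table name is illustrative):

// partitionedQuery(0, "SELECT * FROM events") ==
//   "SELECT * from (SELECT * FROM events) WHERE MOD(id, 4) = 0"
// ...and likewise with = 1, = 2, = 3 for the remaining partitions.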
Example 30
Source File: RangePartitionStrategy.scala from eel-sdk with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, PreparedStatement}

import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.Publisher

case class RangePartitionStrategy(columnName: String,
                                  numberOfPartitions: Int,
                                  min: Long,
                                  max: Long) extends JdbcPartitionStrategy {

  def ranges: Seq[(Long, Long)] = {

    // distribute surplus as evenly as possible across buckets
    // min max + 1 because the min-max range is inclusive
    val surplus = (max - min + 1) % numberOfPartitions
    val gap = (max - min + 1) / numberOfPartitions

    List.tabulate(numberOfPartitions) { k =>
      val start = min + k * gap + Math.min(k, surplus)
      val end = min + ((k + 1) * gap) + Math.min(k + 1, surplus)
      (start, end - 1)
    }
  }

  override def parts(connFn: () => Connection,
                     query: String,
                     bindFn: (PreparedStatement) => Unit,
                     fetchSize: Int,
                     dialect: JdbcDialect): Seq[Publisher[Seq[Row]]] = {
    ranges.map { case (start, end) =>
      val partitionedQuery = s"""SELECT * FROM ( $query ) WHERE $start <= $columnName AND $columnName <= $end"""
      new JdbcPublisher(connFn, partitionedQuery, bindFn, fetchSize, dialect)
    }
  }
}
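As a quick check of how the surplus is distributed, a small worked example with values chosen for illustration:

// RangePartitionStrategy("id", numberOfPartitions = 3, min = 1, max = 10).ranges
// returns Seq((1, 4), (5, 7), (8, 10)): gap = 3, and the single surplus row
// is absorbed by the first partition.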
Example 31
Source File: BucketExprPartitionStrategy.scala from eel-sdk with Apache License 2.0
package io.eels.component.jdbc

import java.sql.{Connection, PreparedStatement}

import io.eels.Row
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.datastream.Publisher

case class BucketExprPartitionStrategy(bucketExpressions: Seq[String]) extends JdbcPartitionStrategy {

  override def parts(connFn: () => Connection,
                     query: String,
                     bindFn: (PreparedStatement) => Unit,
                     fetchSize: Int,
                     dialect: JdbcDialect): Seq[Publisher[Seq[Row]]] = {
    bucketExpressions.map { bucketExpression =>
      val partitionedQuery = s""" SELECT * FROM ( $query ) WHERE $bucketExpression """
      new JdbcPublisher(connFn, partitionedQuery, bindFn, fetchSize, dialect)
    }
  }
}