org.apache.kafka.common.config.ConfigDef Scala Examples
The following examples show how to use org.apache.kafka.common.config.ConfigDef.
They are drawn from open-source Kafka Connect connectors; the source file, project, and license are noted above each example.
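Every example follows the same two-step pattern: declare each configuration key in a ConfigDef, then wrap the raw property map in an AbstractConfig (or a project-specific BaseConfig) that parses and validates it. A minimal sketch of that pattern, using hypothetical names:

    import java.util

    import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
    import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

    object MyConnectorConfig {
      val HostKey = "connect.my.host" // hypothetical key

      val config: ConfigDef = new ConfigDef()
        .define(HostKey, Type.STRING, Importance.HIGH, "Host to connect to")
    }

    class MyConnectorConfig(props: util.Map[String, String])
        extends AbstractConfig(MyConnectorConfig.config, props) {
      // AbstractConfig parses and validates the raw strings against the ConfigDef
      def host: String = getString(MyConnectorConfig.HostKey)
    }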
Example 1
Source File: HiveSinkConfigDef.scala From stream-reactor with Apache License 2.0

package com.landoop.streamreactor.connect.hive.sink.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object HiveSinkConfigDef {

  import SinkConfigSettings._

  val config: ConfigDef = new ConfigDef()
    .define(PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, PROGRESS_COUNTER_ENABLED_DEFAULT,
      Importance.MEDIUM, PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, PROGRESS_COUNTER_ENABLED_DISPLAY)
    .define(KcqlKey, Type.STRING, Importance.HIGH, KCQL_DOC)
    .define(DatabaseNameKey, Type.STRING, Importance.HIGH, DatabaseNameDoc)
    .define(MetastoreTypeKey, Type.STRING, Importance.HIGH, MetastoreTypeDoc)
    .define(MetastoreUrisKey, Type.STRING, Importance.HIGH, MetastoreUrisDoc)
    .define(FsDefaultKey, Type.STRING, Importance.HIGH, FsDefaultDoc)
    //config folders
    .define(HdfsConfigDirKey, Type.STRING, HdfsConfigDirDefault, Importance.MEDIUM, HdfsConfigDirDoc,
      "Configs", 1, ConfigDef.Width.MEDIUM, HdfsConfigDirDisplay)
    .define(HiveConfigDirKey, Type.STRING, HiveConfigDirDefault, Importance.MEDIUM, HiveConfigDirDoc,
      "Configs", 2, ConfigDef.Width.MEDIUM, HiveConfigDirDisplay)
    //security
    .define(KerberosKey, Type.BOOLEAN, KerberosDefault, Importance.MEDIUM, KerberosDoc,
      "Security", 1, ConfigDef.Width.MEDIUM, KerberosDisplay)
    .define(KerberosAuthModeKey, Type.STRING, KerberosAuthModeDefault, Importance.MEDIUM, KerberosAuthModeDoc,
      "Security", 2, ConfigDef.Width.MEDIUM, KerberosAuthModeDisplay)
    //keytab
    .define(PrincipalKey, Type.STRING, PrincipalDefault, Importance.MEDIUM, PrincipalDoc,
      "Kerberos Keytab", 1, ConfigDef.Width.MEDIUM, PrincipalDisplay)
    .define(KerberosKeyTabKey, Type.STRING, KerberosKeyTabDefault, Importance.MEDIUM, KerberosKeyTabDoc,
      "Kerberos Keytab", 2, ConfigDef.Width.MEDIUM, KerberosKeyTabDisplay)
    .define(NameNodePrincipalKey, Type.STRING, NameNodePrincipalDefault, Importance.MEDIUM, NameNodePrincipalDoc,
      "Kerberos Keytab", 3, ConfigDef.Width.MEDIUM, NameNodePrincipalDisplay)
    .define(KerberosTicketRenewalKey, Type.LONG, KerberosTicketRenewalDefault, Importance.MEDIUM, KerberosTicketRenewalDoc,
      "Kerberos Keytab", 4, ConfigDef.Width.MEDIUM, KerberosTicketRenewalDisplay)
    //user password
    .define(KerberosUserKey, Type.STRING, KerberosUserDefault, Importance.MEDIUM, KerberosUserDoc,
      "Kerberos User Password", 1, ConfigDef.Width.MEDIUM, KerberosUserDisplay)
    .define(KerberosPasswordKey, Type.PASSWORD, KerberosPasswordDefault, Importance.MEDIUM, KerberosPasswordDoc,
      "Kerberos User Password", 2, ConfigDef.Width.MEDIUM, KerberosPasswordDisplay)
    .define(KerberosKrb5Key, Type.STRING, KerberosKrb5Default, Importance.MEDIUM, KerberosKrb5Doc,
      "Kerberos User Password", 3, ConfigDef.Width.MEDIUM, KerberosKrb5Display)
    .define(KerberosJaasKey, Type.STRING, KerberosJaasDefault, Importance.MEDIUM, KerberosJaasDoc,
      "Kerberos User Password", 4, ConfigDef.Width.MEDIUM, KerberosJaasDisplay)
    .define(JaasEntryNameKey, Type.STRING, JaasEntryNameDefault, Importance.MEDIUM, JaasEntryNameDoc,
      "Kerberos User Password", 5, ConfigDef.Width.MEDIUM, JaasEntryNameDisplay)
}

case class HiveSinkConfigDefBuilder(props: util.Map[String, String])
    extends BaseConfig(SinkConfigSettings.CONNECTOR_PREFIX, HiveSinkConfigDef.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with UserSettings
    with ConnectionSettings
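The nine-argument define overload used above takes, in order: name, type, default value, importance, documentation, group, order within the group, rendered width, and display name. The last four only drive generated documentation and UIs; parsing is governed by the name, type, default, and importance. An annotated sketch with a hypothetical key:

    new ConfigDef()
      .define(
        "connect.example.enabled",  // name (hypothetical key)
        Type.BOOLEAN,               // type used to parse the raw string
        true,                       // default value; omit for a required setting
        Importance.MEDIUM,          // importance
        "Example doc string",       // documentation
        "Metrics",                  // group heading in generated docs
        1,                          // position within the group
        ConfigDef.Width.MEDIUM,     // suggested field width for UIs
        "Example display name")     // human-readable label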
Example 2
Source File: ReThinkSinkConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.rethink.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object ReThinkSinkConfig extends ReThinkConfig {
  val config: ConfigDef = baseConfig
    .define(ReThinkConfigConstants.ERROR_POLICY, Type.STRING,
      ReThinkConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
      ReThinkConfigConstants.ERROR_POLICY_DOC,
      "Connection", 9, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.ERROR_POLICY)
    .define(ReThinkConfigConstants.ERROR_RETRY_INTERVAL, Type.INT,
      ReThinkConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.ERROR_RETRY_INTERVAL_DOC,
      "Connection", 10, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.ERROR_RETRY_INTERVAL)
    .define(ReThinkConfigConstants.NBR_OF_RETRIES, Type.INT,
      ReThinkConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.NBR_OF_RETRIES_DOC,
      "Connection", 11, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.NBR_OF_RETRIES)
}

case class ReThinkSinkConfig(props: util.Map[String, String])
    extends BaseConfig(ReThinkConfigConstants.RETHINK_CONNECTOR_PREFIX, ReThinkSinkConfig.config, props)
    with ErrorPolicySettings
    with NumberRetriesSettings
    with KcqlSettings
    with DatabaseSettings
    with RetryIntervalSettings
Example 3
Source File: ReThinkConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.rethink.config

import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

trait ReThinkConfig {
  val baseConfig: ConfigDef = new ConfigDef()
    .define(ReThinkConfigConstants.RETHINK_HOST, Type.STRING,
      ReThinkConfigConstants.RETHINK_HOST_DEFAULT, Importance.HIGH,
      ReThinkConfigConstants.RETHINK_HOST_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.RETHINK_HOST)
    .define(ReThinkConfigConstants.RETHINK_DB, Type.STRING,
      ReThinkConfigConstants.RETHINK_DB_DEFAULT, Importance.HIGH,
      ReThinkConfigConstants.RETHINK_DB_DOC,
      "Connection", 2, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.RETHINK_DB)
    .define(ReThinkConfigConstants.RETHINK_PORT, Type.INT,
      ReThinkConfigConstants.RETHINK_PORT_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.RETHINK_PORT_DOC,
      "Connection", 3, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.RETHINK_PORT)
    .define(ReThinkConfigConstants.KCQL, Type.STRING, Importance.HIGH,
      ReThinkConfigConstants.KCQL_DOC,
      "Connection", 4, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.KCQL)
    .define(ReThinkConfigConstants.USERNAME, Type.STRING,
      ReThinkConfigConstants.USERNAME_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.USERNAME_DOC,
      "Connection", 5, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.USERNAME)
    .define(ReThinkConfigConstants.PASSWORD, Type.PASSWORD,
      ReThinkConfigConstants.PASSWORD_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.PASSWORD_DOC,
      "Connection", 6, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.PASSWORD)
    .define(ReThinkConfigConstants.AUTH_KEY, Type.PASSWORD,
      ReThinkConfigConstants.AUTH_KEY_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.AUTH_KEY_DOC,
      "Connection", 7, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.AUTH_KEY)
    .define(ReThinkConfigConstants.CERT_FILE, Type.STRING,
      ReThinkConfigConstants.CERT_FILE_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.CERT_FILE_DOC,
      "Connection", 8, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.CERT_FILE)
    .define(ReThinkConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN,
      ReThinkConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}
Example 4
Source File: KuduConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.kudu.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object KuduConfig {
  val config: ConfigDef = new ConfigDef()
    .define(KuduConfigConstants.KUDU_MASTER, Type.STRING,
      KuduConfigConstants.KUDU_MASTER_DEFAULT, Importance.HIGH,
      KuduConfigConstants.KUDU_MASTER_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, KuduConfigConstants.KUDU_MASTER)
    .define(KuduConfigConstants.KCQL, Type.STRING, Importance.HIGH,
      KuduConfigConstants.KCQL,
      "Connection", 2, ConfigDef.Width.MEDIUM, KuduConfigConstants.KCQL)
    .define(KuduConfigConstants.ERROR_POLICY, Type.STRING,
      KuduConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
      KuduConfigConstants.ERROR_POLICY_DOC,
      "Connection", 3, ConfigDef.Width.MEDIUM, KuduConfigConstants.ERROR_POLICY)
    .define(KuduConfigConstants.ERROR_RETRY_INTERVAL, Type.INT,
      KuduConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM,
      KuduConfigConstants.ERROR_RETRY_INTERVAL_DOC,
      "Connection", 4, ConfigDef.Width.MEDIUM, KuduConfigConstants.ERROR_RETRY_INTERVAL)
    .define(KuduConfigConstants.NBR_OF_RETRIES, Type.INT,
      KuduConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM,
      KuduConfigConstants.NBR_OF_RETRIES_DOC,
      "Connection", 5, ConfigDef.Width.MEDIUM, KuduConfigConstants.NBR_OF_RETRIES)
    .define(KuduConfigConstants.SCHEMA_REGISTRY_URL, Type.STRING,
      KuduConfigConstants.SCHEMA_REGISTRY_URL_DEFAULT, Importance.HIGH,
      KuduConfigConstants.SCHEMA_REGISTRY_URL_DOC,
      "Connection", 6, ConfigDef.Width.MEDIUM, KuduConfigConstants.SCHEMA_REGISTRY_URL)
    .define(KuduConfigConstants.WRITE_FLUSH_MODE, Type.STRING,
      KuduConfigConstants.WRITE_FLUSH_MODE_DEFAULT, Importance.MEDIUM,
      KuduConfigConstants.WRITE_FLUSH_MODE_DOC,
      "Connection", 7, ConfigDef.Width.MEDIUM, KuduConfigConstants.WRITE_FLUSH_MODE)
    .define(KuduConfigConstants.MUTATION_BUFFER_SPACE, Type.INT,
      KuduConfigConstants.MUTATION_BUFFER_SPACE_DEFAULT, Importance.MEDIUM,
      KuduConfigConstants.MUTATION_BUFFER_SPACE_DOC,
      "Connection", 8, ConfigDef.Width.MEDIUM, KuduConfigConstants.MUTATION_BUFFER_SPACE)
    .define(KuduConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN,
      KuduConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM,
      KuduConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, KuduConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}

class KuduConfig(props: util.Map[String, String])
    extends BaseConfig(KuduConfigConstants.CONNECTOR_PREFIX, KuduConfig.config, props)
    with KcqlSettings
    with DatabaseSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with ConnectionSettings {

  def getWriteFlushMode() = WriteFlushMode.withName(
    props.getOrDefault(
      KuduConfigConstants.WRITE_FLUSH_MODE,
      KuduConfigConstants.WRITE_FLUSH_MODE_DEFAULT).toUpperCase)
}
Example 5
Source File: ElasticSinkConnector.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.elastic6

import java.util

import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.elastic6.config.{ElasticConfig, ElasticConfigConstants}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class ElasticSinkConnector extends SinkConnector with StrictLogging {
  private var configProps: Option[util.Map[String, String]] = None
  private val configDef = ElasticConfig.config
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  // Required Connector overrides; they mirror the sibling sink connectors in this project.
  override def taskClass(): Class[_ <: Task] = classOf[ElasticSinkTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] =
    (1 to maxTasks).map(_ => configProps.get).toList.asJava

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(s"Starting Elastic sink task.")
    Helpers.checkInputTopics(ElasticConfigConstants.KCQL, props.asScala.toMap)
    configProps = Some(props)
  }

  override def stop(): Unit = {}
  override def version(): String = manifest.version()
  override def config(): ConfigDef = configDef
}
Example 6
Source File: PulsarSinkConnector.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.pulsar.sink

import java.util

import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.pulsar.config.{PulsarConfigConstants, PulsarSinkConfig}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class PulsarSinkConnector extends SinkConnector with StrictLogging {
  private val configDef = PulsarSinkConfig.config
  private var configProps: Option[util.Map[String, String]] = None
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(s"Starting Pulsar sink connector.")
    Helpers.checkInputTopics(PulsarConfigConstants.KCQL_CONFIG, props.asScala.toMap)
    configProps = Some(props)
  }

  override def taskClass(): Class[_ <: Task] = classOf[PulsarSinkTask]
  override def version(): String = manifest.version()
  override def stop(): Unit = {}

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Setting task configurations for $maxTasks workers.")
    (1 to maxTasks).map(_ => configProps.get).toList.asJava
  }

  override def config(): ConfigDef = configDef
}
Example 7
Source File: PulsarConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.pulsar.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object PulsarConfig {
  val config: ConfigDef = new ConfigDef()
    .define(PulsarConfigConstants.HOSTS_CONFIG, Type.STRING, Importance.HIGH,
      PulsarConfigConstants.HOSTS_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, PulsarConfigConstants.HOSTS_DISPLAY)
    //ssl
    .define(PulsarConfigConstants.SSL_CA_CERT_CONFIG, Type.STRING, null, Importance.MEDIUM,
      PulsarConfigConstants.SSL_CA_CERT_DOC,
      "TLS", 1, ConfigDef.Width.MEDIUM, PulsarConfigConstants.SSL_CA_CERT_DISPLAY)
    .define(PulsarConfigConstants.SSL_CERT_CONFIG, Type.STRING, null, Importance.MEDIUM,
      PulsarConfigConstants.SSL_CERT_DOC,
      "TLS", 2, ConfigDef.Width.MEDIUM, PulsarConfigConstants.SSL_CERT_DISPLAY)
    .define(PulsarConfigConstants.SSL_CERT_KEY_CONFIG, Type.STRING, null, Importance.MEDIUM,
      PulsarConfigConstants.SSL_CERT_KEY_DOC,
      "TLS", 3, ConfigDef.Width.MEDIUM, PulsarConfigConstants.SSL_CERT_KEY_DISPLAY)
    //kcql
    .define(PulsarConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH,
      PulsarConfigConstants.KCQL_DOC,
      "KCQL", 1, ConfigDef.Width.MEDIUM, PulsarConfigConstants.KCQL_DISPLAY)
    .define(PulsarConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN,
      PulsarConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM,
      PulsarConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, PulsarConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}

object PulsarSourceConfig {
  val config = PulsarConfig.config
    //converter
    .define(PulsarConfigConstants.THROW_ON_CONVERT_ERRORS_CONFIG, Type.BOOLEAN,
      PulsarConfigConstants.THROW_ON_CONVERT_ERRORS_DEFAULT, Importance.HIGH,
      PulsarConfigConstants.THROW_ON_CONVERT_ERRORS_DOC,
      "Converter", 1, ConfigDef.Width.MEDIUM, PulsarConfigConstants.THROW_ON_CONVERT_ERRORS_DISPLAY)
    .define(PulsarConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES, Type.STRING,
      PulsarConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES_DEFAULT, Importance.HIGH,
      PulsarConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES_DOC,
      "Converter", 3, ConfigDef.Width.MEDIUM, PulsarConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES)
    //manager
    .define(PulsarConfigConstants.POLLING_TIMEOUT_CONFIG, Type.INT,
      PulsarConfigConstants.POLLING_TIMEOUT_DEFAULT, Importance.LOW,
      PulsarConfigConstants.POLLING_TIMEOUT_DOC,
      "Manager", 1, ConfigDef.Width.MEDIUM, PulsarConfigConstants.POLLING_TIMEOUT_DISPLAY)
    .define(PulsarConfigConstants.INTERNAL_BATCH_SIZE, Type.INT,
      PulsarConfigConstants.INTERNAL_BATCH_SIZE_DEFAULT, Importance.LOW,
      PulsarConfigConstants.INTERNAL_BATCH_SIZE_DOC,
      "Manager", 2, ConfigDef.Width.MEDIUM, PulsarConfigConstants.INTERNAL_BATCH_SIZE)
}

case class PulsarSourceConfig(props: util.Map[String, String])
    extends BaseConfig(PulsarConfigConstants.CONNECTOR_PREFIX, PulsarSourceConfig.config, props)
    with PulsarConfigBase

object PulsarSinkConfig {
  val config = PulsarConfig.config
    .define(PulsarConfigConstants.ERROR_POLICY, Type.STRING,
      PulsarConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
      PulsarConfigConstants.ERROR_POLICY_DOC,
      "Connection", 9, ConfigDef.Width.MEDIUM, PulsarConfigConstants.ERROR_POLICY)
    .define(PulsarConfigConstants.ERROR_RETRY_INTERVAL, Type.INT,
      PulsarConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM,
      PulsarConfigConstants.ERROR_RETRY_INTERVAL_DOC,
      "Connection", 10, ConfigDef.Width.MEDIUM, PulsarConfigConstants.ERROR_RETRY_INTERVAL)
    .define(PulsarConfigConstants.NBR_OF_RETRIES, Type.INT,
      PulsarConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM,
      PulsarConfigConstants.NBR_OF_RETRIES_DOC,
      "Connection", 11, ConfigDef.Width.MEDIUM, PulsarConfigConstants.NBR_OF_RETRIES)
}

case class PulsarSinkConfig(props: util.Map[String, String])
    extends BaseConfig(PulsarConfigConstants.CONNECTOR_PREFIX, PulsarSinkConfig.config, props)
    with PulsarConfigBase

case class PulsarConfig(props: util.Map[String, String])
    extends BaseConfig(PulsarConfigConstants.CONNECTOR_PREFIX, PulsarConfig.config, props)
    with PulsarConfigBase

trait PulsarConfigBase extends KcqlSettings
  with NumberRetriesSettings
  with ErrorPolicySettings
  with SSLSettings
  with ConnectionSettings
  with UserSettings
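One thing worth noting here: define mutates the ConfigDef it is called on and returns the same instance, so PulsarSourceConfig.config and PulsarSinkConfig.config above both add their keys to the single shared PulsarConfig.config object. If the source and sink definitions need to stay independent, ConfigDef's copy constructor gives each object its own copy; a sketch:

    // Copy the base definition so sink-only keys do not leak into the source config.
    val sinkOnlyConfig: ConfigDef = new ConfigDef(PulsarConfig.config)
      .define(PulsarConfigConstants.ERROR_POLICY, Type.STRING,
        PulsarConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
        PulsarConfigConstants.ERROR_POLICY_DOC,
        "Connection", 9, ConfigDef.Width.MEDIUM, PulsarConfigConstants.ERROR_POLICY)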
Example 8
Source File: RedisConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.redis.sink.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object RedisConfig {
  val config: ConfigDef = new ConfigDef()
    .define(RedisConfigConstants.REDIS_HOST, Type.STRING, Importance.HIGH,
      RedisConfigConstants.REDIS_HOST_DOC,
      "Connection", 2, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_HOST)
    .define(RedisConfigConstants.REDIS_PORT, Type.INT, Importance.HIGH,
      RedisConfigConstants.REDIS_PORT_DOC,
      "Connection", 3, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_PORT)
    .define(RedisConfigConstants.REDIS_PASSWORD, Type.PASSWORD, null, Importance.LOW,
      RedisConfigConstants.REDIS_PASSWORD_DOC,
      "Connection", 4, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_PASSWORD)
    .define(RedisConfigConstants.REDIS_SSL_ENABLED, Type.BOOLEAN, false, Importance.LOW,
      RedisConfigConstants.REDIS_SSL_ENABLED_DOC,
      "Connection", 5, ConfigDef.Width.MEDIUM, RedisConfigConstants.REDIS_SSL_ENABLED)
    .define(RedisConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH,
      RedisConfigConstants.KCQL_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, RedisConfigConstants.KCQL_CONFIG)
    .define(RedisConfigConstants.ERROR_POLICY, Type.STRING,
      RedisConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
      RedisConfigConstants.ERROR_POLICY_DOC,
      "Connection", 5, ConfigDef.Width.MEDIUM, RedisConfigConstants.ERROR_POLICY)
    .define(RedisConfigConstants.ERROR_RETRY_INTERVAL, Type.INT,
      RedisConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM,
      RedisConfigConstants.ERROR_RETRY_INTERVAL_DOC,
      "Connection", 6, ConfigDef.Width.MEDIUM, RedisConfigConstants.ERROR_RETRY_INTERVAL)
    .define(RedisConfigConstants.NBR_OF_RETRIES, Type.INT,
      RedisConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM,
      RedisConfigConstants.NBR_OF_RETRIES_DOC,
      "Connection", 7, ConfigDef.Width.MEDIUM, RedisConfigConstants.NBR_OF_RETRIES)
    .define(RedisConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN,
      RedisConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM,
      RedisConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, RedisConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
    .define(RedisConfigConstants.REDIS_PK_DELIMITER, Type.STRING,
      RedisConfigConstants.REDIS_PK_DELIMITER_DEFAULT_VALUE, Importance.LOW,
      RedisConfigConstants.REDIS_PK_DELIMITER_DOC)
    .withClientSslSupport()
}

case class RedisConfig(props: util.Map[String, String])
    extends BaseConfig(RedisConfigConstants.CONNECTOR_PREFIX, RedisConfig.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with UserSettings
Example 9
Source File: MqttSinkConnector.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.mqtt.sink

import java.util

import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.mqtt.config.{MqttConfigConstants, MqttSinkConfig}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class MqttSinkConnector extends SinkConnector with StrictLogging {
  private val configDef = MqttSinkConfig.config
  private var configProps: Option[util.Map[String, String]] = None
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(s"Starting Mqtt sink connector.")
    Helpers.checkInputTopics(MqttConfigConstants.KCQL_CONFIG, props.asScala.toMap)
    configProps = Some(props)
  }

  override def taskClass(): Class[_ <: Task] = classOf[MqttSinkTask]
  override def version(): String = manifest.version()
  override def stop(): Unit = {}

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Setting task configurations for $maxTasks workers.")
    (1 to maxTasks).map(_ => configProps.get).toList.asJava
  }

  override def config(): ConfigDef = configDef
}
Example 10
Source File: HiveSinkConnector.scala From stream-reactor with Apache License 2.0

package com.landoop.streamreactor.connect.hive.sink

import java.util

import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.landoop.streamreactor.connect.hive.sink.config.HiveSinkConfigDef
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class HiveSinkConnector extends SinkConnector {

  val logger = org.slf4j.LoggerFactory.getLogger(getClass.getName)

  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  private var props: util.Map[String, String] = _

  override def version(): String = manifest.version()
  override def taskClass(): Class[_ <: Task] = classOf[HiveSinkTask]
  override def config(): ConfigDef = HiveSinkConfigDef.config

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(s"Creating hive sink connector")
    this.props = props
  }

  override def stop(): Unit = ()

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Creating $maxTasks tasks config")
    List.fill(maxTasks)(props).asJava
  }
}
Example 11
Source File: ReThinkSourceConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.rethink.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits.{BaseConfig, BatchSizeSettings, DatabaseSettings, KcqlSettings}
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object ReThinkSourceConfig extends ReThinkConfig {
  val config: ConfigDef = baseConfig
    .define(ReThinkConfigConstants.SOURCE_LINGER_MS, Type.LONG,
      ReThinkConfigConstants.SOURCE_LINGER_MS_DEFAULT, Importance.MEDIUM,
      ReThinkConfigConstants.SOURCE_LINGER_MS_DOC,
      "Connection", 10, ConfigDef.Width.MEDIUM, ReThinkConfigConstants.SOURCE_LINGER_MS)
}

case class ReThinkSourceConfig(props: util.Map[String, String])
    extends BaseConfig(ReThinkConfigConstants.RETHINK_CONNECTOR_PREFIX, ReThinkSourceConfig.config, props)
    with KcqlSettings
    with DatabaseSettings
    with BatchSizeSettings
Example 12
Source File: HiveSourceConnector.scala From stream-reactor with Apache License 2.0

package com.landoop.streamreactor.connect.hive.source

import java.util

import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.landoop.streamreactor.connect.hive.sink.config.HiveSinkConfigDef
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector

import scala.collection.JavaConverters._

class HiveSourceConnector extends SourceConnector with StrictLogging {

  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  private var props: util.Map[String, String] = _

  override def version(): String = manifest.version()
  override def taskClass(): Class[_ <: Task] = classOf[HiveSourceTask]
  // Note: the source connector reuses the sink's ConfigDef.
  override def config(): ConfigDef = HiveSinkConfigDef.config

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(s"Creating hive source connector")
    this.props = props
  }

  override def stop(): Unit = ()

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Creating $maxTasks tasks config")
    List.fill(maxTasks)(props).asJava
  }
}
Example 13
Source File: HiveSourceConfigDef.scala From stream-reactor with Apache License 2.0

package com.landoop.streamreactor.connect.hive.source.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object HiveSourceConfigDef {

  import HiveSourceConfigConstants._

  val config: ConfigDef = new ConfigDef()
    .define(PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, PROGRESS_COUNTER_ENABLED_DEFAULT,
      Importance.MEDIUM, PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, PROGRESS_COUNTER_ENABLED_DISPLAY)
    .define(KcqlKey, Type.STRING, Importance.HIGH, KCQL_DOC)
    .define(DatabaseNameKey, Type.STRING, Importance.HIGH, DatabaseNameDoc)
    .define(MetastoreTypeKey, Type.STRING, Importance.HIGH, MetastoreTypeDoc)
    .define(MetastoreUrisKey, Type.STRING, Importance.HIGH, MetastoreUrisDoc)
    .define(FsDefaultKey, Type.STRING, Importance.HIGH, FsDefaultDoc)
    .define(PollSizeKey, Type.INT, 1024, Importance.HIGH, PollSizeDoc)
    //config folders
    .define(HdfsConfigDirKey, Type.STRING, HdfsConfigDirDefault, Importance.MEDIUM, HdfsConfigDirDoc,
      "Configs", 1, ConfigDef.Width.MEDIUM, HdfsConfigDirDisplay)
    .define(HiveConfigDirKey, Type.STRING, HiveConfigDirDefault, Importance.MEDIUM, HiveConfigDirDoc,
      "Configs", 2, ConfigDef.Width.MEDIUM, HiveConfigDirDisplay)
    //security
    .define(KerberosKey, Type.BOOLEAN, KerberosDefault, Importance.MEDIUM, KerberosDoc,
      "Security", 1, ConfigDef.Width.MEDIUM, KerberosDisplay)
    .define(KerberosAuthModeKey, Type.STRING, KerberosAuthModeDefault, Importance.MEDIUM, KerberosAuthModeDoc,
      "Security", 2, ConfigDef.Width.MEDIUM, KerberosAuthModeDisplay)
    //keytab
    .define(PrincipalKey, Type.STRING, PrincipalDefault, Importance.MEDIUM, PrincipalDoc,
      "Kerberos Keytab", 1, ConfigDef.Width.MEDIUM, PrincipalDisplay)
    .define(KerberosKeyTabKey, Type.STRING, KerberosKeyTabDefault, Importance.MEDIUM, KerberosKeyTabDoc,
      "Kerberos Keytab", 2, ConfigDef.Width.MEDIUM, KerberosKeyTabDisplay)
    .define(NameNodePrincipalKey, Type.STRING, NameNodePrincipalDefault, Importance.MEDIUM, NameNodePrincipalDoc,
      "Kerberos Keytab", 3, ConfigDef.Width.MEDIUM, NameNodePrincipalDisplay)
    .define(KerberosTicketRenewalKey, Type.LONG, KerberosTicketRenewalDefault, Importance.MEDIUM, KerberosTicketRenewalDoc,
      "Kerberos Keytab", 4, ConfigDef.Width.MEDIUM, KerberosTicketRenewalDisplay)
    //user password
    .define(KerberosUserKey, Type.STRING, KerberosUserDefault, Importance.MEDIUM, KerberosUserDoc,
      "Kerberos User Password", 1, ConfigDef.Width.MEDIUM, KerberosUserDisplay)
    .define(KerberosPasswordKey, Type.PASSWORD, KerberosPasswordDefault, Importance.MEDIUM, KerberosPasswordDoc,
      "Kerberos User Password", 2, ConfigDef.Width.MEDIUM, KerberosPasswordDisplay)
    .define(KerberosKrb5Key, Type.STRING, KerberosKrb5Default, Importance.MEDIUM, KerberosKrb5Doc,
      "Kerberos User Password", 3, ConfigDef.Width.MEDIUM, KerberosKrb5Display)
    .define(KerberosJaasKey, Type.STRING, KerberosJaasDefault, Importance.MEDIUM, KerberosJaasDoc,
      "Kerberos User Password", 4, ConfigDef.Width.MEDIUM, KerberosJaasDisplay)
    .define(JaasEntryNameKey, Type.STRING, JaasEntryNameDefault, Importance.MEDIUM, JaasEntryNameDoc,
      "Kerberos User Password", 5, ConfigDef.Width.MEDIUM, JaasEntryNameDisplay)
}

case class HiveSourceConfigDefBuilder(props: util.Map[String, String])
    extends BaseConfig(HiveSourceConfigConstants.CONNECTOR_PREFIX, HiveSourceConfigDef.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with UserSettings
    with ConnectionSettings
Example 14
Source File: VoltSinkConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.voltdb.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object VoltSinkConfig {
  val config: ConfigDef = new ConfigDef()
    .define(VoltSinkConfigConstants.SERVERS_CONFIG, Type.STRING, Importance.HIGH,
      VoltSinkConfigConstants.SERVERS_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.SERVERS_CONFIG)
    .define(VoltSinkConfigConstants.USER_CONFIG, Type.STRING, Importance.HIGH,
      VoltSinkConfigConstants.USER_DOC,
      "Connection", 2, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.USER_CONFIG)
    .define(VoltSinkConfigConstants.PASSWORD_CONFIG, Type.PASSWORD, Importance.HIGH,
      VoltSinkConfigConstants.PASSWORD_DOC,
      "Connection", 3, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.PASSWORD_CONFIG)
    .define(VoltSinkConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH,
      VoltSinkConfigConstants.KCQL_CONFIG_DOC,
      "Connection", 4, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.KCQL_CONFIG)
    .define(VoltSinkConfigConstants.ERROR_POLICY_CONFIG, Type.STRING,
      VoltSinkConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
      VoltSinkConfigConstants.ERROR_POLICY_DOC,
      "Connection", 5, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.ERROR_POLICY_CONFIG)
    .define(VoltSinkConfigConstants.ERROR_RETRY_INTERVAL_CONFIG, Type.INT,
      VoltSinkConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM,
      VoltSinkConfigConstants.ERROR_RETRY_INTERVAL_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.ERROR_RETRY_INTERVAL_CONFIG)
    .define(VoltSinkConfigConstants.NBR_OF_RETRIES_CONFIG, Type.INT,
      VoltSinkConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM,
      VoltSinkConfigConstants.NBR_OF_RETRIES_DOC,
      "Connection", 6, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.NBR_OF_RETRIES_CONFIG)
    .define(VoltSinkConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN,
      VoltSinkConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM,
      VoltSinkConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, VoltSinkConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}

case class VoltSinkConfig(props: util.Map[String, String])
    extends BaseConfig(VoltSinkConfigConstants.CONNECTOR_PREFIX, VoltSinkConfig.config, props)
    with KcqlSettings
    with UserSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
Example 15
Source File: CoapSinkConnector.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.coap.sink

import java.util

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSinkConfig}
import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class CoapSinkConnector extends SinkConnector with StrictLogging {
  private var configProps: util.Map[String, String] = _
  private val configDef = CoapSinkConfig.config
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def taskClass(): Class[_ <: Task] = classOf[CoapSinkTask]

  override def start(props: util.Map[String, String]): Unit = {
    Helpers.checkInputTopics(CoapConstants.COAP_KCQL, props.asScala.toMap)
    configProps = props
  }

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Setting task configurations for $maxTasks workers.")
    (1 to maxTasks).map(_ => configProps).toList.asJava
  }

  override def stop(): Unit = {}
  override def config(): ConfigDef = configDef
  override def version(): String = manifest.version()
}
Example 16
Source File: CoapSourceConnector.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.coap.source

import java.util

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSourceConfig}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector
import org.apache.kafka.connect.util.ConnectorUtils

import scala.collection.JavaConverters._

class CoapSourceConnector extends SourceConnector {
  private var configProps: util.Map[String, String] = _
  private val configDef = CoapSourceConfig.config
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def taskClass(): Class[_ <: Task] = classOf[CoapSourceTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    val raw = configProps.get(CoapConstants.COAP_KCQL)
    require(raw != null && !raw.isEmpty, s"No ${CoapConstants.COAP_KCQL} provided!")

    //sql1, sql2
    val kcqls = raw.split(";")
    val groups = ConnectorUtils.groupPartitions(kcqls.toList.asJava, maxTasks).asScala

    //split up the kcql statement based on the number of tasks.
    groups
      .filterNot(g => g.asScala.isEmpty)
      .map(g => {
        val taskConfigs = new java.util.HashMap[String, String]
        taskConfigs.putAll(configProps)
        taskConfigs.put(CoapConstants.COAP_KCQL, g.asScala.mkString(";")) //overwrite
        taskConfigs.asScala.asJava
      })
  }.asJava

  override def config(): ConfigDef = configDef
  override def start(props: util.Map[String, String]): Unit = configProps = props
  override def stop(): Unit = {}
  override def version(): String = manifest.version()
}
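The fan-out above relies on ConnectorUtils.groupPartitions, which splits a list into at most maxTasks groups as evenly as possible. A rough illustration with made-up KCQL statements:

    import org.apache.kafka.connect.util.ConnectorUtils
    import scala.collection.JavaConverters._

    // Three statements spread over two tasks yield groups of sizes 2 and 1;
    // each group is re-joined with ';' and becomes one task's KCQL property.
    val kcqls = List("INSERT INTO t1 SELECT * FROM topicA",
                     "INSERT INTO t2 SELECT * FROM topicB",
                     "INSERT INTO t3 SELECT * FROM topicC")
    val groups = ConnectorUtils.groupPartitions(kcqls.asJava, 2).asScala
    groups.map(_.asScala.mkString(";"))
    // e.g. Buffer("INSERT INTO t1 ...;INSERT INTO t2 ...", "INSERT INTO t3 ...")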
Example 17
Source File: FtpSourceConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.ftp.source

import java.util

import com.datamountaineer.streamreactor.connect.ftp.source.MonitorMode.MonitorMode
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

import scala.collection.JavaConverters._

object MonitorMode extends Enumeration {
  type MonitorMode = Value
  val Tail, Update = Value
}

case class MonitorConfig(topic: String, path: String, mode: MonitorMode)

object KeyStyle extends Enumeration {
  type KeyStyle = Value
  val String = Value(FtpSourceConfig.StringKeyStyle)
  val Struct = Value(FtpSourceConfig.StructKeyStyle)
}

object FtpProtocol extends Enumeration {
  type FtpProtocol = Value
  val FTPS, FTP = Value
}

import com.datamountaineer.streamreactor.connect.ftp.source.KeyStyle._

object FtpSourceConfig {
  val Address = "connect.ftp.address"
  val User = "connect.ftp.user"
  val Password = "connect.ftp.password"
  val FtpTimeout = "connect.ftp.timeout"
  val MaxBackoff = "connect.ftp.max.backoff"
  val RefreshRate = "connect.ftp.refresh"
  val MonitorTail = "connect.ftp.monitor.tail"
  val MonitorUpdate = "connect.ftp.monitor.update"
  val MonitorSliceSize = "connect.ftp.monitor.slicesize"
  val FileMaxAge = "connect.ftp.file.maxage"
  val KeyStyle = "connect.ftp.keystyle"
  val StringKeyStyle = "string"
  val StructKeyStyle = "struct"
  val FileConverter = "connect.ftp.fileconverter"
  val SourceRecordConverter = "connect.ftp.sourcerecordconverter"
  val FtpMaxPollRecords = "connect.ftp.max.poll.records"
  val protocol = "connect.ftp.protocol"
  val fileFilter = "connect.ftp.filter"

  val definition: ConfigDef = new ConfigDef()
    .define(Address, Type.STRING, Importance.HIGH, "ftp address[:port]")
    .define(User, Type.STRING, Importance.HIGH, "ftp user name to login")
    .define(Password, Type.PASSWORD, Importance.HIGH, "ftp password to login")
    .define(RefreshRate, Type.STRING, Importance.HIGH, "how often the ftp server is polled; ISO8601 duration")
    .define(MaxBackoff, Type.STRING, "PT30M", Importance.HIGH,
      "on failure, exponentially backoff to at most this ISO8601 duration")
    .define(FileMaxAge, Type.STRING, Importance.HIGH, "ignore files older than this; ISO8601 duration")
    .define(MonitorTail, Type.LIST, "", Importance.HIGH,
      "comma separated lists of path:destinationtopic; tail of file is tracked")
    .define(MonitorUpdate, Type.LIST, "", Importance.HIGH,
      "comma separated lists of path:destinationtopic; whole file is tracked")
    .define(MonitorSliceSize, Type.INT, -1, Importance.HIGH, "slice size in bytes")
    .define(KeyStyle, Type.STRING, Importance.HIGH,
      s"what the output key is set to: `${StringKeyStyle}` => filename; `${StructKeyStyle}` => structure with filename and offset")
    .define(FileConverter, Type.CLASS,
      "com.datamountaineer.streamreactor.connect.ftp.source.SimpleFileConverter", Importance.HIGH, s"TODO")
    .define(SourceRecordConverter, Type.CLASS,
      "com.datamountaineer.streamreactor.connect.ftp.source.NopSourceRecordConverter", Importance.HIGH, s"TODO")
    .define(FtpMaxPollRecords, Type.INT, 10000, Importance.LOW, "Max number of records returned per poll")
    .define(protocol, Type.STRING, "ftp", Importance.LOW, "FTPS or FTP protocol")
    .define(fileFilter, Type.STRING, ".*", Importance.LOW,
      "Regular expression to use when selecting files for processing ignoring file which do not match")
    .define(FtpTimeout, Type.INT, 30000, Importance.LOW, "Ftp connection timeout in milliseconds")
}

// abstracts the properties away a bit
class FtpSourceConfig(props: util.Map[String, String])
    extends AbstractConfig(FtpSourceConfig.definition, props) {

  // don't leak our ugly config!
  def ftpMonitorConfigs(): Seq[MonitorConfig] = {
    lazy val topicPathRegex = "([^:]*):(.*)".r
    getList(FtpSourceConfig.MonitorTail).asScala.map {
      case topicPathRegex(path, topic) => MonitorConfig(topic, path, mode = MonitorMode.Tail)
    } ++ getList(FtpSourceConfig.MonitorUpdate).asScala.map {
      case topicPathRegex(path, topic) => MonitorConfig(topic, path, mode = MonitorMode.Update)
    }
  }

  def address(): (String, Option[Int]) = {
    lazy val hostIpRegex = "([^:]*):?([0-9]*)".r
    val hostIpRegex(host, port) = getString(FtpSourceConfig.Address)
    (host, if (port.isEmpty) None else Some(port.toInt))
  }

  def keyStyle(): KeyStyle =
    KeyStyle.values.find(_.toString.toLowerCase == getString(FtpSourceConfig.KeyStyle)).get

  def sourceRecordConverter(): SourceRecordConverter =
    getConfiguredInstance(FtpSourceConfig.SourceRecordConverter, classOf[SourceRecordConverter])

  def fileConverter = getClass(FtpSourceConfig.FileConverter)

  def timeoutMs() = getInt(FtpSourceConfig.FtpTimeout)

  def maxPollRecords = getInt(FtpSourceConfig.FtpMaxPollRecords)

  def getProtocol = FtpProtocol.withName(getString(FtpSourceConfig.protocol).toUpperCase)
}
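A usage sketch for the helper accessors, with assumed property values (every key declared without a default must be supplied, or AbstractConfig throws a ConfigException):

    import scala.collection.JavaConverters._

    val props = Map(
      FtpSourceConfig.Address     -> "ftp.example.com:2121",
      FtpSourceConfig.User        -> "user",
      FtpSourceConfig.Password    -> "secret",
      FtpSourceConfig.RefreshRate -> "PT1M",
      FtpSourceConfig.FileMaxAge  -> "P7D",
      FtpSourceConfig.KeyStyle    -> FtpSourceConfig.StringKeyStyle,
      FtpSourceConfig.MonitorTail -> "/logs/:logs-topic"
    ).asJava

    val cfg = new FtpSourceConfig(props)
    cfg.address()           // ("ftp.example.com", Some(2121))
    cfg.ftpMonitorConfigs() // Seq(MonitorConfig("logs-topic", "/logs/", Tail))
    cfg.keyStyle()          // KeyStyle.String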
Example 18
Source File: TwitterSinkConfig.scala From kafka-tweet-producer with Apache License 2.0

package com.eneco.trading.kafka.connect.twitter

import java.util

import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.connect.sink.SinkTask

object TwitterSinkConfig {
  val CONSUMER_KEY_CONFIG = "twitter.consumerkey"
  val CONSUMER_KEY_CONFIG_DOC = "Twitter account consumer key."
  val CONSUMER_SECRET_CONFIG = "twitter.consumersecret"
  val CONSUMER_SECRET_CONFIG_DOC = "Twitter account consumer secret."
  val TOKEN_CONFIG = "twitter.token"
  val TOKEN_CONFIG_DOC = "Twitter account token."
  val SECRET_CONFIG = "twitter.secret"
  val SECRET_CONFIG_DOC = "Twitter account secret."
  val TOPICS = SinkTask.TOPICS_CONFIG
  val TOPICS_DOC = "The Kafka topic to read from."

  val config: ConfigDef = new ConfigDef()
    .define(CONSUMER_KEY_CONFIG, Type.STRING, Importance.HIGH, CONSUMER_KEY_CONFIG_DOC)
    .define(CONSUMER_SECRET_CONFIG, Type.PASSWORD, Importance.HIGH, CONSUMER_SECRET_CONFIG_DOC)
    .define(TOKEN_CONFIG, Type.STRING, Importance.HIGH, TOKEN_CONFIG_DOC)
    .define(SECRET_CONFIG, Type.PASSWORD, Importance.HIGH, SECRET_CONFIG_DOC)
    .define(TOPICS, Type.LIST, Importance.HIGH, TOPICS_DOC)
}

class TwitterSinkConfig(props: util.Map[String, String])
    extends AbstractConfig(TwitterSinkConfig.config, props)
Example 19
Source File: TwitterSinkConfig.scala From kafka-connect-twitter with Apache License 2.0

package com.eneco.trading.kafka.connect.twitter

import java.util

import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.connect.sink.SinkTask

object TwitterSinkConfig {
  val CONSUMER_KEY_CONFIG = "twitter.consumerkey"
  val CONSUMER_KEY_CONFIG_DOC = "Twitter account consumer key."
  val CONSUMER_SECRET_CONFIG = "twitter.consumersecret"
  val CONSUMER_SECRET_CONFIG_DOC = "Twitter account consumer secret."
  val TOKEN_CONFIG = "twitter.token"
  val TOKEN_CONFIG_DOC = "Twitter account token."
  val SECRET_CONFIG = "twitter.secret"
  val SECRET_CONFIG_DOC = "Twitter account secret."
  val TOPICS = SinkTask.TOPICS_CONFIG
  val TOPICS_DOC = "The Kafka topic to read from."

  val config: ConfigDef = new ConfigDef()
    .define(CONSUMER_KEY_CONFIG, Type.STRING, Importance.HIGH, CONSUMER_KEY_CONFIG_DOC)
    .define(CONSUMER_SECRET_CONFIG, Type.PASSWORD, Importance.HIGH, CONSUMER_SECRET_CONFIG_DOC)
    .define(TOKEN_CONFIG, Type.STRING, Importance.HIGH, TOKEN_CONFIG_DOC)
    .define(SECRET_CONFIG, Type.PASSWORD, Importance.HIGH, SECRET_CONFIG_DOC)
    .define(TOPICS, Type.LIST, Importance.HIGH, TOPICS_DOC)
}

class TwitterSinkConfig(props: util.Map[String, String])
    extends AbstractConfig(TwitterSinkConfig.config, props)
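Because the secrets are declared as Type.PASSWORD, AbstractConfig returns them as org.apache.kafka.common.config.types.Password, whose toString masks the value; call value() to read it. A usage sketch with placeholder credentials:

    import scala.collection.JavaConverters._

    val cfg = new TwitterSinkConfig(Map(
      TwitterSinkConfig.CONSUMER_KEY_CONFIG    -> "key",          // placeholder
      TwitterSinkConfig.CONSUMER_SECRET_CONFIG -> "secret",       // placeholder
      TwitterSinkConfig.TOKEN_CONFIG           -> "token",        // placeholder
      TwitterSinkConfig.SECRET_CONFIG          -> "tokensecret",  // placeholder
      TwitterSinkConfig.TOPICS                 -> "tweets"
    ).asJava)

    cfg.getPassword(TwitterSinkConfig.CONSUMER_SECRET_CONFIG).value() // "secret"
    cfg.getList(TwitterSinkConfig.TOPICS)                             // ["tweets"]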
Example 20
Source File: JMSConfig.scala From stream-reactor with Apache License 2.0

package com.datamountaineer.streamreactor.connect.jms.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object JMSConfig {
  val config: ConfigDef = new ConfigDef()
    .define(JMSConfigConstants.JMS_URL, Type.STRING, Importance.HIGH,
      JMSConfigConstants.JMS_URL_DOC,
      "Connection", 1, ConfigDef.Width.MEDIUM, JMSConfigConstants.JMS_URL)
    .define(JMSConfigConstants.INITIAL_CONTEXT_FACTORY, Type.STRING, Importance.HIGH,
      JMSConfigConstants.INITIAL_CONTEXT_FACTORY_DOC,
      "Connection", 2, ConfigDef.Width.MEDIUM, JMSConfigConstants.INITIAL_CONTEXT_FACTORY)
    .define(JMSConfigConstants.CONNECTION_FACTORY, Type.STRING,
      JMSConfigConstants.CONNECTION_FACTORY_DEFAULT, Importance.HIGH,
      JMSConfigConstants.CONNECTION_FACTORY_DOC,
      "Connection", 3, ConfigDef.Width.MEDIUM, JMSConfigConstants.CONNECTION_FACTORY)
    .define(JMSConfigConstants.KCQL, Type.STRING, Importance.HIGH,
      JMSConfigConstants.KCQL,
      "Connection", 4, ConfigDef.Width.MEDIUM, JMSConfigConstants.KCQL)
    .define(JMSConfigConstants.TOPIC_SUBSCRIPTION_NAME, Type.STRING, null, Importance.HIGH,
      JMSConfigConstants.TOPIC_SUBSCRIPTION_NAME_DOC,
      "Connection", 5, ConfigDef.Width.MEDIUM, JMSConfigConstants.TOPIC_SUBSCRIPTION_NAME)
    .define(JMSConfigConstants.JMS_PASSWORD, Type.PASSWORD, null, Importance.HIGH,
      JMSConfigConstants.JMS_PASSWORD_DOC,
      "Connection", 6, ConfigDef.Width.MEDIUM, JMSConfigConstants.JMS_PASSWORD)
    .define(JMSConfigConstants.JMS_USER, Type.STRING, null, Importance.HIGH,
      JMSConfigConstants.JMS_USER_DOC,
      "Connection", 7, ConfigDef.Width.MEDIUM, JMSConfigConstants.JMS_USER)
    .define(JMSConfigConstants.ERROR_POLICY, Type.STRING,
      JMSConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH,
      JMSConfigConstants.ERROR_POLICY_DOC,
      "Connection", 8, ConfigDef.Width.MEDIUM, JMSConfigConstants.ERROR_POLICY)
    .define(JMSConfigConstants.ERROR_RETRY_INTERVAL, Type.INT,
      JMSConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.ERROR_RETRY_INTERVAL_DOC,
      "Connection", 9, ConfigDef.Width.MEDIUM, JMSConfigConstants.ERROR_RETRY_INTERVAL)
    .define(JMSConfigConstants.NBR_OF_RETRIES, Type.INT,
      JMSConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.NBR_OF_RETRIES_DOC,
      "Connection", 10, ConfigDef.Width.MEDIUM, JMSConfigConstants.NBR_OF_RETRIES)
    .define(JMSConfigConstants.DESTINATION_SELECTOR, Type.STRING,
      JMSConfigConstants.DESTINATION_SELECTOR_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.DESTINATION_SELECTOR_DOC,
      "Connection", 11, ConfigDef.Width.MEDIUM, JMSConfigConstants.DESTINATION_SELECTOR)
    .define(JMSConfigConstants.EXTRA_PROPS, Type.LIST,
      JMSConfigConstants.EXTRA_PROPS_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.EXTRA_PROPS_DOC,
      "Connection", 12, ConfigDef.Width.MEDIUM, JMSConfigConstants.EXTRA_PROPS)
    .define(JMSConfigConstants.BATCH_SIZE, Type.INT,
      JMSConfigConstants.BATCH_SIZE_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.BATCH_SIZE_DOC,
      "Connection", 13, ConfigDef.Width.MEDIUM, JMSConfigConstants.BATCH_SIZE)
    .define(JMSConfigConstants.POLLING_TIMEOUT_CONFIG, Type.LONG,
      JMSConfigConstants.POLLING_TIMEOUT_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.POLLING_TIMEOUT_DOC,
      "Connection", 14, ConfigDef.Width.MEDIUM, JMSConfigConstants.POLLING_TIMEOUT_CONFIG)
    //converters
    .define(JMSConfigConstants.DEFAULT_CONVERTER_CONFIG, Type.STRING, "", Importance.HIGH,
      JMSConfigConstants.DEFAULT_CONVERTER_DOC,
      "Converter", 1, ConfigDef.Width.MEDIUM, JMSConfigConstants.DEFAULT_CONVERTER_DISPLAY)
    .define(JMSConfigConstants.THROW_ON_CONVERT_ERRORS_CONFIG, Type.BOOLEAN,
      JMSConfigConstants.THROW_ON_CONVERT_ERRORS_DEFAULT, Importance.HIGH,
      JMSConfigConstants.THROW_ON_CONVERT_ERRORS_DOC,
      "Converter", 2, ConfigDef.Width.MEDIUM, JMSConfigConstants.THROW_ON_CONVERT_ERRORS_DISPLAY)
    .define(JMSConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES, Type.STRING,
      JMSConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES_DEFAULT, Importance.HIGH,
      JMSConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES_DOC,
      "Converter", 3, ConfigDef.Width.MEDIUM, JMSConfigConstants.AVRO_CONVERTERS_SCHEMA_FILES)
    .define(JMSConfigConstants.HEADERS_CONFIG, Type.STRING, "", Importance.LOW,
      JMSConfigConstants.HEADERS_CONFIG_DOC,
      "Converter", 4, ConfigDef.Width.MEDIUM, JMSConfigConstants.HEADERS_CONFIG_DISPLAY)
    .define(JMSConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN,
      JMSConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.PROGRESS_COUNTER_ENABLED_DOC,
      "Metrics", 1, ConfigDef.Width.MEDIUM, JMSConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
    .define(JMSConfigConstants.EVICT_UNCOMMITTED_MINUTES, Type.INT,
      JMSConfigConstants.EVICT_UNCOMMITTED_MINUTES_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.EVICT_UNCOMMITTED_MINUTES_DOC,
      "Settings", 1, ConfigDef.Width.MEDIUM, JMSConfigConstants.EVICT_UNCOMMITTED_MINUTES_DOC)
    .define(JMSConfigConstants.EVICT_THRESHOLD_MINUTES, Type.INT,
      JMSConfigConstants.EVICT_THRESHOLD_MINUTES_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.EVICT_THRESHOLD_MINUTES_DOC,
      "Settings", 2, ConfigDef.Width.MEDIUM, JMSConfigConstants.EVICT_THRESHOLD_MINUTES_DOC)
    .define(JMSConfigConstants.TASK_PARALLELIZATION_TYPE, Type.STRING,
      JMSConfigConstants.TASK_PARALLELIZATION_TYPE_DEFAULT, Importance.MEDIUM,
      JMSConfigConstants.TASK_PARALLELIZATION_TYPE_DOC,
      "Settings", 4, ConfigDef.Width.MEDIUM, JMSConfigConstants.TASK_PARALLELIZATION_TYPE_DOC)
}

case class JMSConfig(props: util.Map[String, String])
    extends BaseConfig(JMSConfigConstants.CONNECTOR_PREFIX, JMSConfig.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with UserSettings
    with ConnectionSettings
Example 21
Source File: IotHubSinkConfig.scala From toketi-kafka-connect-iothub with MIT License

package com.microsoft.azure.iot.kafka.connect.sink

import java.util.Map

import com.microsoft.azure.sdk.iot.service.DeliveryAcknowledgement
import org.apache.kafka.common.config.ConfigDef.{Importance, Type, Width}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

object IotHubSinkConfig {

  val IotHubConnectionString = "IotHub.ConnectionString"
  val IotHubMessageDeliveryAcknowledgement = "IotHub.MessageDeliveryAcknowledgement"

  private val IotHubConnectionStringDoc =
    """IoT Hub ConnectionString. (see "IoT Hub" >> your hub >> "Shared access policies" >> "service" >> """ +
      """"Connection string")"""
  private val IotHubMessageDeliveryAcknowledgementDoc =
    "The type of delivery acknowledgement for a C2D message. " +
      "Valid values are None, Full, NegativeOnly, PositiveOnly"
  private val iotConfigGroup = "Azure IoT Hub"

  private val validDeliveryAcknowledgementString = ConfigDef.ValidString.in(
    DeliveryAcknowledgement.None.toString,
    DeliveryAcknowledgement.Full.toString,
    DeliveryAcknowledgement.PositiveOnly.toString,
    DeliveryAcknowledgement.NegativeOnly.toString)

  lazy val configDef = new ConfigDef()
    .define(IotHubConnectionString, Type.STRING, Importance.HIGH, IotHubConnectionStringDoc,
      iotConfigGroup, 1, Width.MEDIUM, "IoT Hub Connection String")
    .define(IotHubMessageDeliveryAcknowledgement, Type.STRING,
      DeliveryAcknowledgement.None.toString, validDeliveryAcknowledgementString,
      Importance.HIGH, IotHubMessageDeliveryAcknowledgementDoc,
      iotConfigGroup, 1, Width.MEDIUM, "Delivery acknowledgement")

  def getConfig(configValues: Map[String, String]): IotHubSinkConfig = {
    new IotHubSinkConfig(configDef, configValues)
  }
}

class IotHubSinkConfig(configDef: ConfigDef, configValues: Map[String, String])
    extends AbstractConfig(configDef, configValues)
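The ValidString validator runs when the config is parsed, so an acknowledgement outside the allowed set fails fast rather than at delivery time. A sketch with placeholder values:

    import scala.collection.JavaConverters._

    val cfg = IotHubSinkConfig.getConfig(Map(
      IotHubSinkConfig.IotHubConnectionString -> "HostName=placeholder", // placeholder value
      IotHubSinkConfig.IotHubMessageDeliveryAcknowledgement -> "Full"
    ).asJava)

    // Passing e.g. "Sometimes" instead of None/Full/NegativeOnly/PositiveOnly
    // would throw org.apache.kafka.common.config.ConfigException here.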
Example 22
Source File: IotHubSinkConnector.scala From toketi-kafka-connect-iothub with MIT License

package com.microsoft.azure.iot.kafka.connect.sink

import java.util

import com.microsoft.azure.iot.kafka.connect.source.JsonSerialization
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.common.config.{ConfigDef, ConfigException}
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class IotHubSinkConnector extends SinkConnector with LazyLogging with JsonSerialization {

  private[this] var props: Map[String, String] = _

  override def taskClass(): Class[_ <: Task] = classOf[IotHubSinkTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    (1 to maxTasks).map(_ => this.props.asJava).toList.asJava
  }

  override def stop(): Unit = {
    logger.info("Stopping IotHubSinkConnector")
  }

  override def config(): ConfigDef = IotHubSinkConfig.configDef

  override def start(props: util.Map[String, String]): Unit = {
    logger.info("Starting IotHubSinkConnector")
    try {
      val iotHubSinkConfig = IotHubSinkConfig.getConfig(props)
      this.props = Map[String, String](
        IotHubSinkConfig.IotHubConnectionString ->
          iotHubSinkConfig.getString(IotHubSinkConfig.IotHubConnectionString),
        IotHubSinkConfig.IotHubMessageDeliveryAcknowledgement ->
          iotHubSinkConfig.getString(IotHubSinkConfig.IotHubMessageDeliveryAcknowledgement)
      )
    } catch {
      case ex: ConfigException =>
        throw new ConnectException("Could not start IotHubSinkConnector due to a configuration exception", ex)
    }
  }

  override def version(): String = getClass.getPackage.getImplementationVersion
}
Example 23
Source File: IotHubSourceConfig.scala From toketi-kafka-connect-iothub with MIT License

// Copyright (c) Microsoft. All rights reserved.

package com.microsoft.azure.iot.kafka.connect.source

import java.util.Map

import com.microsoft.azure.eventhubs.EventHubClient
import org.apache.kafka.common.config.ConfigDef.{Importance, Type, Width}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

object IotHubSourceConfig {

  private val defaultBatchSize = 100
  private val defaultReceiveTimeout = 60
  private val iotConfigGroup = "Azure IoT Hub"
  private val kafkaConfig = "Kafka"

  val EventHubCompatibleConnectionString = "IotHub.EventHubCompatibleConnectionString"
  val EventHubCompatibleName = "IotHub.EventHubCompatibleName"
  val EventHubCompatibleNameDoc =
    """EventHub compatible name ("IoT Hub" >> your hub >> "Endpoints" >> "Events" >> "Event Hub-compatible name")"""
  val EventHubCompatibleEndpoint = "IotHub.EventHubCompatibleEndpoint"
  val EventHubCompatibleEndpointDoc =
    """EventHub compatible endpoint ("IoT Hub" >> your hub >> "Endpoints" >> "Events" >> "Event Hub-compatible """ +
      """endpoint")"""
  val IotHubAccessKeyName = "IotHub.AccessKeyName"
  val IotHubAccessKeyNameDoc =
    """IotHub access key name ("IoT Hub" >> your hub >> "Shared access policies", default is service)"""
  val IotHubAccessKeyValue = "IotHub.AccessKeyValue"
  val IotHubAccessKeyValueDoc =
    """IotHub access key value ("IoT Hub" >> your hub >> "Shared access policies" >> key name >> "Primary key")"""
  val IotHubConsumerGroup = "IotHub.ConsumerGroup"
  val IotHubConsumerGroupDoc = "The IoT Hub consumer group"
  val IotHubPartitions = "IotHub.Partitions"
  val IotHubPartitionsDoc = "Number of IoT Hub partitions"
  val KafkaTopic = "Kafka.Topic"
  val KafkaTopicDoc = "Kafka topic to copy data to"
  val BatchSize = "BatchSize"
  val BatchSizeDoc = "The batch size for fetching records from IoT Hub"
  val ReceiveTimeout = "ReceiveTimeout"
  val ReceiveTimeoutDoc = "Max time to spend receiving messages from IoT Hub"
  val IotHubOffset = "IotHub.Offsets"
  val IotHubOffsetDoc =
    "Offset for each partition in IotHub, as a comma separated string. This value is ignored if IotHubStartTime is specified."
  val IotHubStartTime = "IotHub.StartTime"
  val IotHubStartTimeDoc =
    "The time after which to process messages from IoT Hub. If this value is specified, IotHubOffset value is ignored."
  val TaskPartitionOffsetsMap = "TaskPartitions"

  lazy val configDef = new ConfigDef()
    .define(EventHubCompatibleName, Type.STRING, Importance.HIGH, EventHubCompatibleNameDoc,
      iotConfigGroup, 1, Width.MEDIUM, "Event Hub compatible name")
    .define(EventHubCompatibleEndpoint, Type.STRING, Importance.HIGH, EventHubCompatibleEndpointDoc,
      iotConfigGroup, 2, Width.MEDIUM, "Event Hub compatible endpoint")
    .define(IotHubAccessKeyName, Type.STRING, Importance.HIGH, IotHubAccessKeyNameDoc,
      iotConfigGroup, 3, Width.SHORT, "Access key name")
    .define(IotHubAccessKeyValue, Type.STRING, Importance.HIGH, IotHubAccessKeyValueDoc,
      iotConfigGroup, 4, Width.LONG, "Access key value")
    .define(IotHubConsumerGroup, Type.STRING, EventHubClient.DEFAULT_CONSUMER_GROUP_NAME,
      Importance.MEDIUM, IotHubConsumerGroupDoc,
      iotConfigGroup, 5, Width.SHORT, "Consumer group")
    .define(IotHubPartitions, Type.INT, Importance.HIGH, IotHubPartitionsDoc,
      iotConfigGroup, 6, Width.SHORT, "IoT Hub partitions")
    .define(IotHubStartTime, Type.STRING, "", Importance.MEDIUM, IotHubStartTimeDoc,
      iotConfigGroup, 7, Width.MEDIUM, "Start time")
    .define(IotHubOffset, Type.STRING, "", Importance.MEDIUM, IotHubOffsetDoc,
      iotConfigGroup, 8, Width.MEDIUM, "Per partition offsets")
    .define(BatchSize, Type.INT, defaultBatchSize, Importance.MEDIUM, BatchSizeDoc,
      iotConfigGroup, 9, Width.SHORT, "Batch size")
    .define(ReceiveTimeout, Type.INT, defaultReceiveTimeout, Importance.MEDIUM, ReceiveTimeoutDoc,
      iotConfigGroup, 10, Width.SHORT, "Receive Timeout")
    .define(KafkaTopic, Type.STRING, Importance.HIGH, KafkaTopicDoc,
      kafkaConfig, 11, Width.MEDIUM, "Kafka topic")

  def getConfig(configValues: Map[String, String]): IotHubSourceConfig = {
    new IotHubSourceConfig(configDef, configValues)
  }
}

class IotHubSourceConfig(configDef: ConfigDef, configValues: Map[String, String])
    extends AbstractConfig(configDef, configValues)
Example 24
Source File: Conf.scala From sqs-kafka-connect with Apache License 2.0

package com.hivehome.kafka.connect.sqs

import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.connect.errors.ConnectException

import scala.util.Try

case class Conf(queueName: Option[String] = None,
                topicName: Option[String] = None,
                awsRegion: String = "eu-west-1",
                awsKey: Option[String] = None,
                awsSecret: Option[String] = None) {

  def toMap: Map[String, String] = {
    import Conf._
    Map[String, Option[String]](
      SourceSqsQueue -> queueName,
      DestinationKafkaTopic -> topicName,
      AwsKey -> awsKey,
      AwsSecret -> awsSecret,
      AwsRegion -> Some(awsRegion)
    ).collect { case (k, Some(v)) => (k, v) }
  }
}

object Conf {
  val DestinationKafkaTopic = "destination.topic"
  val SourceSqsQueue = "source.queue"
  val AwsKey = "aws.key"
  val AwsSecret = "aws.secret"
  val AwsRegion = "aws.region"

  val ConfigDef = new ConfigDef()
    .define(SourceSqsQueue, Type.STRING, Importance.HIGH, "Source SQS queue name to consumer from.")
    .define(DestinationKafkaTopic, Type.STRING, Importance.HIGH, "Destination Kafka topicName to publish data to")
    .define(AwsKey, Type.STRING, "", Importance.MEDIUM, "AWS Key to connect to SQS")
    .define(AwsSecret, Type.STRING, "", Importance.MEDIUM, "AWS secret to connect to SQS")

  def parse(props: Map[String, String]): Try[Conf] = Try {
    val queueName = props.get(Conf.SourceSqsQueue)
    val topicName = props.get(Conf.DestinationKafkaTopic)
    val awsKey = props.get(Conf.AwsKey).filter(_.nonEmpty)
    val awsSecret = props.get(Conf.AwsSecret).filter(_.nonEmpty)
    val awsRegion = props.get(Conf.AwsRegion)

    if (queueName == null || queueName.isEmpty)
      throw new ConnectException("Configuration must include 'queueName' setting")
    if (queueName.contains(","))
      throw new ConnectException("Configuration should only have a single queueName when used as a source.")
    if (topicName == null || topicName.isEmpty)
      throw new ConnectException("Configuration must include 'topicName' setting")

    val conf = Conf(queueName = queueName, topicName = topicName, awsKey = awsKey, awsSecret = awsSecret)
    awsRegion match {
      case Some(region) => conf.copy(awsRegion = region)
      case _ => conf
    }
  }
}
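Since parse wraps its body in Try, configuration errors surface as Failure values rather than thrown exceptions. A usage sketch:

    val result = Conf.parse(Map(
      Conf.SourceSqsQueue        -> "my-queue",
      Conf.DestinationKafkaTopic -> "my-topic",
      Conf.AwsRegion             -> "eu-west-1"
    ))
    // result: Success(Conf(Some("my-queue"), Some("my-topic"), "eu-west-1", None, None))

    val missing = Conf.parse(Map.empty)
    // missing: Failure(ConnectException: Configuration must include 'queueName' setting)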
Example 25
Source File: DocumentDbConfig.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.azure.documentdb.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import com.microsoft.azure.documentdb.ConsistencyLevel
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object DocumentDbConfig {
  val config: ConfigDef = new ConfigDef()
    .define(DocumentDbConfigConstants.CONNECTION_CONFIG, Type.STRING, Importance.HIGH, DocumentDbConfigConstants.CONNECTION_CONFIG_DOC, "Connection", 1, ConfigDef.Width.LONG, DocumentDbConfigConstants.CONNECTION_DISPLAY)
    .define(DocumentDbConfigConstants.MASTER_KEY_CONFIG, Type.PASSWORD, Importance.HIGH, DocumentDbConfigConstants.MASTER_KEY_DOC, "Connection", 2, ConfigDef.Width.LONG, DocumentDbConfigConstants.MASTER_KEY_CONFIG)
    .define(DocumentDbConfigConstants.CONSISTENCY_CONFIG, Type.STRING, DocumentDbConfigConstants.CONSISTENCY_DEFAULT, Importance.HIGH, DocumentDbConfigConstants.CONSISTENCY_DOC, "Connection", 3, ConfigDef.Width.LONG, DocumentDbConfigConstants.CONSISTENCY_DISPLAY)
    .define(DocumentDbConfigConstants.DATABASE_CONFIG, Type.STRING, Importance.HIGH, DocumentDbConfigConstants.DATABASE_CONFIG_DOC, "Connection", 4, ConfigDef.Width.MEDIUM, DocumentDbConfigConstants.DATABASE_CONFIG)
    .define(DocumentDbConfigConstants.CREATE_DATABASE_CONFIG, Type.BOOLEAN, DocumentDbConfigConstants.CREATE_DATABASE_DEFAULT, Importance.MEDIUM, DocumentDbConfigConstants.CREATE_DATABASE_DOC, "Connection", 5, ConfigDef.Width.MEDIUM, DocumentDbConfigConstants.CREATE_DATABASE_DISPLAY)
    .define(DocumentDbConfigConstants.PROXY_HOST_CONFIG, Type.STRING, null, Importance.LOW, DocumentDbConfigConstants.PROXY_HOST_DOC, "Connection", 6, ConfigDef.Width.MEDIUM, DocumentDbConfigConstants.PROXY_HOST_DISPLAY) // order bumped to 6: the published code reused 5, duplicating CREATE_DATABASE's slot
    .define(DocumentDbConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH, DocumentDbConfigConstants.KCQL_DOC, "Mappings", 1, ConfigDef.Width.LONG, DocumentDbConfigConstants.KCQL_CONFIG)
    .define(DocumentDbConfigConstants.ERROR_POLICY_CONFIG, Type.STRING, DocumentDbConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH, DocumentDbConfigConstants.ERROR_POLICY_DOC, "Error", 1, ConfigDef.Width.LONG, DocumentDbConfigConstants.ERROR_POLICY_CONFIG)
    .define(DocumentDbConfigConstants.NBR_OF_RETRIES_CONFIG, Type.INT, DocumentDbConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM, DocumentDbConfigConstants.NBR_OF_RETRIES_DOC, "Error", 2, ConfigDef.Width.LONG, DocumentDbConfigConstants.NBR_OF_RETRIES_CONFIG)
    .define(DocumentDbConfigConstants.ERROR_RETRY_INTERVAL_CONFIG, Type.INT, DocumentDbConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM, DocumentDbConfigConstants.ERROR_RETRY_INTERVAL_DOC, "Error", 3, ConfigDef.Width.LONG, DocumentDbConfigConstants.ERROR_RETRY_INTERVAL_CONFIG)
    .define(DocumentDbConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, DocumentDbConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM, DocumentDbConfigConstants.PROGRESS_COUNTER_ENABLED_DOC, "Metrics", 1, ConfigDef.Width.MEDIUM, DocumentDbConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}

case class DocumentDbConfig(props: util.Map[String, String])
  extends BaseConfig(DocumentDbConfigConstants.CONNECTOR_PREFIX, DocumentDbConfig.config, props)
    with KcqlSettings
    with DatabaseSettings
    with NumberRetriesSettings
    with ErrorPolicySettings
    with ConsistencyLevelSettings[ConsistencyLevel]
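A ConfigDef built like the one above can be checked without instantiating the connector: ConfigDef.validate returns one ConfigValue per key with any error messages attached. A minimal sketch (the property key below is a placeholder; the real names live in DocumentDbConfigConstants):

  import scala.collection.JavaConverters._

  val results = DocumentDbConfig.config.validate(Map("connect.documentdb.db" -> "mydb").asJava)
  results.asScala.filterNot(_.errorMessages.isEmpty).foreach { cv =>
    println(s"${cv.name}: ${cv.errorMessages.asScala.mkString("; ")}")
  }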
Example 26
Source File: MongoSinkConnector.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.mongodb.sink

import java.util

import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.mongodb.config.{MongoConfig, MongoConfigConstants}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.{Config, ConfigDef}
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._
import scala.util.{Failure, Try}

// The published excerpt omitted the class declaration and the fields the
// body references; they are reconstructed below (the task class name and
// the taskConfigs body are assumptions).
class MongoSinkConnector extends SinkConnector with StrictLogging {
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  private var configProps: util.Map[String, String] = _

  override def taskClass(): Class[_ <: Task] = classOf[MongoSinkTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] =
    List.fill(maxTasks)(configProps).asJava

  override def start(props: util.Map[String, String]): Unit = {
    Helpers.checkInputTopics(MongoConfigConstants.KCQL_CONFIG, props.asScala.toMap)
    Try(MongoConfig(props)) match {
      case Failure(f) =>
        throw new ConnectException(s"Couldn't start Mongo sink due to configuration error: ${f.getMessage}", f)
      case _ =>
    }
    configProps = props
  }

  override def stop(): Unit = {}

  override def version(): String = manifest.version()

  override def config(): ConfigDef = MongoConfig.config

  override def validate(connectorConfigs: util.Map[String, String]): Config =
    super.validate(connectorConfigs)
}
Example 27
Source File: MongoConfig.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.mongodb.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object MongoConfig {
  val config = new ConfigDef()
    .define(MongoConfigConstants.CONNECTION_CONFIG, Type.STRING, Importance.HIGH, MongoConfigConstants.CONNECTION_CONFIG_DOC, "Connection", 1, ConfigDef.Width.LONG, MongoConfigConstants.CONNECTION_CONFIG)
    .define(MongoConfigConstants.DATABASE_CONFIG, Type.STRING, Importance.HIGH, MongoConfigConstants.DATABASE_CONFIG_DOC, "Connection", 2, ConfigDef.Width.MEDIUM, MongoConfigConstants.DATABASE_CONFIG)
    .define(MongoConfigConstants.USERNAME_CONFIG, Type.STRING, MongoConfigConstants.USERNAME_CONFIG_DEFAULT, Importance.MEDIUM, MongoConfigConstants.USERNAME_CONFIG_DOC, "Connection", 3, ConfigDef.Width.MEDIUM, MongoConfigConstants.USERNAME_CONFIG)
    .define(MongoConfigConstants.PASSWORD_CONFIG, Type.PASSWORD, MongoConfigConstants.PASSWORD_CONFIG_DEFAULT, Importance.MEDIUM, MongoConfigConstants.PASSWORD_CONFIG_DOC, "Connection", 4, ConfigDef.Width.MEDIUM, MongoConfigConstants.PASSWORD_CONFIG)
    .define(MongoConfigConstants.AUTHENTICATION_MECHANISM, Type.STRING, MongoConfigConstants.AUTHENTICATION_MECHANISM_DEFAULT, Importance.MEDIUM, MongoConfigConstants.AUTHENTICATION_MECHANISM_DOC, "Connection", 5, ConfigDef.Width.MEDIUM, MongoConfigConstants.AUTHENTICATION_MECHANISM)
    .define(MongoConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH, MongoConfigConstants.KCQL_DOC, "Mappings", 1, ConfigDef.Width.LONG, MongoConfigConstants.KCQL_CONFIG)
    .define(MongoConfigConstants.JSON_DATETIME_FIELDS_CONFIG, Type.LIST, "", Importance.LOW, MongoConfigConstants.JSON_DATETIME_FIELDS_DOC, "Mappings", 2, ConfigDef.Width.NONE, MongoConfigConstants.JSON_DATETIME_FIELDS_CONFIG) // order bumped to 2: the published code reused 1, duplicating KCQL's slot in the Mappings group
    .define(MongoConfigConstants.ERROR_POLICY_CONFIG, Type.STRING, MongoConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH, MongoConfigConstants.ERROR_POLICY_DOC, "Error", 1, ConfigDef.Width.LONG, MongoConfigConstants.ERROR_POLICY_CONFIG)
    .define(MongoConfigConstants.NBR_OF_RETRIES_CONFIG, Type.INT, MongoConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM, MongoConfigConstants.NBR_OF_RETRIES_DOC, "Error", 2, ConfigDef.Width.LONG, MongoConfigConstants.NBR_OF_RETRIES_CONFIG)
    .define(MongoConfigConstants.ERROR_RETRY_INTERVAL_CONFIG, Type.INT, MongoConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM, MongoConfigConstants.ERROR_RETRY_INTERVAL_DOC, "Error", 3, ConfigDef.Width.LONG, MongoConfigConstants.ERROR_RETRY_INTERVAL_CONFIG)
    .define(MongoConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, MongoConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM, MongoConfigConstants.PROGRESS_COUNTER_ENABLED_DOC, "Metrics", 1, ConfigDef.Width.MEDIUM, MongoConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
    .withClientSslSupport()
}

case class MongoConfig(props: util.Map[String, String])
  extends BaseConfig(MongoConfigConstants.CONNECTOR_PREFIX, MongoConfig.config, props)
    with KcqlSettings
    with DatabaseSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with UserSettings
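The withClientSslSupport() call at the end of the chain above folds Kafka's standard SSL client settings (keystore, truststore and related keys) into the ConfigDef, so they validate like any hand-defined key. A quick way to see what it added:

  import scala.collection.JavaConverters._

  val sslKeys = MongoConfig.config.names.asScala.filter(_.startsWith("ssl."))
  sslKeys.foreach(println) // e.g. ssl.keystore.location, ssl.truststore.password, ...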
Example 28
Source File: CassandraSinkConnector.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.cassandra.sink

import java.util

import com.datamountaineer.streamreactor.connect.cassandra.config.{CassandraConfigConstants, CassandraConfigSink}
import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._
import scala.util.{Failure, Try}

// The published excerpt omitted the class declaration and the fields the
// body references; the reconstruction below follows the layout of the other
// stream-reactor connectors (task class and configDef accessor are assumptions).
class CassandraSinkConnector extends SinkConnector with StrictLogging {
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  private var configProps: util.Map[String, String] = _
  private val configDef: ConfigDef = CassandraConfigSink.sinkConfig // assumed accessor name

  override def taskClass(): Class[_ <: Task] = classOf[CassandraSinkTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] =
    List.fill(maxTasks)(configProps).asJava

  override def start(props: util.Map[String, String]): Unit = {
    //check input topics
    Helpers.checkInputTopics(CassandraConfigConstants.KCQL, props.asScala.toMap)
    configProps = props
    Try(new CassandraConfigSink(props)) match {
      case Failure(f) =>
        throw new ConnectException(s"Couldn't start Cassandra sink due to configuration error: ${f.getMessage}", f)
      case _ =>
    }
  }

  override def stop(): Unit = {}

  override def version(): String = manifest.version()

  override def config(): ConfigDef = configDef
}
Example 29
Source File: JMSSourceConnector.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.jms.source

import java.util

import com.datamountaineer.streamreactor.connect.jms.config.{JMSConfig, JMSConfigConstants}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector
import org.apache.kafka.connect.util.ConnectorUtils

import scala.collection.JavaConverters._

class JMSSourceConnector extends SourceConnector with StrictLogging {
  private var configProps: util.Map[String, String] = _
  private val configDef = JMSConfig.config
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def taskClass(): Class[_ <: Task] = classOf[JMSSourceTask]

  def kcqlTaskScaling(maxTasks: Int): util.List[util.Map[String, String]] = {
    val raw = configProps.get(JMSConfigConstants.KCQL)
    require(raw != null && !raw.isEmpty, s"No ${JMSConfigConstants.KCQL} provided!")

    //sql1, sql2
    val kcqls = raw.split(";")
    val groups = ConnectorUtils.groupPartitions(kcqls.toList.asJava, maxTasks).asScala

    //split up the kcql statement based on the number of tasks.
    groups
      .filterNot(_.isEmpty)
      .map { g =>
        val taskConfigs = new java.util.HashMap[String, String]
        taskConfigs.putAll(configProps)
        taskConfigs.put(JMSConfigConstants.KCQL, g.asScala.mkString(";")) //overwrite
        taskConfigs.asScala.toMap.asJava
      }
      .asJava
  }

  def defaultTaskScaling(maxTasks: Int): util.List[util.Map[String, String]] = {
    val raw = configProps.get(JMSConfigConstants.KCQL)
    require(raw != null && !raw.isEmpty, s"No ${JMSConfigConstants.KCQL} provided!")
    (1 to maxTasks).map { _ =>
      val taskConfigs: util.Map[String, String] = new java.util.HashMap[String, String]
      taskConfigs.putAll(configProps)
      taskConfigs
    }.toList.asJava
  }

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    val config = new JMSConfig(configProps)
    val scaleType = config.getString(JMSConfigConstants.TASK_PARALLELIZATION_TYPE).toLowerCase()
    if (scaleType == JMSConfigConstants.TASK_PARALLELIZATION_TYPE_DEFAULT) kcqlTaskScaling(maxTasks)
    else defaultTaskScaling(maxTasks)
  }

  override def config(): ConfigDef = configDef

  override def start(props: util.Map[String, String]): Unit = {
    val config = new JMSConfig(props)
    configProps = config.props
  }

  override def stop(): Unit = {}

  override def version(): String = manifest.version()
}
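The KCQL task scaling above leans on ConnectorUtils.groupPartitions, which splits a list of work units into at most maxTasks groups. An illustration with three statements spread over two tasks:

  import org.apache.kafka.connect.util.ConnectorUtils
  import scala.collection.JavaConverters._

  val kcqls = List("INSERT INTO a SELECT * FROM qa",
                   "INSERT INTO b SELECT * FROM qb",
                   "INSERT INTO c SELECT * FROM qc")
  val groups = ConnectorUtils.groupPartitions(kcqls.asJava, 2).asScala
  groups.foreach(g => println(g.asScala.mkString(";")))
  // one task receives two statements rejoined with ';', the other receives one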
Example 30
Source File: Validator.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.connector.validation

import java.util

import oharastream.ohara.client.configurator.InspectApi
import oharastream.ohara.common.util.VersionUtils
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector

import scala.jdk.CollectionConverters._

class Validator extends SourceConnector {
  private[this] var props: util.Map[String, String] = _

  override def version(): String = VersionUtils.VERSION

  override def start(props: util.Map[String, String]): Unit = {
    this.props = new util.HashMap[String, String](props)
    // we don't want to make any exception here
  }

  override def taskClass(): Class[_ <: Task] = classOf[ValidatorTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] =
    Seq.fill(maxTasks)(new util.HashMap[String, String](props)).map(_.asInstanceOf[util.Map[String, String]]).asJava

  override def stop(): Unit = {
    // do nothing
  }

  override def config(): ConfigDef =
    new ConfigDef().define(InspectApi.TARGET_KEY, Type.STRING, null, Importance.HIGH, "target type")
}
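Note the null default in the define above: a key defined with a null default is optional, so validation passes when it is absent and getString simply returns null. A small sketch, reusing the example's imports:

  val cd = new ConfigDef().define("target", Type.STRING, null, Importance.HIGH, "target type")
  val result = cd.validate(java.util.Collections.emptyMap[String, String]())
  println(result.get(0).errorMessages) // empty: the missing key is not an error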
Example 31
Source File: HBaseConfig.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.hbase.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import com.datamountaineer.streamreactor.connect.hbase.config.HBaseConfigConstants.CONNECTOR_PREFIX
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object HBaseConfig {

  import HBaseConfigConstants._

  val config: ConfigDef = new ConfigDef()
    .define(COLUMN_FAMILY, Type.STRING, Importance.HIGH, COLUMN_FAMILY_DOC, "Connection", 1, ConfigDef.Width.MEDIUM, COLUMN_FAMILY)
    .define(KCQL_QUERY, Type.STRING, Importance.HIGH, KCQL_QUERY, "Connection", 2, ConfigDef.Width.MEDIUM, KCQL_QUERY)
    .define(ERROR_POLICY, Type.STRING, ERROR_POLICY_DEFAULT, Importance.HIGH, ERROR_POLICY_DOC, "Connection", 3, ConfigDef.Width.MEDIUM, ERROR_POLICY)
    .define(ERROR_RETRY_INTERVAL, Type.INT, ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM, ERROR_RETRY_INTERVAL_DOC, "Connection", 4, ConfigDef.Width.MEDIUM, ERROR_RETRY_INTERVAL)
    .define(NBR_OF_RETRIES, Type.INT, NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM, NBR_OF_RETRIES_DOC, "Connection", 5, ConfigDef.Width.MEDIUM, NBR_OF_RETRIES)
    .define(PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM, PROGRESS_COUNTER_ENABLED_DOC, "Metrics", 1, ConfigDef.Width.MEDIUM, PROGRESS_COUNTER_ENABLED_DISPLAY)
    //config folders
    .define(HBASE_CONFIG_DIR, Type.STRING, HBASE_CONFIG_DIR_DEFAULT, Importance.MEDIUM, HBASE_CONFIG_DIR_DOC, "Configs", 1, ConfigDef.Width.MEDIUM, HBASE_CONFIG_DIR_DISPLAY)
    //security
    .define(KerberosKey, Type.BOOLEAN, KerberosDefault, Importance.MEDIUM, KerberosDoc, "Security", 1, ConfigDef.Width.MEDIUM, KerberosDisplay)
    .define(KerberosAuthModeKey, Type.STRING, KerberosAuthModeDefault, Importance.MEDIUM, KerberosAuthModeDoc, "Security", 2, ConfigDef.Width.MEDIUM, KerberosAuthModeDisplay)
    .define(KerberosDebugKey, Type.BOOLEAN, KerberosDebugDefault, Importance.MEDIUM, KerberosDebugDoc, "Security", 3, ConfigDef.Width.MEDIUM, KerberosDebugDisplay)
    //keytab
    .define(PrincipalKey, Type.STRING, PrincipalDefault, Importance.MEDIUM, PrincipalDoc, "Kerberos Keytab", 1, ConfigDef.Width.MEDIUM, PrincipalDisplay)
    .define(KerberosKeyTabKey, Type.STRING, KerberosKeyTabDefault, Importance.MEDIUM, KerberosKeyTabDoc, "Kerberos Keytab", 2, ConfigDef.Width.MEDIUM, KerberosKeyTabDisplay)
    .define(NameNodePrincipalKey, Type.STRING, NameNodePrincipalDefault, Importance.MEDIUM, NameNodePrincipalDoc, "Kerberos Keytab", 3, ConfigDef.Width.MEDIUM, NameNodePrincipalDisplay)
    .define(KerberosTicketRenewalKey, Type.LONG, KerberosTicketRenewalDefault, Importance.MEDIUM, KerberosTicketRenewalDoc, "Kerberos Keytab", 4, ConfigDef.Width.MEDIUM, KerberosTicketRenewalDisplay)
    //user password
    .define(KerberosUserKey, Type.STRING, KerberosUserDefault, Importance.MEDIUM, KerberosUserDoc, "Kerberos User Password", 1, ConfigDef.Width.MEDIUM, KerberosUserDisplay)
    .define(KerberosPasswordKey, Type.PASSWORD, KerberosPasswordDefault, Importance.MEDIUM, KerberosPasswordDoc, "Kerberos User Password", 2, ConfigDef.Width.MEDIUM, KerberosPasswordDisplay)
    .define(KerberosKrb5Key, Type.STRING, KerberosKrb5Default, Importance.MEDIUM, KerberosKrb5Doc, "Kerberos User Password", 3, ConfigDef.Width.MEDIUM, KerberosKrb5Display)
    .define(KerberosJaasKey, Type.STRING, KerberosJaasDefault, Importance.MEDIUM, KerberosJaasDoc, "Kerberos User Password", 4, ConfigDef.Width.MEDIUM, KerberosJaasDisplay)
    .define(JaasEntryNameKey, Type.STRING, JaasEntryNameDefault, Importance.MEDIUM, JaasEntryNameDoc, "Kerberos User Password", 5, ConfigDef.Width.MEDIUM, JaasEntryNameDisplay)
}

case class HBaseConfig(props: util.Map[String, String])
  extends BaseConfig(CONNECTOR_PREFIX, HBaseConfig.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
Example 32
Source File: InfluxConfig.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.influx.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.influxdb.InfluxDB.ConsistencyLevel

object InfluxConfig {
  val config: ConfigDef = new ConfigDef()
    .define(InfluxConfigConstants.INFLUX_URL_CONFIG, Type.STRING, Importance.HIGH, InfluxConfigConstants.INFLUX_URL_DOC, "Connection", 1, ConfigDef.Width.MEDIUM, InfluxConfigConstants.INFLUX_URL_CONFIG)
    .define(InfluxConfigConstants.INFLUX_DATABASE_CONFIG, Type.STRING, Importance.HIGH, InfluxConfigConstants.INFLUX_DATABASE_DOC, "Connection", 2, ConfigDef.Width.MEDIUM, InfluxConfigConstants.INFLUX_DATABASE_CONFIG)
    .define(InfluxConfigConstants.INFLUX_CONNECTION_USER_CONFIG, Type.STRING, Importance.HIGH, InfluxConfigConstants.INFLUX_CONNECTION_USER_DOC, "Connection", 3, ConfigDef.Width.MEDIUM, InfluxConfigConstants.INFLUX_CONNECTION_USER_CONFIG)
    .define(InfluxConfigConstants.INFLUX_CONNECTION_PASSWORD_CONFIG, Type.PASSWORD, "", Importance.HIGH, InfluxConfigConstants.INFLUX_CONNECTION_PASSWORD_DOC, "Connection", 4, ConfigDef.Width.MEDIUM, InfluxConfigConstants.INFLUX_CONNECTION_PASSWORD_CONFIG)
    .define(InfluxConfigConstants.KCQL_CONFIG, Type.STRING, Importance.HIGH, InfluxConfigConstants.KCQL_DOC, "Connection", 5, ConfigDef.Width.MEDIUM, InfluxConfigConstants.KCQL_DISPLAY)
    .define(InfluxConfigConstants.ERROR_POLICY_CONFIG, Type.STRING, InfluxConfigConstants.ERROR_POLICY_DEFAULT, Importance.HIGH, InfluxConfigConstants.ERROR_POLICY_DOC, "Miscellaneous", 1, ConfigDef.Width.MEDIUM, InfluxConfigConstants.ERROR_POLICY_CONFIG)
    .define(InfluxConfigConstants.ERROR_RETRY_INTERVAL_CONFIG, Type.INT, InfluxConfigConstants.ERROR_RETRY_INTERVAL_DEFAULT, Importance.MEDIUM, InfluxConfigConstants.ERROR_RETRY_INTERVAL_DOC, "Miscellaneous", 2, ConfigDef.Width.MEDIUM, InfluxConfigConstants.ERROR_RETRY_INTERVAL_CONFIG)
    .define(InfluxConfigConstants.NBR_OF_RETRIES_CONFIG, Type.INT, InfluxConfigConstants.NBR_OF_RETIRES_DEFAULT, Importance.MEDIUM, InfluxConfigConstants.NBR_OF_RETRIES_DOC, "Miscellaneous", 3, ConfigDef.Width.MEDIUM, InfluxConfigConstants.NBR_OF_RETRIES_CONFIG)
    .define(InfluxConfigConstants.RETENTION_POLICY_CONFIG, Type.STRING, InfluxConfigConstants.RETENTION_POLICY_DEFAULT, Importance.HIGH, InfluxConfigConstants.RETENTION_POLICY_DOC, "Writes", 1, ConfigDef.Width.MEDIUM, InfluxConfigConstants.RETENTION_POLICY_DOC)
    .define(InfluxConfigConstants.CONSISTENCY_CONFIG, Type.STRING, InfluxConfigConstants.CONSISTENCY_DEFAULT, Importance.MEDIUM, InfluxConfigConstants.CONSISTENCY_DOC, "Writes", 2, ConfigDef.Width.MEDIUM, InfluxConfigConstants.CONSISTENCY_DISPLAY)
    .define(InfluxConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, InfluxConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM, InfluxConfigConstants.PROGRESS_COUNTER_ENABLED_DOC, "Metrics", 1, ConfigDef.Width.MEDIUM, InfluxConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
}

case class InfluxConfig(props: util.Map[String, String])
  extends BaseConfig(InfluxConfigConstants.CONNECTOR_PREFIX, InfluxConfig.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with DatabaseSettings
    with ConsistencyLevelSettings[ConsistencyLevel]
    with UserSettings
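Because the connection password above is declared as Type.PASSWORD, reading it back yields a Password wrapper rather than a raw string, and its toString prints [hidden], which keeps the secret out of logs. A sketch, assuming influxConfig is an instance of the InfluxConfig case class above:

  val pw = influxConfig.getPassword(InfluxConfigConstants.INFLUX_CONNECTION_PASSWORD_CONFIG)
  println(pw)           // prints [hidden]
  val secret = pw.value // the actual password string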
Example 33
Source File: HiveSinkConnector.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.landoop.streamreactor.connect.hive.sink

import java.util

import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.landoop.streamreactor.connect.hive.sink.config.HiveSinkConfigDef
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

class HiveSinkConnector extends SinkConnector {

  val logger = org.slf4j.LoggerFactory.getLogger(getClass.getName)

  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  private var props: util.Map[String, String] = _

  override def version(): String = manifest.version()
  override def taskClass(): Class[_ <: Task] = classOf[HiveSinkTask]
  override def config(): ConfigDef = HiveSinkConfigDef.config

  override def start(props: util.Map[String, String]): Unit = {
    logger.info("Creating hive sink connector")
    this.props = props
  }

  override def stop(): Unit = ()

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Creating $maxTasks tasks config")
    List.fill(maxTasks)(props).asJava
  }
}
Example 34
Source File: HiveSourceConnector.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.landoop.streamreactor.connect.hive.source

import java.util

import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.landoop.streamreactor.connect.hive.sink.config.HiveSinkConfigDef
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector

import scala.collection.JavaConverters._

class HiveSourceConnector extends SourceConnector with StrictLogging {

  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  private var props: util.Map[String, String] = _

  override def version(): String = manifest.version()
  override def taskClass(): Class[_ <: Task] = classOf[HiveSourceTask]
  // note: this source connector exposes the sink's ConfigDef, as in the published code
  override def config(): ConfigDef = HiveSinkConfigDef.config

  override def start(props: util.Map[String, String]): Unit = {
    logger.info("Creating hive source connector")
    this.props = props
  }

  override def stop(): Unit = ()

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
    logger.info(s"Creating $maxTasks tasks config")
    List.fill(maxTasks)(props).asJava
  }
}
Example 35
Source File: HiveSourceConfigDef.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.landoop.streamreactor.connect.hive.source.config

import java.util

import com.datamountaineer.streamreactor.connect.config.base.traits._
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef.{Importance, Type}

object HiveSourceConfigDef {

  import HiveSourceConfigConstants._

  val config: ConfigDef = new ConfigDef()
    .define(PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM, PROGRESS_COUNTER_ENABLED_DOC, "Metrics", 1, ConfigDef.Width.MEDIUM, PROGRESS_COUNTER_ENABLED_DISPLAY)
    .define(KcqlKey, Type.STRING, Importance.HIGH, KCQL_DOC)
    .define(DatabaseNameKey, Type.STRING, Importance.HIGH, DatabaseNameDoc)
    .define(MetastoreTypeKey, Type.STRING, Importance.HIGH, MetastoreTypeDoc)
    .define(MetastoreUrisKey, Type.STRING, Importance.HIGH, MetastoreUrisDoc)
    .define(FsDefaultKey, Type.STRING, Importance.HIGH, FsDefaultDoc)
    .define(PollSizeKey, Type.INT, 1024, Importance.HIGH, PollSizeDoc)
    //config folders
    .define(HdfsConfigDirKey, Type.STRING, HdfsConfigDirDefault, Importance.MEDIUM, HdfsConfigDirDoc, "Configs", 1, ConfigDef.Width.MEDIUM, HdfsConfigDirDisplay)
    .define(HiveConfigDirKey, Type.STRING, HiveConfigDirDefault, Importance.MEDIUM, HiveConfigDirDoc, "Configs", 2, ConfigDef.Width.MEDIUM, HiveConfigDirDisplay)
    //security
    .define(KerberosKey, Type.BOOLEAN, KerberosDefault, Importance.MEDIUM, KerberosDoc, "Security", 1, ConfigDef.Width.MEDIUM, KerberosDisplay)
    .define(KerberosAuthModeKey, Type.STRING, KerberosAuthModeDefault, Importance.MEDIUM, KerberosAuthModeDoc, "Security", 2, ConfigDef.Width.MEDIUM, KerberosAuthModeDisplay)
    //keytab
    .define(PrincipalKey, Type.STRING, PrincipalDefault, Importance.MEDIUM, PrincipalDoc, "Kerberos Keytab", 1, ConfigDef.Width.MEDIUM, PrincipalDisplay)
    .define(KerberosKeyTabKey, Type.STRING, KerberosKeyTabDefault, Importance.MEDIUM, KerberosKeyTabDoc, "Kerberos Keytab", 2, ConfigDef.Width.MEDIUM, KerberosKeyTabDisplay)
    .define(NameNodePrincipalKey, Type.STRING, NameNodePrincipalDefault, Importance.MEDIUM, NameNodePrincipalDoc, "Kerberos Keytab", 3, ConfigDef.Width.MEDIUM, NameNodePrincipalDisplay)
    .define(KerberosTicketRenewalKey, Type.LONG, KerberosTicketRenewalDefault, Importance.MEDIUM, KerberosTicketRenewalDoc, "Kerberos Keytab", 4, ConfigDef.Width.MEDIUM, KerberosTicketRenewalDisplay)
    //user password
    .define(KerberosUserKey, Type.STRING, KerberosUserDefault, Importance.MEDIUM, KerberosUserDoc, "Kerberos User Password", 1, ConfigDef.Width.MEDIUM, KerberosUserDisplay)
    .define(KerberosPasswordKey, Type.PASSWORD, KerberosPasswordDefault, Importance.MEDIUM, KerberosPasswordDoc, "Kerberos User Password", 2, ConfigDef.Width.MEDIUM, KerberosPasswordDisplay)
    .define(KerberosKrb5Key, Type.STRING, KerberosKrb5Default, Importance.MEDIUM, KerberosKrb5Doc, "Kerberos User Password", 3, ConfigDef.Width.MEDIUM, KerberosKrb5Display)
    .define(KerberosJaasKey, Type.STRING, KerberosJaasDefault, Importance.MEDIUM, KerberosJaasDoc, "Kerberos User Password", 4, ConfigDef.Width.MEDIUM, KerberosJaasDisplay)
    .define(JaasEntryNameKey, Type.STRING, JaasEntryNameDefault, Importance.MEDIUM, JaasEntryNameDoc, "Kerberos User Password", 5, ConfigDef.Width.MEDIUM, JaasEntryNameDisplay)
}

case class HiveSourceConfigDefBuilder(props: util.Map[String, String])
  extends BaseConfig(HiveSourceConfigConstants.CONNECTOR_PREFIX, HiveSourceConfigDef.config, props)
    with KcqlSettings
    with ErrorPolicySettings
    with NumberRetriesSettings
    with UserSettings
    with ConnectionSettings
Example 36
Source File: BloombergSourceConfig.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.bloomberg.config

import org.apache.kafka.common.config.ConfigDef.{Importance, Type}
import org.apache.kafka.common.config.{AbstractConfig, ConfigDef}

class BloombergSourceConfig(map: java.util.Map[String, String])
  extends AbstractConfig(BloombergSourceConfig.config, map)

object BloombergSourceConfig {
  lazy val config: ConfigDef = new ConfigDef()
    .define(BloombergSourceConfigConstants.SERVER_HOST, Type.STRING, Importance.HIGH, BloombergSourceConfigConstants.SERVER_HOST_DOC, "Connection", 1, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.SERVER_HOST)
    .define(BloombergSourceConfigConstants.SERVER_PORT, Type.INT, Importance.HIGH, BloombergSourceConfigConstants.SERVER_PORT_DOC, "Connection", 2, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.SERVER_PORT)
    .define(BloombergSourceConfigConstants.SERVICE_URI, Type.STRING, Importance.HIGH, BloombergSourceConfigConstants.SERVICE_URI_DOC, "Connection", 3, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.SERVICE_URI)
    .define(BloombergSourceConfigConstants.SUBSCRIPTIONS, Type.STRING, Importance.HIGH, BloombergSourceConfigConstants.SUBSCRIPTION_DOC, "Subscription", 1, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.SUBSCRIPTIONS)
    .define(BloombergSourceConfigConstants.AUTHENTICATION_MODE, Type.STRING, Importance.LOW, BloombergSourceConfigConstants.AUTHENTICATION_MODE_DOC, "Connection", 4, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.AUTHENTICATION_MODE)
    .define(BloombergSourceConfigConstants.KAFKA_TOPIC, Type.STRING, Importance.HIGH, BloombergSourceConfigConstants.KAFKA_TOPIC_DOC, "Subscription", 2, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.KAFKA_TOPIC)
    .define(BloombergSourceConfigConstants.BUFFER_SIZE, Type.INT, Importance.MEDIUM, BloombergSourceConfigConstants.BUFFER_SIZE_DOC, "Connection", 5, ConfigDef.Width.SHORT, BloombergSourceConfigConstants.BUFFER_SIZE)
    .define(BloombergSourceConfigConstants.PAYLOAD_TYPE, Type.STRING, Importance.MEDIUM, BloombergSourceConfigConstants.PAYLOAD_TYPE_DOC, "Subscription", 3, ConfigDef.Width.SHORT, BloombergSourceConfigConstants.PAYLOAD_TYPE)
    .define(BloombergSourceConfigConstants.PROGRESS_COUNTER_ENABLED, Type.BOOLEAN, BloombergSourceConfigConstants.PROGRESS_COUNTER_ENABLED_DEFAULT, Importance.MEDIUM, BloombergSourceConfigConstants.PROGRESS_COUNTER_ENABLED_DOC, "Metrics", 1, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.PROGRESS_COUNTER_ENABLED_DISPLAY)
    .define(BloombergSourceConfigConstants.SERVICE_AUTHORIZATION, Type.BOOLEAN, Importance.LOW, BloombergSourceConfigConstants.SERVICE_AUTHORIZATION_DOC, "Connection", 6, ConfigDef.Width.MEDIUM, BloombergSourceConfigConstants.SERVICE_AUTHORIZATION)
}
Example 37
Source File: ReThinkSinkConnector.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.rethink.sink

import java.util

import com.datamountaineer.streamreactor.connect.config.Helpers
import com.datamountaineer.streamreactor.connect.rethink.ReThinkConnection
import com.datamountaineer.streamreactor.connect.rethink.config.{ReThinkConfigConstants, ReThinkSinkConfig, ReThinkSinkSettings}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.rethinkdb.RethinkDB
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.sink.SinkConnector

import scala.collection.JavaConverters._

// The published excerpt begins mid-way through start(); the class
// declaration, fields and the method signature below are reconstructed
// (task class and configDef accessor names are assumptions).
class ReThinkSinkConnector extends SinkConnector with StrictLogging {
  private var configProps: util.Map[String, String] = _
  private val configDef: ConfigDef = ReThinkSinkConfig.config // assumed accessor name
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def taskClass(): Class[_ <: Task] = classOf[ReThinkSinkTask]

  override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] =
    List.fill(maxTasks)(configProps).asJava

  override def start(props: util.Map[String, String]): Unit = {
    // signature reconstructed; the excerpt starts inside this method
    val rethink = RethinkDB.r
    initializeTables(rethink, props)
    configProps = props
  }

  def initializeTables(rethink: RethinkDB, props: util.Map[String, String]): Unit = {
    val config = ReThinkSinkConfig(props)
    val settings = ReThinkSinkSettings(config)
    val rethinkHost = config.getString(ReThinkConfigConstants.RETHINK_HOST)
    val port = config.getInt(ReThinkConfigConstants.RETHINK_PORT)
    val conn = ReThinkConnection(rethink, config)
    ReThinkHelper.checkAndCreateTables(rethink, settings, conn)
    conn.close()
  }

  override def stop(): Unit = {}

  override def version(): String = manifest.version()

  override def config(): ConfigDef = configDef
}