| /* |
| * Licensed to the Apache Software Foundation (ASF) under one or more |
| * contributor license agreements. See the NOTICE file distributed with |
| * this work for additional information regarding copyright ownership. |
| * The ASF licenses this file to You under the Apache License, Version 2.0 |
| * (the "License"); you may not use this file except in compliance with |
| * the License. You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| package org.apache.camel.component.debezium.springboot; |
| |
| import javax.annotation.Generated; |
| import org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon; |
| import org.springframework.boot.context.properties.ConfigurationProperties; |
| |
| /** |
| * Represents a Debezium MySQL endpoint which is used to capture changes in a |
| * MySQL database so that applications can see those changes and respond to |
| * them. |
| * |
| * Generated by camel-package-maven-plugin - do not edit this file! |
| */ |
| @Generated("org.apache.camel.maven.packaging.SpringBootAutoConfigurationMojo") |
| @ConfigurationProperties(prefix = "camel.component.debezium-mysql") |
| public class DebeziumMySqlComponentConfiguration |
| extends |
| ComponentConfigurationPropertiesCommon { |
| |
| /** |
| * Whether to enable auto configuration of the debezium-mysql component. |
| * This is enabled by default. |
| */ |
| private Boolean enabled; |
| /** |
| * Allow pre-configured Configurations to be set. |
| */ |
| private MySqlConnectorEmbeddedDebeziumConfigurationNestedConfiguration configuration; |
| /** |
| * Whether the component should use basic property binding (Camel 2.x) or |
| * the newer property binding with additional capabilities. |
| */ |
| private Boolean basicPropertyBinding = false; |
| /** |
| * Whether the producer should be started lazily (on the first message). By |
| * starting lazily you can allow the CamelContext and routes to start up in |
| * situations where a producer may otherwise fail during startup and cause |
| * the route to fail to start. By deferring this startup to be lazy, the |
| * startup failure can be handled while routing messages via Camel's routing |
| * error handlers. Beware that when the first message is processed, creating |
| * and starting the producer may take a little time and prolong the total |
| * processing time. |
| */ |
| private Boolean lazyStartProducer = false; |
| /** |
| * Allows for bridging the consumer to the Camel routing Error Handler, |
| * which means any exceptions that occur while the consumer is trying to |
| * pick up incoming messages, or the likes, will now be processed as a |
| * message and handled by the routing Error Handler. By default the consumer |
| * will use the org.apache.camel.spi.ExceptionHandler to deal with |
| * exceptions, which will be logged at WARN or ERROR level and ignored. |
| */ |
| private Boolean bridgeErrorHandler = false; |
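| |
| // Usage sketch (an illustrative application.properties fragment, not part |
| // of the generated code). It assumes Spring Boot's relaxed binding of the |
| // camelCase fields above to kebab-case property keys under the |
| // @ConfigurationProperties prefix: |
| // |
| //   camel.component.debezium-mysql.enabled = true |
| //   camel.component.debezium-mysql.bridge-error-handler = true |
| //   camel.component.debezium-mysql.lazy-start-producer = false |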
| |
| public MySqlConnectorEmbeddedDebeziumConfigurationNestedConfiguration getConfiguration() { |
| return configuration; |
| } |
| |
| public void setConfiguration( |
| MySqlConnectorEmbeddedDebeziumConfigurationNestedConfiguration configuration) { |
| this.configuration = configuration; |
| } |
| |
| public Boolean getBasicPropertyBinding() { |
| return basicPropertyBinding; |
| } |
| |
| public void setBasicPropertyBinding(Boolean basicPropertyBinding) { |
| this.basicPropertyBinding = basicPropertyBinding; |
| } |
| |
| public Boolean getLazyStartProducer() { |
| return lazyStartProducer; |
| } |
| |
| public void setLazyStartProducer(Boolean lazyStartProducer) { |
| this.lazyStartProducer = lazyStartProducer; |
| } |
| |
| public Boolean getBridgeErrorHandler() { |
| return bridgeErrorHandler; |
| } |
| |
| public void setBridgeErrorHandler(Boolean bridgeErrorHandler) { |
| this.bridgeErrorHandler = bridgeErrorHandler; |
| } |
| |
| public static class MySqlConnectorEmbeddedDebeziumConfigurationNestedConfiguration { |
| public static final Class<?> CAMEL_NESTED_CLASS = org.apache.camel.component.debezium.configuration.MySqlConnectorEmbeddedDebeziumConfiguration.class; |
| /** |
| * Controls how long the connector holds onto the global read lock while |
| * it is performing a snapshot. The default is 'minimal', which means |
| * the connector holds the global read lock (and thus prevents any |
| * updates) for just the initial portion of the snapshot while the |
| * database schemas and other metadata are being read. The remaining |
| * work in a snapshot involves selecting all rows from each table, and |
| * this can be done using the snapshot process' REPEATABLE READ |
| * transaction even when the lock is no longer held and other operations |
| * are updating the database. However, in some cases it may be desirable |
| * to block all writes for the entire duration of the snapshot; in such |
| * cases set this property to 'extended'. Using a value of 'none' will |
| * prevent the connector from acquiring any table locks during the |
| * snapshot process. This mode can only be used in combination with |
| * snapshot.mode values of 'schema_only' or 'schema_only_recovery' and |
| * is only safe to use if no schema changes are happening while the |
| * snapshot is taken. |
| */ |
| private String snapshotLockingMode = "minimal"; |
| /** |
| * A semicolon-separated list of expressions that match fully-qualified |
| * tables and column(s) to be used as message key. Each expression must |
| * match the pattern '<fully-qualified table name>:<key columns>', where |
| * the table names could be defined as (DB_NAME.TABLE_NAME) or |
| * (SCHEMA_NAME.TABLE_NAME), depending on the specific connector, and the |
| * key columns are a comma-separated list of columns representing the |
| * custom key. For any table without an explicit key configuration the |
| * table's primary key column(s) will be used as message key. Example: |
| * dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id |
| */ |
| private String messageKeyColumns; |
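| |
| // A hedged example using the sample value from the Javadoc above (the |
| // kebab-case key assumes Spring Boot's relaxed binding): |
| // |
| //   camel.component.debezium-mysql.configuration.message-key-columns = \ |
| //     dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id |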
| /** |
| * Description is not available here, please check Debezium website for |
| * corresponding key 'column.blacklist' description. |
| */ |
| private String columnBlacklist; |
| /** |
| * Description is not available here, please check Debezium website for |
| * corresponding key 'table.blacklist' description. |
| */ |
| private String tableBlacklist; |
| /** |
| * Whether the connector should publish changes in the database schema |
| * to a Kafka topic with the same name as the database server ID. Each |
| * schema change will be recorded using a key that contains the database |
| * name and whose value includes the DDL statement(s).The default is |
| * 'true'. This is independent of how the connector internally records |
| * database history. |
| */ |
| private Boolean includeSchemaChanges = true; |
| /** |
| * The source UUIDs used to include GTID ranges when determining the |
| * starting position in the MySQL server's binlog. |
| */ |
| private String gtidSourceIncludes; |
| /** |
| * JDBC Driver class name used to connect to the MySQL database server. |
| */ |
| private String databaseJdbcDriver = "com.mysql.cj.jdbc.Driver"; |
| /** |
| * The number of milliseconds to wait while polling for persisted data |
| * during recovery. |
| */ |
| private Integer databaseHistoryKafkaRecoveryPollIntervalMs = 100; |
| /** |
| * Frequency in milliseconds to wait for new change events to appear |
| * after receiving no events. Defaults to 500ms. |
| */ |
| private Long pollIntervalMs = 500L; |
| /** |
| * A semicolon separated list of SQL statements to be executed when a |
| * JDBC connection (not binlog reading connection) to the database is |
| * established. Note that the connector may establish JDBC connections |
| * at its own discretion, so this should typically be used for |
| * configuration of session parameters only,but not for executing DML |
| * statements. Use doubled semicolon (';;') to use a semicolon as a |
| * character and not as a delimiter. |
| */ |
| private String databaseInitialStatements; |
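| |
| // Illustrative sketch (the statement content is hypothetical). Two |
| // statements are separated by a single ';'; a literal semicolon inside a |
| // statement would be written as ';;': |
| // |
| //   camel.component.debezium-mysql.configuration.database-initial-statements = \ |
| //     SET SESSION wait_timeout=2000;SET SESSION time_zone='+00:00' |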
| /** |
| * The prefix that is used to name heartbeat topics. Defaults to |
| * __debezium-heartbeat. |
| */ |
| private String heartbeatTopicsPrefix = "__debezium-heartbeat"; |
| /** |
| * The size of a look-ahead buffer used by the binlog reader to decide |
| * whether the transaction in progress is going to be committed or |
| * rolled back. Use 0 to disable look-ahead buffering. Defaults to 0 |
| * (i.e. buffering is disabled). |
| */ |
| private Integer binlogBufferSize = 0; |
| /** |
| * The maximum number of records that should be loaded into memory while |
| * performing a snapshot. |
| */ |
| private Integer snapshotFetchSize; |
| /** |
| * Name of the MySQL database user to be used when connecting to the |
| * database. |
| */ |
| private String databaseUser; |
| /** |
| * The source UUIDs used to exclude GTID ranges when determining the |
| * starting position in the MySQL server's binlog. |
| */ |
| private String gtidSourceExcludes; |
| /** |
| * This property contains a comma-separated list of fully-qualified |
| * tables (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on |
| * the specific connector. Select statements for the individual tables |
| * are specified in further configuration properties, one for each |
| * table, identified by the id |
| * 'snapshot.select.statement.overrides.[DB_NAME].[TABLE_NAME]' or |
| * 'snapshot.select.statement.overrides.[SCHEMA_NAME].[TABLE_NAME]', |
| * respectively. The value of those properties is the select statement |
| * to use when retrieving data from the specific table during |
| * snapshotting. A possible use case for large append-only tables is |
| * setting a specific point where to start (resume) snapshotting, in |
| * case a previous snapshotting was interrupted. |
| */ |
| private String snapshotSelectStatementOverrides; |
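| |
| // Sketch under the naming scheme in the Javadoc above (the table name and |
| // SELECT statement are illustrative). The listed table: |
| // |
| //   camel.component.debezium-mysql.configuration.snapshot-select-statement-overrides = inventory.orders |
| // |
| // would then be paired with a per-table Debezium property such as |
| // 'snapshot.select.statement.overrides.inventory.orders' holding the |
| // SELECT statement to use during the snapshot. |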
| /** |
| * A list of host/port pairs that the connector will use for |
| * establishing the initial connection to the Kafka cluster for |
| * retrieving database schema history previously stored by the |
| * connector. This should point to the same Kafka cluster used by the |
| * Kafka Connect process. |
| */ |
| private String databaseHistoryKafkaBootstrapServers; |
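| |
| // e.g. (host names are illustrative): |
| //   camel.component.debezium-mysql.configuration.database-history-kafka-bootstrap-servers = kafka-1:9092,kafka-2:9092 |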
| /** |
| * Location of the Java keystore file containing an application |
| * process's own certificate and private key. |
| */ |
| private String databaseSslKeystore; |
| /** |
| * Length of an interval in milliseconds in which the connector |
| * periodically sends heartbeat messages to a heartbeat topic. Use 0 to |
| * disable heartbeat messages. Disabled by default. |
| */ |
| private Integer heartbeatIntervalMs = 0; |
| /** |
| * A version of the format of the publicly visible source part in the |
| * message. |
| */ |
| private String sourceStructVersion = "v2"; |
| /** |
| * Password to unlock the keystore file (store password) specified by |
| * 'ssl.truststore' configuration property or the |
| * 'javax.net.ssl.trustStore' system or JVM property. |
| */ |
| private String databaseSslTruststorePassword; |
| /** |
| * Specify how binlog events that belong to a table missing from the |
| * internal schema representation (i.e. the internal representation is not |
| * consistent with the database) should be handled, including: 'fail' (the |
| * default) an exception indicating the problematic event and its binlog |
| * position is raised, causing the connector to be stopped; 'warn' the |
| * problematic event and its binlog position will be logged and the event |
| * will be skipped; 'ignore' the problematic event will be skipped. |
| */ |
| private String inconsistentSchemaHandlingMode = "fail"; |
| /** |
| * MySQL allows users to insert year values as either 2-digit or 4-digit. |
| * In the case of two digits the value is automatically mapped into the |
| * range 1970 - 2069. 'false' delegates the implicit conversion to the |
| * database; 'true' (the default) means Debezium makes the conversion. |
| */ |
| private Boolean enableTimeAdjuster = true; |
| /** |
| * If set to 'latest', when the connector sees a new GTID channel, it will |
| * start consuming the GTID channel from the server's latest executed GTID |
| * position. If 'earliest', the connector starts reading the channel from |
| * the first available (not purged) GTID position on the server. |
| */ |
| private String gtidNewChannelPosition = "latest"; |
| /** |
| * Password of the MySQL database user to be used when connecting to the |
| * database. |
| */ |
| private String databasePassword; |
| /** |
| * Controls which DDL Debezium will store in the database history. By |
| * default (false) Debezium will store all incoming DDL statements. If set |
| * to true then only DDL that manipulates a monitored table will be stored. |
| */ |
| private Boolean databaseHistoryStoreOnlyMonitoredTablesDdl = false; |
| /** |
| * If set to true, we will only produce DML events into Kafka for |
| * transactions that were written on MySQL servers with UUIDs matching |
| * the filters defined by the gtid.source.includes or |
| * gtid.source.excludes configuration options, if they are specified. |
| */ |
| private Boolean gtidSourceFilterDmlEvents = true; |
| /** |
| * Description is not available here, please check Debezium website for |
| * corresponding key 'database.blacklist' description. |
| */ |
| private String databaseBlacklist; |
| /** |
| * Maximum size of each batch of source records. Defaults to 2048. |
| */ |
| private Integer maxBatchSize = 2048; |
| /** |
| * Whether a separate thread should be used to ensure the connection is |
| * kept alive. |
| */ |
| private Boolean connectKeepAlive = true; |
| /** |
| * The name of the DatabaseHistory class that should be used to store |
| * and recover database schema changes. The configuration properties for |
| * the history are prefixed with the 'database.history.' string. |
| */ |
| private String databaseHistory = "io.debezium.relational.history.FileDatabaseHistory"; |
| /** |
| * The criteria for running a snapshot upon startup of the connector. |
| * Options include: 'when_needed' to specify that the connector run a |
| * snapshot upon startup whenever it deems it necessary; 'initial' (the |
| * default) to specify the connector can run a snapshot only when no |
| * offsets are available for the logical server name; 'initial_only' |
| * same as 'initial' except the connector should stop after completing |
| * the snapshot and before it would normally read the binlog; and 'never' |
| * to specify the connector should never run a snapshot and that upon |
| * first startup the connector should read from the beginning of the |
| * binlog. The 'never' mode should be used with care, and only when the |
| * binlog is known to contain all history. |
| */ |
| private String snapshotMode = "initial"; |
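| |
| // e.g. to let the connector snapshot whenever it deems it necessary |
| // (value taken from the options listed above): |
| //   camel.component.debezium-mysql.configuration.snapshot-mode = when_needed |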
| /** |
| * Maximum time in milliseconds to wait after trying to connect to the |
| * database before timing out. |
| */ |
| private Integer connectTimeoutMs = 30000; |
| /** |
| * Maximum size of the queue for change events read from the database |
| * log but not yet recorded or forwarded. Defaults to 8192, and should |
| * always be larger than the maximum batch size. |
| */ |
| private Integer maxQueueSize = 8192; |
| /** |
| * The name of the topic for the database schema history. |
| */ |
| private String databaseHistoryKafkaTopic; |
| /** |
| * The number of milliseconds to delay before a snapshot will begin. |
| */ |
| private Long snapshotDelayMs = 0L; |
| /** |
| * The number of attempts in a row that no data is returned from Kafka |
| * before the recovery completes. The maximum amount of time to wait after |
| * receiving no data is (recovery.attempts) x |
| * (recovery.poll.interval.ms). |
| */ |
| private Integer databaseHistoryKafkaRecoveryAttempts = 100; |
| /** |
| * The tables for which changes are to be captured. |
| */ |
| private String tableWhitelist; |
| /** |
| * Whether delete operations should be represented by a delete event and |
| * a subsequent tombstone event (true) or only by a delete event (false). |
| * Emitting the tombstone event (the default behavior) allows Kafka to |
| * completely delete all events pertaining to the given key once the |
| * source record is deleted. |
| */ |
| private Boolean tombstonesOnDelete = false; |
| /** |
| * Specify how DECIMAL and NUMERIC columns should be represented in |
| * change events, including: 'precise' (the default) uses |
| * java.math.BigDecimal to represent values, which are encoded in the |
| * change events using a binary representation and Kafka Connect's |
| * 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to |
| * represent values; 'double' represents values using Java's 'double', |
| * which may not offer the same precision but will be far easier to use in |
| * consumers. |
| */ |
| private String decimalHandlingMode = "precise"; |
| /** |
| * BETA FEATURE: On connector restart, the connector will check if there |
| * have been any new tables added to the configuration, and snapshot |
| * them. There are presently only two options: 'off' (the default): do not |
| * snapshot new tables; 'parallel': the snapshot of the new tables will |
| * occur in parallel to the continued binlog reading of the old tables. |
| * When the snapshot completes, an independent binlog reader will begin |
| * reading the events for the new tables until it catches up to the |
| * present time. At this point, both old and new binlog readers will be |
| * momentarily halted and a new binlog reader will start that will read |
| * the binlog for all configured tables. The parallel binlog reader will |
| * have a configured server id of 10000 + the primary binlog reader's |
| * server id. |
| */ |
| private String snapshotNewTables = "off"; |
| /** |
| * Controls the action Debezium will take when it meets a DDL statement |
| * in the binlog that it cannot parse. By default the connector will stop |
| * operating, but by changing the setting it can ignore the statements |
| * which it cannot parse. If skipping is enabled then Debezium may miss |
| * metadata changes. |
| */ |
| private Boolean databaseHistorySkipUnparseableDdl = false; |
| /** |
| * Flag specifying whether built-in tables should be ignored. |
| */ |
| private Boolean tableIgnoreBuiltin = true; |
| /** |
| * The databases for which changes are to be captured. |
| */ |
| private String databaseWhitelist; |
| /** |
| * The path to the file that will be used to record the database history. |
| */ |
| private String databaseHistoryFileFilename; |
| /** |
| * Specify how BIGINT UNSIGNED columns should be represented in change |
| * events, including: 'precise' uses java.math.BigDecimal to represent |
| * values, which are encoded in the change events using a binary |
| * representation and Kafka Connect's |
| * 'org.apache.kafka.connect.data.Decimal' type; 'long' (the default) |
| * represents values using Java's 'long', which may not offer the same |
| * precision but will be far easier to use in consumers. |
| */ |
| private String bigintUnsignedHandlingMode = "long"; |
| /** |
| * A numeric ID of this database client, which must be unique across all |
| * currently-running database processes in the cluster. This connector |
| * joins the MySQL database cluster as another server (with this unique |
| * ID) so it can read the binlog. By default, a random number is |
| * generated between 5400 and 6400. |
| */ |
| private Long databaseServerId; |
| /** |
| * Specify how failures during deserialization of binlog events (i.e. |
| * when encountering a corrupted event) should be handled, including: |
| * 'fail' (the default) an exception indicating the problematic event and |
| * its binlog position is raised, causing the connector to be stopped; |
| * 'warn' the problematic event and its binlog position will be logged and |
| * the event will be skipped; 'ignore' the problematic event will be |
| * skipped. |
| */ |
| private String eventDeserializationFailureHandlingMode = "fail"; |
| /** |
| * Time, date and timestamps can be represented with different kinds of |
| * precision, including: 'adaptive_time_microseconds': the precision of |
| * date and timestamp values is based on the database column's precision, |
| * but time fields always use microseconds precision; 'connect': always |
| * represents time, date and timestamp values using Kafka Connect's |
| * built-in representations for Time, Date, and Timestamp, which uses |
| * millisecond precision regardless of the database columns' precision. |
| */ |
| private String timePrecisionMode = "adaptive_time_microseconds"; |
| /** |
| * Unique name that identifies the database server and all recorded |
| * offsets, and that is used as a prefix for all schemas and topics. |
| * Each distinct installation should have a separate namespace and be |
| * monitored by at most one Debezium connector. |
| */ |
| private String databaseServerName; |
| /** |
| * Port of the MySQL database server. |
| */ |
| private Integer databasePort = 3306; |
| /** |
| * Location of the Java truststore file containing the collection of CA |
| * certificates trusted by this application process (trust store). |
| */ |
| private String databaseSslTruststore; |
| /** |
| * Whether to use an encrypted connection to MySQL. Options include: |
| * 'disabled' (the default) to use an unencrypted connection; 'preferred' |
| * to establish a secure (encrypted) connection if the server supports |
| * secure connections, but fall back to an unencrypted connection |
| * otherwise; 'required' to use a secure (encrypted) connection, and fail |
| * if one cannot be established; 'verify_ca' like 'required' but |
| * additionally verify the server TLS certificate against the configured |
| * Certificate Authority (CA) certificates, or fail if no valid matching |
| * CA certificates are found; or 'verify_identity' like 'verify_ca' but |
| * additionally verify that the server certificate matches the host to |
| * which the connection is attempted. |
| */ |
| private String databaseSslMode = "disabled"; |
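| |
| // e.g. to require an encrypted connection and verify the server |
| // certificate against the configured CA certificates: |
| //   camel.component.debezium-mysql.configuration.database-ssl-mode = verify_ca |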
| /** |
| * Password to access the private key from the keystore file specified |
| * by 'ssl.keystore' configuration property or the |
| * 'javax.net.ssl.keyStore' system or JVM property. This password is |
| * used to unlock the keystore file (store password), and to decrypt the |
| * private key stored in the keystore (key password). |
| */ |
| private String databaseSslKeystorePassword; |
| /** |
| * Resolvable hostname or IP address of the MySQL database server. |
| */ |
| private String databaseHostname; |
| /** |
| * Only relevant if parallel snapshotting is configured. During parallel |
| * snapshotting, multiple (4) connections are opened to the database, and |
| * each needs its own unique connection ID. This offset is used to |
| * generate those IDs from the base configured cluster ID. |
| */ |
| private Long databaseServerIdOffset = 10000L; |
| /** |
| * Interval in milliseconds to wait between connection checks if the keep |
| * alive thread is used. |
| */ |
| private Long connectKeepAliveIntervalMs = 60000L; |
| /** |
| * Whether the connector should include the original SQL query that |
| * generated the change event. Note: This option requires that MySQL be |
| * configured with the binlog_rows_query_log_events option set to ON. The |
| * query will not be present for events generated from a snapshot. |
| * WARNING: Enabling this option may expose tables or fields explicitly |
| * blacklisted or masked by including the original SQL statement in the |
| * change event. For this reason the default value is 'false'. |
| */ |
| private Boolean includeQuery = false; |
| /** |
| * The name of the Java class for the connector. |
| */ |
| private Class<?> connectorClass; |
| /** |
| * Unique name for the connector. Attempting to register again with the |
| * same name will fail. |
| */ |
| private String name; |
| /** |
| * The name of the Java class that is responsible for persistence of |
| * connector offsets. |
| */ |
| private String offsetStorage = "org.apache.kafka.connect.storage.FileOffsetBackingStore"; |
| /** |
| * Path to the file where offsets are to be stored. Required when |
| * offset.storage is set to the FileOffsetBackingStore. |
| */ |
| private String offsetStorageFileName; |
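| |
| // A file-based offset store sketch (the path is illustrative); the file |
| // name is required because the default offset.storage above is the |
| // FileOffsetBackingStore: |
| //   camel.component.debezium-mysql.configuration.offset-storage-file-name = /var/lib/debezium/mysql-offsets.dat |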
| /** |
| * The name of the Kafka topic where offsets are to be stored. Required |
| * when offset.storage is set to the KafkaOffsetBackingStore. |
| */ |
| private String offsetStorageTopic; |
| /** |
| * Replication factor used when creating the offset storage topic. |
| * Required when offset.storage is set to the KafkaOffsetBackingStore. |
| */ |
| private Integer offsetStorageReplicationFactor; |
| /** |
| * The name of the Java class of the commit policy. It defines when an |
| * offset commit has to be triggered based on the number of events |
| * processed and the time elapsed since the last commit. This class must |
| * implement the interface 'OffsetCommitPolicy'. The default is a |
| * periodic commit policy based upon time intervals. |
| */ |
| private String offsetCommitPolicy = "io.debezium.embedded.spi.OffsetCommitPolicy.PeriodicCommitOffsetPolicy"; |
| /** |
| * Interval at which to try committing offsets. The default is 1 minute. |
| */ |
| private Long offsetFlushIntervalMs = 60000L; |
| /** |
| * Maximum number of milliseconds to wait for records to flush and |
| * partition offset data to be committed to offset storage before |
| * cancelling the process and restoring the offset data to be committed |
| * in a future attempt. The default is 5 seconds. |
| */ |
| private Long offsetCommitTimeoutMs = 5000L; |
| /** |
| * The number of partitions used when creating the offset storage topic. |
| * Required when offset.storage is set to the 'KafkaOffsetBackingStore'. |
| */ |
| private Integer offsetStoragePartitions; |
| /** |
| * The Converter class that should be used to serialize and deserialize |
| * key data for offsets. The default is the JSON converter. |
| */ |
| private String internalKeyConverter = "org.apache.kafka.connect.json.JsonConverter"; |
| /** |
| * The Converter class that should be used to serialize and deserialize |
| * value data for offsets. The default is the JSON converter. |
| */ |
| private String internalValueConverter = "org.apache.kafka.connect.json.JsonConverter"; |
| |
| public String getSnapshotLockingMode() { |
| return snapshotLockingMode; |
| } |
| |
| public void setSnapshotLockingMode(String snapshotLockingMode) { |
| this.snapshotLockingMode = snapshotLockingMode; |
| } |
| |
| public String getMessageKeyColumns() { |
| return messageKeyColumns; |
| } |
| |
| public void setMessageKeyColumns(String messageKeyColumns) { |
| this.messageKeyColumns = messageKeyColumns; |
| } |
| |
| public String getColumnBlacklist() { |
| return columnBlacklist; |
| } |
| |
| public void setColumnBlacklist(String columnBlacklist) { |
| this.columnBlacklist = columnBlacklist; |
| } |
| |
| public String getTableBlacklist() { |
| return tableBlacklist; |
| } |
| |
| public void setTableBlacklist(String tableBlacklist) { |
| this.tableBlacklist = tableBlacklist; |
| } |
| |
| public Boolean getIncludeSchemaChanges() { |
| return includeSchemaChanges; |
| } |
| |
| public void setIncludeSchemaChanges(Boolean includeSchemaChanges) { |
| this.includeSchemaChanges = includeSchemaChanges; |
| } |
| |
| public String getGtidSourceIncludes() { |
| return gtidSourceIncludes; |
| } |
| |
| public void setGtidSourceIncludes(String gtidSourceIncludes) { |
| this.gtidSourceIncludes = gtidSourceIncludes; |
| } |
| |
| public String getDatabaseJdbcDriver() { |
| return databaseJdbcDriver; |
| } |
| |
| public void setDatabaseJdbcDriver(String databaseJdbcDriver) { |
| this.databaseJdbcDriver = databaseJdbcDriver; |
| } |
| |
| public Integer getDatabaseHistoryKafkaRecoveryPollIntervalMs() { |
| return databaseHistoryKafkaRecoveryPollIntervalMs; |
| } |
| |
| public void setDatabaseHistoryKafkaRecoveryPollIntervalMs( |
| Integer databaseHistoryKafkaRecoveryPollIntervalMs) { |
| this.databaseHistoryKafkaRecoveryPollIntervalMs = databaseHistoryKafkaRecoveryPollIntervalMs; |
| } |
| |
| public Long getPollIntervalMs() { |
| return pollIntervalMs; |
| } |
| |
| public void setPollIntervalMs(Long pollIntervalMs) { |
| this.pollIntervalMs = pollIntervalMs; |
| } |
| |
| public String getDatabaseInitialStatements() { |
| return databaseInitialStatements; |
| } |
| |
| public void setDatabaseInitialStatements( |
| String databaseInitialStatements) { |
| this.databaseInitialStatements = databaseInitialStatements; |
| } |
| |
| public String getHeartbeatTopicsPrefix() { |
| return heartbeatTopicsPrefix; |
| } |
| |
| public void setHeartbeatTopicsPrefix(String heartbeatTopicsPrefix) { |
| this.heartbeatTopicsPrefix = heartbeatTopicsPrefix; |
| } |
| |
| public Integer getBinlogBufferSize() { |
| return binlogBufferSize; |
| } |
| |
| public void setBinlogBufferSize(Integer binlogBufferSize) { |
| this.binlogBufferSize = binlogBufferSize; |
| } |
| |
| public Integer getSnapshotFetchSize() { |
| return snapshotFetchSize; |
| } |
| |
| public void setSnapshotFetchSize(Integer snapshotFetchSize) { |
| this.snapshotFetchSize = snapshotFetchSize; |
| } |
| |
| public String getDatabaseUser() { |
| return databaseUser; |
| } |
| |
| public void setDatabaseUser(String databaseUser) { |
| this.databaseUser = databaseUser; |
| } |
| |
| public String getGtidSourceExcludes() { |
| return gtidSourceExcludes; |
| } |
| |
| public void setGtidSourceExcludes(String gtidSourceExcludes) { |
| this.gtidSourceExcludes = gtidSourceExcludes; |
| } |
| |
| public String getSnapshotSelectStatementOverrides() { |
| return snapshotSelectStatementOverrides; |
| } |
| |
| public void setSnapshotSelectStatementOverrides( |
| String snapshotSelectStatementOverrides) { |
| this.snapshotSelectStatementOverrides = snapshotSelectStatementOverrides; |
| } |
| |
| public String getDatabaseHistoryKafkaBootstrapServers() { |
| return databaseHistoryKafkaBootstrapServers; |
| } |
| |
| public void setDatabaseHistoryKafkaBootstrapServers( |
| String databaseHistoryKafkaBootstrapServers) { |
| this.databaseHistoryKafkaBootstrapServers = databaseHistoryKafkaBootstrapServers; |
| } |
| |
| public String getDatabaseSslKeystore() { |
| return databaseSslKeystore; |
| } |
| |
| public void setDatabaseSslKeystore(String databaseSslKeystore) { |
| this.databaseSslKeystore = databaseSslKeystore; |
| } |
| |
| public Integer getHeartbeatIntervalMs() { |
| return heartbeatIntervalMs; |
| } |
| |
| public void setHeartbeatIntervalMs(Integer heartbeatIntervalMs) { |
| this.heartbeatIntervalMs = heartbeatIntervalMs; |
| } |
| |
| public String getSourceStructVersion() { |
| return sourceStructVersion; |
| } |
| |
| public void setSourceStructVersion(String sourceStructVersion) { |
| this.sourceStructVersion = sourceStructVersion; |
| } |
| |
| public String getDatabaseSslTruststorePassword() { |
| return databaseSslTruststorePassword; |
| } |
| |
| public void setDatabaseSslTruststorePassword( |
| String databaseSslTruststorePassword) { |
| this.databaseSslTruststorePassword = databaseSslTruststorePassword; |
| } |
| |
| public String getInconsistentSchemaHandlingMode() { |
| return inconsistentSchemaHandlingMode; |
| } |
| |
| public void setInconsistentSchemaHandlingMode( |
| String inconsistentSchemaHandlingMode) { |
| this.inconsistentSchemaHandlingMode = inconsistentSchemaHandlingMode; |
| } |
| |
| public Boolean getEnableTimeAdjuster() { |
| return enableTimeAdjuster; |
| } |
| |
| public void setEnableTimeAdjuster(Boolean enableTimeAdjuster) { |
| this.enableTimeAdjuster = enableTimeAdjuster; |
| } |
| |
| public String getGtidNewChannelPosition() { |
| return gtidNewChannelPosition; |
| } |
| |
| public void setGtidNewChannelPosition(String gtidNewChannelPosition) { |
| this.gtidNewChannelPosition = gtidNewChannelPosition; |
| } |
| |
| public String getDatabasePassword() { |
| return databasePassword; |
| } |
| |
| public void setDatabasePassword(String databasePassword) { |
| this.databasePassword = databasePassword; |
| } |
| |
| public Boolean getDatabaseHistoryStoreOnlyMonitoredTablesDdl() { |
| return databaseHistoryStoreOnlyMonitoredTablesDdl; |
| } |
| |
| public void setDatabaseHistoryStoreOnlyMonitoredTablesDdl( |
| Boolean databaseHistoryStoreOnlyMonitoredTablesDdl) { |
| this.databaseHistoryStoreOnlyMonitoredTablesDdl = databaseHistoryStoreOnlyMonitoredTablesDdl; |
| } |
| |
| public Boolean getGtidSourceFilterDmlEvents() { |
| return gtidSourceFilterDmlEvents; |
| } |
| |
| public void setGtidSourceFilterDmlEvents( |
| Boolean gtidSourceFilterDmlEvents) { |
| this.gtidSourceFilterDmlEvents = gtidSourceFilterDmlEvents; |
| } |
| |
| public String getDatabaseBlacklist() { |
| return databaseBlacklist; |
| } |
| |
| public void setDatabaseBlacklist(String databaseBlacklist) { |
| this.databaseBlacklist = databaseBlacklist; |
| } |
| |
| public Integer getMaxBatchSize() { |
| return maxBatchSize; |
| } |
| |
| public void setMaxBatchSize(Integer maxBatchSize) { |
| this.maxBatchSize = maxBatchSize; |
| } |
| |
| public Boolean getConnectKeepAlive() { |
| return connectKeepAlive; |
| } |
| |
| public void setConnectKeepAlive(Boolean connectKeepAlive) { |
| this.connectKeepAlive = connectKeepAlive; |
| } |
| |
| public String getDatabaseHistory() { |
| return databaseHistory; |
| } |
| |
| public void setDatabaseHistory(String databaseHistory) { |
| this.databaseHistory = databaseHistory; |
| } |
| |
| public String getSnapshotMode() { |
| return snapshotMode; |
| } |
| |
| public void setSnapshotMode(String snapshotMode) { |
| this.snapshotMode = snapshotMode; |
| } |
| |
| public Integer getConnectTimeoutMs() { |
| return connectTimeoutMs; |
| } |
| |
| public void setConnectTimeoutMs(Integer connectTimeoutMs) { |
| this.connectTimeoutMs = connectTimeoutMs; |
| } |
| |
| public Integer getMaxQueueSize() { |
| return maxQueueSize; |
| } |
| |
| public void setMaxQueueSize(Integer maxQueueSize) { |
| this.maxQueueSize = maxQueueSize; |
| } |
| |
| public String getDatabaseHistoryKafkaTopic() { |
| return databaseHistoryKafkaTopic; |
| } |
| |
| public void setDatabaseHistoryKafkaTopic( |
| String databaseHistoryKafkaTopic) { |
| this.databaseHistoryKafkaTopic = databaseHistoryKafkaTopic; |
| } |
| |
| public Long getSnapshotDelayMs() { |
| return snapshotDelayMs; |
| } |
| |
| public void setSnapshotDelayMs(Long snapshotDelayMs) { |
| this.snapshotDelayMs = snapshotDelayMs; |
| } |
| |
| public Integer getDatabaseHistoryKafkaRecoveryAttempts() { |
| return databaseHistoryKafkaRecoveryAttempts; |
| } |
| |
| public void setDatabaseHistoryKafkaRecoveryAttempts( |
| Integer databaseHistoryKafkaRecoveryAttempts) { |
| this.databaseHistoryKafkaRecoveryAttempts = databaseHistoryKafkaRecoveryAttempts; |
| } |
| |
| public String getTableWhitelist() { |
| return tableWhitelist; |
| } |
| |
| public void setTableWhitelist(String tableWhitelist) { |
| this.tableWhitelist = tableWhitelist; |
| } |
| |
| public Boolean getTombstonesOnDelete() { |
| return tombstonesOnDelete; |
| } |
| |
| public void setTombstonesOnDelete(Boolean tombstonesOnDelete) { |
| this.tombstonesOnDelete = tombstonesOnDelete; |
| } |
| |
| public String getDecimalHandlingMode() { |
| return decimalHandlingMode; |
| } |
| |
| public void setDecimalHandlingMode(String decimalHandlingMode) { |
| this.decimalHandlingMode = decimalHandlingMode; |
| } |
| |
| public String getSnapshotNewTables() { |
| return snapshotNewTables; |
| } |
| |
| public void setSnapshotNewTables(String snapshotNewTables) { |
| this.snapshotNewTables = snapshotNewTables; |
| } |
| |
| public Boolean getDatabaseHistorySkipUnparseableDdl() { |
| return databaseHistorySkipUnparseableDdl; |
| } |
| |
| public void setDatabaseHistorySkipUnparseableDdl( |
| Boolean databaseHistorySkipUnparseableDdl) { |
| this.databaseHistorySkipUnparseableDdl = databaseHistorySkipUnparseableDdl; |
| } |
| |
| public Boolean getTableIgnoreBuiltin() { |
| return tableIgnoreBuiltin; |
| } |
| |
| public void setTableIgnoreBuiltin(Boolean tableIgnoreBuiltin) { |
| this.tableIgnoreBuiltin = tableIgnoreBuiltin; |
| } |
| |
| public String getDatabaseWhitelist() { |
| return databaseWhitelist; |
| } |
| |
| public void setDatabaseWhitelist(String databaseWhitelist) { |
| this.databaseWhitelist = databaseWhitelist; |
| } |
| |
| public String getDatabaseHistoryFileFilename() { |
| return databaseHistoryFileFilename; |
| } |
| |
| public void setDatabaseHistoryFileFilename( |
| String databaseHistoryFileFilename) { |
| this.databaseHistoryFileFilename = databaseHistoryFileFilename; |
| } |
| |
| public String getBigintUnsignedHandlingMode() { |
| return bigintUnsignedHandlingMode; |
| } |
| |
| public void setBigintUnsignedHandlingMode( |
| String bigintUnsignedHandlingMode) { |
| this.bigintUnsignedHandlingMode = bigintUnsignedHandlingMode; |
| } |
| |
| public Long getDatabaseServerId() { |
| return databaseServerId; |
| } |
| |
| public void setDatabaseServerId(Long databaseServerId) { |
| this.databaseServerId = databaseServerId; |
| } |
| |
| public String getEventDeserializationFailureHandlingMode() { |
| return eventDeserializationFailureHandlingMode; |
| } |
| |
| public void setEventDeserializationFailureHandlingMode( |
| String eventDeserializationFailureHandlingMode) { |
| this.eventDeserializationFailureHandlingMode = eventDeserializationFailureHandlingMode; |
| } |
| |
| public String getTimePrecisionMode() { |
| return timePrecisionMode; |
| } |
| |
| public void setTimePrecisionMode(String timePrecisionMode) { |
| this.timePrecisionMode = timePrecisionMode; |
| } |
| |
| public String getDatabaseServerName() { |
| return databaseServerName; |
| } |
| |
| public void setDatabaseServerName(String databaseServerName) { |
| this.databaseServerName = databaseServerName; |
| } |
| |
| public Integer getDatabasePort() { |
| return databasePort; |
| } |
| |
| public void setDatabasePort(Integer databasePort) { |
| this.databasePort = databasePort; |
| } |
| |
| public String getDatabaseSslTruststore() { |
| return databaseSslTruststore; |
| } |
| |
| public void setDatabaseSslTruststore(String databaseSslTruststore) { |
| this.databaseSslTruststore = databaseSslTruststore; |
| } |
| |
| public String getDatabaseSslMode() { |
| return databaseSslMode; |
| } |
| |
| public void setDatabaseSslMode(String databaseSslMode) { |
| this.databaseSslMode = databaseSslMode; |
| } |
| |
| public String getDatabaseSslKeystorePassword() { |
| return databaseSslKeystorePassword; |
| } |
| |
| public void setDatabaseSslKeystorePassword( |
| String databaseSslKeystorePassword) { |
| this.databaseSslKeystorePassword = databaseSslKeystorePassword; |
| } |
| |
| public String getDatabaseHostname() { |
| return databaseHostname; |
| } |
| |
| public void setDatabaseHostname(String databaseHostname) { |
| this.databaseHostname = databaseHostname; |
| } |
| |
| public Long getDatabaseServerIdOffset() { |
| return databaseServerIdOffset; |
| } |
| |
| public void setDatabaseServerIdOffset(Long databaseServerIdOffset) { |
| this.databaseServerIdOffset = databaseServerIdOffset; |
| } |
| |
| public Long getConnectKeepAliveIntervalMs() { |
| return connectKeepAliveIntervalMs; |
| } |
| |
| public void setConnectKeepAliveIntervalMs( |
| Long connectKeepAliveIntervalMs) { |
| this.connectKeepAliveIntervalMs = connectKeepAliveIntervalMs; |
| } |
| |
| public Boolean getIncludeQuery() { |
| return includeQuery; |
| } |
| |
| public void setIncludeQuery(Boolean includeQuery) { |
| this.includeQuery = includeQuery; |
| } |
| |
| public Class<?> getConnectorClass() { |
| return connectorClass; |
| } |
| |
| public void setConnectorClass(Class<?> connectorClass) { |
| this.connectorClass = connectorClass; |
| } |
| |
| public String getName() { |
| return name; |
| } |
| |
| public void setName(String name) { |
| this.name = name; |
| } |
| |
| public String getOffsetStorage() { |
| return offsetStorage; |
| } |
| |
| public void setOffsetStorage(String offsetStorage) { |
| this.offsetStorage = offsetStorage; |
| } |
| |
| public String getOffsetStorageFileName() { |
| return offsetStorageFileName; |
| } |
| |
| public void setOffsetStorageFileName(String offsetStorageFileName) { |
| this.offsetStorageFileName = offsetStorageFileName; |
| } |
| |
| public String getOffsetStorageTopic() { |
| return offsetStorageTopic; |
| } |
| |
| public void setOffsetStorageTopic(String offsetStorageTopic) { |
| this.offsetStorageTopic = offsetStorageTopic; |
| } |
| |
| public Integer getOffsetStorageReplicationFactor() { |
| return offsetStorageReplicationFactor; |
| } |
| |
| public void setOffsetStorageReplicationFactor( |
| Integer offsetStorageReplicationFactor) { |
| this.offsetStorageReplicationFactor = offsetStorageReplicationFactor; |
| } |
| |
| public String getOffsetCommitPolicy() { |
| return offsetCommitPolicy; |
| } |
| |
| public void setOffsetCommitPolicy(String offsetCommitPolicy) { |
| this.offsetCommitPolicy = offsetCommitPolicy; |
| } |
| |
| public Long getOffsetFlushIntervalMs() { |
| return offsetFlushIntervalMs; |
| } |
| |
| public void setOffsetFlushIntervalMs(Long offsetFlushIntervalMs) { |
| this.offsetFlushIntervalMs = offsetFlushIntervalMs; |
| } |
| |
| public Long getOffsetCommitTimeoutMs() { |
| return offsetCommitTimeoutMs; |
| } |
| |
| public void setOffsetCommitTimeoutMs(Long offsetCommitTimeoutMs) { |
| this.offsetCommitTimeoutMs = offsetCommitTimeoutMs; |
| } |
| |
| public Integer getOffsetStoragePartitions() { |
| return offsetStoragePartitions; |
| } |
| |
| public void setOffsetStoragePartitions(Integer offsetStoragePartitions) { |
| this.offsetStoragePartitions = offsetStoragePartitions; |
| } |
| |
| public String getInternalKeyConverter() { |
| return internalKeyConverter; |
| } |
| |
| public void setInternalKeyConverter(String internalKeyConverter) { |
| this.internalKeyConverter = internalKeyConverter; |
| } |
| |
| public String getInternalValueConverter() { |
| return internalValueConverter; |
| } |
| |
| public void setInternalValueConverter(String internalValueConverter) { |
| this.internalValueConverter = internalValueConverter; |
| } |
| } |
| } |