| /* |
| * Licensed to the Apache Software Foundation (ASF) under one or more |
| * contributor license agreements. See the NOTICE file distributed with |
| * this work for additional information regarding copyright ownership. |
| * The ASF licenses this file to You under the Apache License, Version 2.0 |
| * (the "License"); you may not use this file except in compliance with |
| * the License. You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| package org.apache.camel.component.debezium.springboot; |
| |
| import java.util.Map; |
| import org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration; |
| import org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon; |
| import org.springframework.boot.context.properties.ConfigurationProperties; |
| |
| /** |
| * Capture changes from an Oracle database. |
| * |
| * Generated by camel-package-maven-plugin - do not edit this file! |
| */ |
| @ConfigurationProperties(prefix = "camel.component.debezium-oracle") |
| public class DebeziumOracleComponentConfiguration |
| extends |
| ComponentConfigurationPropertiesCommon { |
| |
| /** |
| * Whether to enable auto configuration of the debezium-oracle component. |
| * This is enabled by default. |
| */ |
| private Boolean enabled; |
| /** |
| * Additional properties for Debezium components in case they cannot be set |
| * directly on the Camel configuration (e.g. Kafka Connect properties needed |
| * by the Debezium engine, such as setting KafkaOffsetBackingStore). The |
| * properties have to be prefixed with 'additionalProperties.'. E.g: |
| * additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro |
| */ |
| private Map<String, Object> additionalProperties; |
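| // A minimal sketch (assumed, not generated) of how this map binds from |
| // application.properties via Spring Boot relaxed binding; the bracketed |
| // keys and values are illustrative: |
| // |
| //   camel.component.debezium-oracle.additional-properties[offset.storage]=org.apache.kafka.connect.storage.KafkaOffsetBackingStore |
| //   camel.component.debezium-oracle.additional-properties[schema.registry.url]=http://localhost:8811/avro |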
| /** |
| * Allows for bridging the consumer to the Camel routing Error Handler, |
| * which means any exceptions (if possible) that occur while the Camel |
| * consumer is trying to pick up incoming messages, or the likes, will now |
| * be processed as a message and handled by the routing Error Handler. |
| * Important: This is only possible if the 3rd party component allows Camel |
| * to be alerted if an exception was thrown. Some components handle this |
| * internally only, and therefore bridgeErrorHandler is not possible. In |
| * other situations we may improve the Camel component to hook into the 3rd |
| * party component and make this possible for future releases. By default |
| * the consumer will use the org.apache.camel.spi.ExceptionHandler to deal |
| * with exceptions, that will be logged at WARN or ERROR level and ignored. |
| */ |
| private Boolean bridgeErrorHandler = false; |
| /** |
| * Allows pre-configured Configurations to be set. The option is an |
| * org.apache.camel.component.debezium.configuration.OracleConnectorEmbeddedDebeziumConfiguration type. |
| */ |
| private OracleConnectorEmbeddedDebeziumConfiguration configuration; |
| /** |
| * The Converter class that should be used to serialize and deserialize key |
| * data for offsets. The default is JSON converter. |
| */ |
| private String internalKeyConverter = "org.apache.kafka.connect.json.JsonConverter"; |
| /** |
| * The Converter class that should be used to serialize and deserialize |
| * value data for offsets. The default is JSON converter. |
| */ |
| private String internalValueConverter = "org.apache.kafka.connect.json.JsonConverter"; |
| /** |
| * The name of the Java class of the commit policy. It defines when offsets |
| * commit has to be triggered based on the number of events processed and |
| * the time elapsed since the last commit. This class must implement the |
| * interface 'OffsetCommitPolicy'. The default is a periodic commit policy |
| * based upon time intervals. |
| */ |
| private String offsetCommitPolicy; |
| /** |
| * Maximum number of milliseconds to wait for records to flush and partition |
| * offset data to be committed to offset storage before cancelling the |
| * process and restoring the offset data to be committed in a future |
| * attempt. The default is 5 seconds. The option is a long type. |
| */ |
| private Long offsetCommitTimeoutMs = 5000L; |
| /** |
| * Interval at which to try committing offsets. The default is 1 minute. The |
| * option is a long type. |
| */ |
| private Long offsetFlushIntervalMs = 60000L; |
| /** |
| * The name of the Java class that is responsible for persistence of |
| * connector offsets. |
| */ |
| private String offsetStorage = "org.apache.kafka.connect.storage.FileOffsetBackingStore"; |
| /** |
| * Path to file where offsets are to be stored. Required when offset.storage |
| * is set to the FileOffsetBackingStore. |
| */ |
| private String offsetStorageFileName; |
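| // An illustrative sketch (assumed binding, hypothetical path) pairing the |
| // two options above: the default FileOffsetBackingStore needs a file name. |
| // |
| //   camel.component.debezium-oracle.offset-storage=org.apache.kafka.connect.storage.FileOffsetBackingStore |
| //   camel.component.debezium-oracle.offset-storage-file-name=/var/lib/camel/debezium-oracle-offsets.dat |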
| /** |
| * The number of partitions used when creating the offset storage topic. |
| * Required when offset.storage is set to the 'KafkaOffsetBackingStore'. |
| */ |
| private Integer offsetStoragePartitions; |
| /** |
| * Replication factor used when creating the offset storage topic. Required |
| * when offset.storage is set to the KafkaOffsetBackingStore. |
| */ |
| private Integer offsetStorageReplicationFactor; |
| /** |
| * The name of the Kafka topic where offsets are to be stored. Required when |
| * offset.storage is set to the KafkaOffsetBackingStore. |
| */ |
| private String offsetStorageTopic; |
| /** |
| * Whether autowiring is enabled. This is used for automatic autowiring |
| * options (the option must be marked as autowired) by looking up in the |
| * registry to find if there is a single instance of matching type, which |
| * then gets configured on the component. This can be used for |
| * automatically configuring JDBC data sources, JMS connection factories, |
| * AWS Clients, etc. |
| */ |
| private Boolean autowiredEnabled = true; |
| /** |
| * Specify how binary (blob, binary, etc.) columns should be represented in |
| * change events, including: 'bytes' represents binary data as byte array |
| * (default); 'base64' represents binary data as base64-encoded string; |
| * 'base64-url-safe' represents binary data as base64-url-safe-encoded |
| * string; 'hex' represents binary data as hex-encoded (base16) string |
| */ |
| private String binaryHandlingMode = "bytes"; |
| /** |
| * Regular expressions matching columns to exclude from change events |
| */ |
| private String columnExcludeList; |
| /** |
| * Regular expressions matching columns to include in change events |
| */ |
| private String columnIncludeList; |
| /** |
| * A comma-separated list of regular expressions matching fully-qualified |
| * names of columns that add the column's original type and original length |
| * as parameters to the corresponding field schemas in the emitted change |
| * records. |
| */ |
| private String columnPropagateSourceType; |
| /** |
| * Optional list of custom converters that would be used instead of default |
| * ones. The converters are defined using the '<converter.prefix>.type' |
| * config option and configured using options '<converter.prefix>.<option>' |
| */ |
| private String converters; |
| /** |
| * The custom metric tags will accept key-value pairs to customize the MBean |
| * object name, which should be appended to the end of the regular name. |
| * Each key represents a tag for the MBean object name, and the |
| * corresponding value is the value of that tag. For example: k1=v1,k2=v2 |
| */ |
| private String customMetricTags; |
| /** |
| * The adapter to use when capturing changes from the database. Options |
| * include: 'logminer': (the default) to capture changes using native Oracle |
| * LogMiner; 'xstream' to capture changes using Oracle XStreams |
| */ |
| private String databaseConnectionAdapter = "LogMiner"; |
| /** |
| * The name of the database from which the connector should capture changes |
| */ |
| private String databaseDbname; |
| /** |
| * Resolvable hostname or IP address of the database server. |
| */ |
| private String databaseHostname; |
| /** |
| * Name of the XStream Out server to connect to. |
| */ |
| private String databaseOutServerName; |
| /** |
| * Password of the database user to be used when connecting to the database. |
| */ |
| private String databasePassword; |
| /** |
| * Name of the pluggable database when working with a multi-tenant set-up. |
| * The CDB name must be given via database.dbname in this case. |
| */ |
| private String databasePdbName; |
| /** |
| * Port of the database server. |
| */ |
| private Integer databasePort = 1528; |
| /** |
| * Complete JDBC URL, provided as an alternative to specifying hostname, |
| * port and database, as a way to support alternative connection scenarios. |
| */ |
| private String databaseUrl; |
| /** |
| * Name of the database user to be used when connecting to the database. |
| */ |
| private String databaseUser; |
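| // An illustrative sketch of the connection options above (hostnames, names |
| // and credentials are placeholders, not defaults): |
| // |
| //   camel.component.debezium-oracle.database-hostname=oracle.example.com |
| //   camel.component.debezium-oracle.database-port=1521 |
| //   camel.component.debezium-oracle.database-dbname=ORCLCDB |
| //   camel.component.debezium-oracle.database-pdb-name=ORCLPDB1 |
| //   camel.component.debezium-oracle.database-user=c##dbzuser |
| //   camel.component.debezium-oracle.database-password=changeit |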
| /** |
| * A comma-separated list of regular expressions matching the |
| * database-specific data type names that add the data type's original type |
| * and original length as parameters to the corresponding field schemas in |
| * the emitted change records. |
| */ |
| private String datatypePropagateSourceType; |
| /** |
| * Specify how DECIMAL and NUMERIC columns should be represented in change |
| * events, including: 'precise' (the default) uses java.math.BigDecimal to |
| * represent values, which are encoded in the change events using a binary |
| * representation and Kafka Connect's |
| * 'org.apache.kafka.connect.data.Decimal' type; 'string' uses string to |
| * represent values; 'double' represents values using Java's 'double', which |
| * may not offer the same precision but will be far easier to use in |
| * consumers. |
| */ |
| private String decimalHandlingMode = "precise"; |
| /** |
| * The maximum number of retries on connection errors before failing (-1 = |
| * no limit, 0 = disabled, > 0 = number of retries). |
| */ |
| private Integer errorsMaxRetries = -1; |
| /** |
| * Specify how failures during processing of events (i.e. when encountering |
| * a corrupted event) should be handled, including: 'fail' (the default) an |
| * exception indicating the problematic event and its position is raised, |
| * causing the connector to be stopped; 'warn' the problematic event and its |
| * position will be logged and the event will be skipped; 'ignore' the |
| * problematic event will be skipped. |
| */ |
| private String eventProcessingFailureHandlingMode = "fail"; |
| /** |
| * The query executed with every heartbeat. |
| */ |
| private String heartbeatActionQuery; |
| /** |
| * Length of an interval in milliseconds in which the connector |
| * periodically sends heartbeat messages to a heartbeat topic. Use 0 to |
| * disable heartbeat messages. Disabled by default. The option is an int |
| * type. |
| */ |
| private Integer heartbeatIntervalMs = 0; |
| /** |
| * The prefix that is used to name heartbeat topics. Defaults to |
| * __debezium-heartbeat. |
| */ |
| private String heartbeatTopicsPrefix = "__debezium-heartbeat"; |
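| // An illustrative sketch (assumed binding, hypothetical heartbeat table) of |
| // enabling heartbeats with the options above: |
| // |
| //   camel.component.debezium-oracle.heartbeat-interval-ms=10000 |
| //   camel.component.debezium-oracle.heartbeat-action-query=UPDATE DEBEZIUM.HEARTBEAT SET TS = SYSTIMESTAMP |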
| /** |
| * Whether the connector should publish changes in the database schema to a |
| * Kafka topic with the same name as the database server ID. Each schema |
| * change will be recorded using a key that contains the database name and |
| * whose value includes a logical description of the new schema and |
| * optionally the DDL statement(s). The default is 'true'. This is |
| * independent of how the connector internally records database schema |
| * history. |
| */ |
| private Boolean includeSchemaChanges = true; |
| /** |
| * Whether the connector should parse table and column comments into the |
| * metadata objects. Note: Enabling this option has implications on memory |
| * usage. The number and size of ColumnImpl objects is what largely impacts |
| * how much memory is consumed by the Debezium connectors, and adding a |
| * String to each of them can potentially be quite heavy. The default is |
| * 'false'. |
| */ |
| private Boolean includeSchemaComments = false; |
| /** |
| * Specify the strategy used for watermarking during an incremental |
| * snapshot: 'insert_insert' both the open and close signals are written |
| * into the signal data collection (default); 'insert_delete' only the open |
| * signal is written to the signal data collection, and the close will |
| * delete the related open signal; |
| */ |
| private String incrementalSnapshotWatermarkingStrategy = "INSERT_INSERT"; |
| /** |
| * Specify how INTERVAL columns should be represented in change events, |
| * including: 'string' represents values as an exact ISO formatted string; |
| * 'numeric' (default) represents values using the inexact conversion into |
| * microseconds |
| */ |
| private String intervalHandlingMode = "numeric"; |
| /** |
| * When set to 'false', the default, LOB fields will not be captured nor |
| * emitted. When set to 'true', the connector will capture LOB fields and |
| * emit changes for those fields like any other column type. |
| */ |
| private Boolean lobEnabled = false; |
| /** |
| * Sets the specific archive log destination as the source for reading |
| * archive logs. When not set, the connector will automatically select the |
| * first LOCAL and VALID destination. |
| */ |
| private String logMiningArchiveDestinationName; |
| /** |
| * The number of hours in the past from SYSDATE to mine archive logs. Using |
| * 0 mines all available archive logs |
| */ |
| private Long logMiningArchiveLogHours = 0L; |
| /** |
| * When set to 'false', the default, the connector will mine both archive |
| * logs and redo logs to emit change events. When set to 'true', the |
| * connector will only mine archive logs. There are circumstances where it |
| * is advantageous to only mine archive logs and accept latency in event |
| * emission due to frequently rotating redo logs. |
| */ |
| private Boolean logMiningArchiveLogOnlyMode = false; |
| /** |
| * The interval in milliseconds to wait between polls checking to see if the |
| * SCN is in the archive logs. The option is a long type. |
| */ |
| private Long logMiningArchiveLogOnlyScnPollIntervalMs = 10000L; |
| /** |
| * The starting SCN interval size that the connector will use for reading |
| * data from redo/archive logs. |
| */ |
| private Long logMiningBatchSizeDefault = 20000L; |
| /** |
| * The maximum SCN interval size that this connector will use when reading |
| * from redo/archive logs. |
| */ |
| private Long logMiningBatchSizeMax = 100000L; |
| /** |
| * The minimum SCN interval size that this connector will try to read from |
| * redo/archive logs. Active batch size will be also increased/decreased by |
| * this amount for tuning connector throughput when needed. |
| */ |
| private Long logMiningBatchSizeMin = 1000L; |
| /** |
| * When set to true the underlying buffer cache is not retained when the |
| * connector is stopped. When set to false (the default), the buffer cache |
| * is retained across restarts. |
| */ |
| private Boolean logMiningBufferDropOnStop = false; |
| /** |
| * Specifies the XML configuration for the Infinispan 'events' cache |
| */ |
| private String logMiningBufferInfinispanCacheEvents; |
| /** |
| * Specifies the XML configuration for the Infinispan 'global' configuration |
| */ |
| private String logMiningBufferInfinispanCacheGlobal; |
| /** |
| * Specifies the XML configuration for the Infinispan |
| * 'processed-transactions' cache |
| */ |
| private String logMiningBufferInfinispanCacheProcessedTransactions; |
| /** |
| * Specifies the XML configuration for the Infinispan 'schema-changes' cache |
| */ |
| private String logMiningBufferInfinispanCacheSchemaChanges; |
| /** |
| * Specifies the XML configuration for the Infinispan 'transactions' cache |
| */ |
| private String logMiningBufferInfinispanCacheTransactions; |
| /** |
| * The number of events a transaction can include before the transaction is |
| * discarded. This is useful for managing buffer memory and/or space when |
| * dealing with very large transactions. Defaults to 0, meaning that no |
| * threshold is applied and transactions can have unlimited events. |
| */ |
| private Long logMiningBufferTransactionEventsThreshold = 0L; |
| /** |
| * The buffer type controls how the connector manages buffering transaction |
| * data. memory - Uses the JVM process' heap to buffer all transaction data. |
| * infinispan_embedded - This option uses an embedded Infinispan cache to |
| * buffer transaction data and persist it to disk. infinispan_remote - This |
| * option uses a remote Infinispan cluster to buffer transaction data and |
| * persist it to disk. |
| */ |
| private String logMiningBufferType = "memory"; |
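| // A sketch (values assumed) of switching from the default heap buffer to an |
| // embedded Infinispan buffer, using the cache options declared above; the |
| // cache XML is elided: |
| // |
| //   camel.component.debezium-oracle.log-mining-buffer-type=infinispan_embedded |
| //   camel.component.debezium-oracle.log-mining-buffer-infinispan-cache-transactions=<local-cache name="transactions">...</local-cache> |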
| /** |
| * The name of the flush table used by the connector, defaults to |
| * LOG_MINING_FLUSH. |
| */ |
| private String logMiningFlushTableName = "LOG_MINING_FLUSH"; |
| /** |
| * Specifies how the filter configuration is applied to the LogMiner |
| * database query. none - The query does not apply any schema or table |
| * filters, all filtering is at runtime by the connector. in - The query |
| * uses SQL in-clause expressions to specify the schema or table filters. |
| * regex - The query uses Oracle REGEXP_LIKE expressions to specify the |
| * schema or table filters. |
| */ |
| private String logMiningQueryFilterMode = "none"; |
| /** |
| * Debezium opens a database connection and keeps that connection open |
| * throughout the entire streaming phase. In some situations, this can lead |
| * to excessive SGA memory usage. By setting this option to 'true' (the |
| * default is 'false'), the connector will close and re-open a database |
| * connection after every detected log switch or if the |
| * log.mining.session.max.ms has been reached. |
| */ |
| private Boolean logMiningRestartConnection = false; |
| /** |
| * Used for SCN gap detection: if the difference between the current SCN |
| * and the previous end SCN is bigger than this value, and the time |
| * difference between the current SCN and the previous end SCN is smaller |
| * than log.mining.scn.gap.detection.time.interval.max.ms, consider it an |
| * SCN gap. |
| */ |
| private Long logMiningScnGapDetectionGapSizeMin = 1000000L; |
| /** |
| * Used for SCN gap detection: if the difference between the current SCN |
| * and the previous end SCN is bigger than |
| * log.mining.scn.gap.detection.gap.size.min, and the time difference |
| * between the current SCN and the previous end SCN is smaller than this |
| * value, consider it an SCN gap. The option is a long type. |
| */ |
| private Long logMiningScnGapDetectionTimeIntervalMaxMs = 20000L; |
| /** |
| * The maximum number of milliseconds that a LogMiner session lives for |
| * before being restarted. Defaults to 0 (indefinite until a log switch |
| * occurs). The option is a long type. |
| */ |
| private Long logMiningSessionMaxMs = 0L; |
| /** |
| * The amount of time that the connector will sleep after reading data from |
| * redo/archive logs and before starting reading data again. Value is in |
| * milliseconds. The option is a long type. |
| */ |
| private Long logMiningSleepTimeDefaultMs = 1000L; |
| /** |
| * The maximum amount of time that the connector will use to tune the |
| * optimal sleep time when reading data from LogMiner. Value is in |
| * milliseconds. The option is a long type. |
| */ |
| private Long logMiningSleepTimeIncrementMs = 200L; |
| /** |
| * The maximum amount of time that the connector will sleep after reading |
| * data from redo/archive logs and before starting reading data again. Value |
| * is in milliseconds. The option is a long type. |
| */ |
| private Long logMiningSleepTimeMaxMs = 3000L; |
| /** |
| * The minimum amount of time that the connector will sleep after reading |
| * data from redo/archive logs and before starting reading data again. Value |
| * is in milliseconds. The option is a long type. |
| */ |
| private Long logMiningSleepTimeMinMs = 0L; |
| /** |
| * There are two strategies: 'online_catalog' uses the online catalog for |
| * faster mining but does not capture DDL changes; 'redo_log_catalog' (the |
| * default) writes the data dictionary to the redo logs, which is slower |
| * but allows DDL changes to be captured. |
| */ |
| private String logMiningStrategy = "redo_log_catalog"; |
| /** |
| * Duration in milliseconds to keep long-running transactions in the |
| * transaction buffer between log mining sessions. By default, all |
| * transactions are retained. The option is a long type. |
| */ |
| private Long logMiningTransactionRetentionMs = 0L; |
| /** |
| * Comma-separated list of usernames to exclude from the LogMiner query. |
| */ |
| private String logMiningUsernameExcludeList; |
| /** |
| * Comma-separated list of usernames to include in the LogMiner query. |
| */ |
| private String logMiningUsernameIncludeList; |
| /** |
| * Maximum size of each batch of source records. Defaults to 2048. |
| */ |
| private Integer maxBatchSize = 2048; |
| /** |
| * Maximum size of the queue for change events read from the database log |
| * but not yet recorded or forwarded. Defaults to 8192, and should always be |
| * larger than the maximum batch size. |
| */ |
| private Integer maxQueueSize = 8192; |
| /** |
| * Maximum size of the queue in bytes for change events read from the |
| * database log but not yet recorded or forwarded. Defaults to 0, meaning |
| * the feature is not enabled. |
| */ |
| private Long maxQueueSizeInBytes = 0L; |
| /** |
| * A semicolon-separated list of expressions that match fully-qualified |
| * tables and column(s) to be used as message key. Each expression must |
| * match the pattern '<fully-qualified table name>:<key columns>', where |
| * the table names could be defined as (DB_NAME.TABLE_NAME) or |
| * (SCHEMA_NAME.TABLE_NAME), depending on the specific connector, and the |
| * key columns are a comma-separated list of columns representing the |
| * custom key. For any table without an explicit key configuration the |
| * table's primary key column(s) will be used as message key. Example: |
| * dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id |
| */ |
| private String messageKeyColumns; |
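| // The Javadoc example above, expressed as a Spring Boot property (binding |
| // form assumed): |
| // |
| //   camel.component.debezium-oracle.message-key-columns=dbserver1.inventory.orderlines:orderId,orderLineId;dbserver1.inventory.orders:id |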
| /** |
| * List of notification channel names that are enabled. |
| */ |
| private String notificationEnabledChannels; |
| /** |
| * The name of the topic for the notifications. This is required in case |
| * 'sink' is in the list of enabled channels. |
| */ |
| private String notificationSinkTopicName; |
| /** |
| * The hostname of the OpenLogReplicator network service |
| */ |
| private String openlogreplicatorHost; |
| /** |
| * The port of the OpenLogReplicator network service |
| */ |
| private Integer openlogreplicatorPort; |
| /** |
| * The configured logical source name in the OpenLogReplicator configuration |
| * that is to stream changes |
| */ |
| private String openlogreplicatorSource; |
| /** |
| * Time to wait for new change events to appear after receiving no events, |
| * given in milliseconds. Defaults to 500 ms. The option is a long type. |
| */ |
| private Long pollIntervalMs = 500L; |
| /** |
| * Optional list of post processors. The processors are defined using the |
| * '<post.processor.prefix>.type' config option and configured using |
| * options '<post.processor.prefix>.<option>' |
| */ |
| private String postProcessors; |
| /** |
| * Enables transaction metadata extraction together with event counting |
| */ |
| private Boolean provideTransactionMetadata = false; |
| /** |
| * The maximum number of records that should be loaded into memory while |
| * streaming. A value of '0' uses the default JDBC fetch size. Defaults to |
| * '10000'. |
| */ |
| private Integer queryFetchSize = 10000; |
| /** |
| * A comma-separated list of RAC node hostnames or IP addresses |
| */ |
| private String racNodes; |
| /** |
| * Time to wait before restarting connector after retriable exception |
| * occurs. Defaults to 10000ms. The option is a long type. |
| */ |
| private Long retriableRestartConnectorWaitMs = 10000L; |
| /** |
| * The name of the SchemaHistory class that should be used to store and |
| * recover database schema changes. The configuration properties for the |
| * history are prefixed with the 'schema.history.internal.' string. |
| */ |
| private String schemaHistoryInternal = "io.debezium.storage.kafka.history.KafkaSchemaHistory"; |
| /** |
| * The path to the file that will be used to record the database schema |
| * history |
| */ |
| private String schemaHistoryInternalFileFilename; |
| /** |
| * Controls the action Debezium will take when it meets a DDL statement in |
| * the transaction log that it cannot parse. By default the connector will |
| * stop operating, but by changing the setting it can ignore the statements |
| * which it cannot parse. If skipping is enabled then Debezium can miss |
| * metadata changes. |
| */ |
| private Boolean schemaHistoryInternalSkipUnparseableDdl = false; |
| /** |
| * Controls what DDL Debezium will store in the database schema history. |
| * When set to true, only DDL that manipulates a table from a captured |
| * schema/database will be stored. If set to false, Debezium will store |
| * all incoming DDL statements. |
| */ |
| private Boolean schemaHistoryInternalStoreOnlyCapturedDatabasesDdl = false; |
| /** |
| * Controls what DDL Debezium will store in the database schema history. By |
| * default (false) Debezium will store all incoming DDL statements. If set |
| * to true, then only DDL that manipulates a captured table will be stored. |
| */ |
| private Boolean schemaHistoryInternalStoreOnlyCapturedTablesDdl = false; |
| /** |
| * Specify how schema names should be adjusted for compatibility with the |
| * message converter used by the connector, including: 'avro' replaces the |
| * characters that cannot be used in the Avro type name with underscore; |
| * 'avro_unicode' replaces the underscore or characters that cannot be used |
| * in the Avro type name with corresponding unicode like _uxxxx. Note: _ is |
| * an escape sequence like backslash in Java; 'none' does not apply any |
| * adjustment (default) |
| */ |
| private String schemaNameAdjustmentMode = "none"; |
| /** |
| * The name of the data collection that is used to send signals/commands to |
| * Debezium. Signaling is disabled when not set. |
| */ |
| private String signalDataCollection; |
| /** |
| * List of channel names that are enabled. The source channel is enabled by |
| * default. |
| */ |
| private String signalEnabledChannels = "source"; |
| /** |
| * Interval for looking for new signals in registered channels, given in |
| * milliseconds. Defaults to 5 seconds. The option is a long type. |
| */ |
| private Long signalPollIntervalMs = 5000L; |
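| // An illustrative sketch (hypothetical table name) of enabling source |
| // signaling with the options above: |
| // |
| //   camel.component.debezium-oracle.signal-data-collection=DEBEZIUM.SIGNALS |
| //   camel.component.debezium-oracle.signal-poll-interval-ms=5000 |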
| /** |
| * The comma-separated list of operations to skip during streaming, defined |
| * as: 'c' for inserts/create; 'u' for updates; 'd' for deletes, 't' for |
| * truncates, and 'none' to indicate nothing skipped. By default, only |
| * truncate operations will be skipped. |
| */ |
| private String skippedOperations = "t"; |
| /** |
| * A delay period before a snapshot will begin, given in milliseconds. |
| * Defaults to 0 ms. The option is a long type. |
| */ |
| private Long snapshotDelayMs = 0L; |
| /** |
| * A token to replace in the snapshot predicate template. |
| */ |
| private String snapshotEnhancePredicateScn; |
| /** |
| * The maximum number of records that should be loaded into memory while |
| * performing a snapshot. |
| */ |
| private Integer snapshotFetchSize; |
| /** |
| * Specifies a list of tables/collections whose snapshot must be taken when |
| * creating or restarting the connector. |
| */ |
| private String snapshotIncludeCollectionList; |
| /** |
| * Controls how the connector holds locks on tables while performing the |
| * schema snapshot. The default is 'shared', which means the connector will |
| * hold a table lock that prevents exclusive table access for just the |
| * initial portion of the snapshot while the database schemas and other |
| * metadata are being read. The remaining work in a snapshot involves |
| * selecting all rows from each table, and this is done using a flashback |
| * query that requires no locks. However, in some cases it may be desirable |
| * to avoid locks entirely which can be done by specifying 'none'. This mode |
| * is only safe to use if no schema changes are happening while the snapshot |
| * is taken. |
| */ |
| private String snapshotLockingMode = "shared"; |
| /** |
| * The maximum number of milliseconds to wait for table locks at the |
| * beginning of a snapshot. If locks cannot be acquired in this time frame, |
| * the snapshot will be aborted. Defaults to 10 seconds. The option is a |
| * long type. |
| */ |
| private Long snapshotLockTimeoutMs = 10000L; |
| /** |
| * The maximum number of threads used to perform the snapshot. Defaults to |
| * 1. |
| */ |
| private Integer snapshotMaxThreads = 1; |
| /** |
| * The criteria for running a snapshot upon startup of the connector. Select |
| * one of the following snapshot options: 'always': The connector runs a |
| * snapshot every time that it starts. After the snapshot completes, the |
| * connector begins to stream changes from the redo logs; 'initial' |
| * (default): If the connector does not detect any offsets for the logical |
| * server name, it runs a snapshot that captures the current full state of |
| * the configured tables. After the snapshot completes, the connector begins |
| * to stream changes from the redo logs; 'initial_only': The connector |
| * performs a snapshot as it does for the 'initial' option, but after the |
| * connector completes the snapshot, it stops, and does not stream changes |
| * from the redo logs; 'schema_only': If the connector does not detect any |
| * offsets for the logical server name, it runs a snapshot that captures |
| * only the schema (table structures), but not any table data. After the |
| * snapshot completes, the connector begins to stream changes from the redo |
| * logs; 'schema_only_recovery': The connector performs a snapshot that |
| * captures only the database schema history. The connector then transitions |
| * to streaming from the redo logs. Use this setting to restore a corrupted |
| * or lost database schema history topic. Do not use if the database schema |
| * was modified after the connector stopped. |
| */ |
| private String snapshotMode = "initial"; |
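| // For example (assumed binding), to capture only schema on first start and |
| // then stream: |
| // |
| //   camel.component.debezium-oracle.snapshot-mode=schema_only |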
| /** |
| * This property contains a comma-separated list of fully-qualified tables |
| * (DB_NAME.TABLE_NAME) or (SCHEMA_NAME.TABLE_NAME), depending on the |
| * specific connector. Select statements for the individual tables are |
| * specified in further configuration properties, one for each table, |
| * identified by the id |
| * 'snapshot.select.statement.overrides.DB_NAME.TABLE_NAME' or |
| * 'snapshot.select.statement.overrides.SCHEMA_NAME.TABLE_NAME', |
| * respectively. The value of those properties is the select statement to |
| * use when retrieving data from the specific table during snapshotting. A |
| * possible use case for large append-only tables is setting a specific |
| * point where to start (resume) snapshotting, in case a previous |
| * snapshotting was interrupted. |
| */ |
| private String snapshotSelectStatementOverrides; |
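| // The per-table 'snapshot.select.statement.overrides.SCHEMA.TABLE' keys are |
| // not first-class fields here; one assumed way to supply them is via the |
| // additionalProperties map (schema, table and SQL are illustrative): |
| // |
| //   camel.component.debezium-oracle.snapshot-select-statement-overrides=INVENTORY.ORDERS |
| //   camel.component.debezium-oracle.additional-properties[snapshot.select.statement.overrides.INVENTORY.ORDERS]=SELECT * FROM INVENTORY.ORDERS WHERE ID > 1000 |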
| /** |
| * Controls the order in which tables are processed in the initial |
| * snapshot. A 'descending' value will order the tables by row count |
| * descending. An 'ascending' value will order the tables by row count |
| * ascending. A value of 'disabled' (the default) will disable ordering by |
| * row count. |
| */ |
| private String snapshotTablesOrderByRowCount = "disabled"; |
| /** |
| * The name of the SourceInfoStructMaker class that returns SourceInfo |
| * schema and struct. |
| */ |
| private String sourceinfoStructMaker = "io.debezium.connector.oracle.OracleSourceInfoStructMaker"; |
| /** |
| * A comma-separated list of regular expressions that match the |
| * fully-qualified names of tables to be excluded from monitoring |
| */ |
| private String tableExcludeList; |
| /** |
| * The tables for which changes are to be captured |
| */ |
| private String tableIncludeList; |
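| // An illustrative sketch (schema and table names assumed) of the include |
| // filter above: |
| // |
| //   camel.component.debezium-oracle.table-include-list=DEBEZIUM.CUSTOMERS,DEBEZIUM.ORDERS |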
| /** |
| * Time, date, and timestamps can be represented with different kinds of |
| * precisions, including: 'adaptive' (the default) bases the precision of |
| * time, date, and timestamp values on the database column's precision; |
| * 'adaptive_time_microseconds' like 'adaptive' mode, but TIME fields always |
| * use microseconds precision; 'connect' always represents time, date, and |
| * timestamp values using Kafka Connect's built-in representations for Time, |
| * Date, and Timestamp, which uses millisecond precision regardless of the |
| * database columns' precision. |
| */ |
| private String timePrecisionMode = "adaptive"; |
| /** |
| * Whether delete operations should be represented by a delete event and a |
| * subsequent tombstone event (true) or only by a delete event (false). |
| * Emitting the tombstone event (the default behavior) allows Kafka to |
| * completely delete all events pertaining to the given key once the source |
| * record got deleted. |
| */ |
| private Boolean tombstonesOnDelete = false; |
| /** |
| * The name of the TopicNamingStrategy class that should be used to |
| * determine the topic name for data change, schema change, transaction, |
| * heartbeat event etc. |
| */ |
| private String topicNamingStrategy = "io.debezium.schema.SchemaTopicNamingStrategy"; |
| /** |
| * Topic prefix that identifies and provides a namespace for the particular |
| * database server/cluster that is capturing changes. The topic prefix |
| * should be unique across all other connectors, since it is used as a |
| * prefix for all Kafka topic names that receive events emitted by this |
| * connector. Only alphanumeric characters, hyphens, dots and underscores |
| * are accepted. |
| */ |
| private String topicPrefix; |
| /** |
| * Specify the constant that will be provided by Debezium to indicate that |
| * the original value is unavailable and not provided by the database. |
| */ |
| private String unavailableValuePlaceholder = "__debezium_unavailable_value"; |
| |
| public Map<String, Object> getAdditionalProperties() { |
| return additionalProperties; |
| } |
| |
| public void setAdditionalProperties(Map<String, Object> additionalProperties) { |
| this.additionalProperties = additionalProperties; |
| } |
| |
| public Boolean getBridgeErrorHandler() { |
| return bridgeErrorHandler; |
| } |
| |
| public void setBridgeErrorHandler(Boolean bridgeErrorHandler) { |
| this.bridgeErrorHandler = bridgeErrorHandler; |
| } |
| |
| public OracleConnectorEmbeddedDebeziumConfiguration getConfiguration() { |
| return configuration; |
| } |
| |
| public void setConfiguration( |
| OracleConnectorEmbeddedDebeziumConfiguration configuration) { |
| this.configuration = configuration; |
| } |
| |
| public String getInternalKeyConverter() { |
| return internalKeyConverter; |
| } |
| |
| public void setInternalKeyConverter(String internalKeyConverter) { |
| this.internalKeyConverter = internalKeyConverter; |
| } |
| |
| public String getInternalValueConverter() { |
| return internalValueConverter; |
| } |
| |
| public void setInternalValueConverter(String internalValueConverter) { |
| this.internalValueConverter = internalValueConverter; |
| } |
| |
| public String getOffsetCommitPolicy() { |
| return offsetCommitPolicy; |
| } |
| |
| public void setOffsetCommitPolicy(String offsetCommitPolicy) { |
| this.offsetCommitPolicy = offsetCommitPolicy; |
| } |
| |
| public Long getOffsetCommitTimeoutMs() { |
| return offsetCommitTimeoutMs; |
| } |
| |
| public void setOffsetCommitTimeoutMs(Long offsetCommitTimeoutMs) { |
| this.offsetCommitTimeoutMs = offsetCommitTimeoutMs; |
| } |
| |
| public Long getOffsetFlushIntervalMs() { |
| return offsetFlushIntervalMs; |
| } |
| |
| public void setOffsetFlushIntervalMs(Long offsetFlushIntervalMs) { |
| this.offsetFlushIntervalMs = offsetFlushIntervalMs; |
| } |
| |
| public String getOffsetStorage() { |
| return offsetStorage; |
| } |
| |
| public void setOffsetStorage(String offsetStorage) { |
| this.offsetStorage = offsetStorage; |
| } |
| |
| public String getOffsetStorageFileName() { |
| return offsetStorageFileName; |
| } |
| |
| public void setOffsetStorageFileName(String offsetStorageFileName) { |
| this.offsetStorageFileName = offsetStorageFileName; |
| } |
| |
| public Integer getOffsetStoragePartitions() { |
| return offsetStoragePartitions; |
| } |
| |
| public void setOffsetStoragePartitions(Integer offsetStoragePartitions) { |
| this.offsetStoragePartitions = offsetStoragePartitions; |
| } |
| |
| public Integer getOffsetStorageReplicationFactor() { |
| return offsetStorageReplicationFactor; |
| } |
| |
| public void setOffsetStorageReplicationFactor( |
| Integer offsetStorageReplicationFactor) { |
| this.offsetStorageReplicationFactor = offsetStorageReplicationFactor; |
| } |
| |
| public String getOffsetStorageTopic() { |
| return offsetStorageTopic; |
| } |
| |
| public void setOffsetStorageTopic(String offsetStorageTopic) { |
| this.offsetStorageTopic = offsetStorageTopic; |
| } |
| |
| public Boolean getAutowiredEnabled() { |
| return autowiredEnabled; |
| } |
| |
| public void setAutowiredEnabled(Boolean autowiredEnabled) { |
| this.autowiredEnabled = autowiredEnabled; |
| } |
| |
| public String getBinaryHandlingMode() { |
| return binaryHandlingMode; |
| } |
| |
| public void setBinaryHandlingMode(String binaryHandlingMode) { |
| this.binaryHandlingMode = binaryHandlingMode; |
| } |
| |
| public String getColumnExcludeList() { |
| return columnExcludeList; |
| } |
| |
| public void setColumnExcludeList(String columnExcludeList) { |
| this.columnExcludeList = columnExcludeList; |
| } |
| |
| public String getColumnIncludeList() { |
| return columnIncludeList; |
| } |
| |
| public void setColumnIncludeList(String columnIncludeList) { |
| this.columnIncludeList = columnIncludeList; |
| } |
| |
| public String getColumnPropagateSourceType() { |
| return columnPropagateSourceType; |
| } |
| |
| public void setColumnPropagateSourceType(String columnPropagateSourceType) { |
| this.columnPropagateSourceType = columnPropagateSourceType; |
| } |
| |
| public String getConverters() { |
| return converters; |
| } |
| |
| public void setConverters(String converters) { |
| this.converters = converters; |
| } |
| |
| public String getCustomMetricTags() { |
| return customMetricTags; |
| } |
| |
| public void setCustomMetricTags(String customMetricTags) { |
| this.customMetricTags = customMetricTags; |
| } |
| |
| public String getDatabaseConnectionAdapter() { |
| return databaseConnectionAdapter; |
| } |
| |
| public void setDatabaseConnectionAdapter(String databaseConnectionAdapter) { |
| this.databaseConnectionAdapter = databaseConnectionAdapter; |
| } |
| |
| public String getDatabaseDbname() { |
| return databaseDbname; |
| } |
| |
| public void setDatabaseDbname(String databaseDbname) { |
| this.databaseDbname = databaseDbname; |
| } |
| |
| public String getDatabaseHostname() { |
| return databaseHostname; |
| } |
| |
| public void setDatabaseHostname(String databaseHostname) { |
| this.databaseHostname = databaseHostname; |
| } |
| |
| public String getDatabaseOutServerName() { |
| return databaseOutServerName; |
| } |
| |
| public void setDatabaseOutServerName(String databaseOutServerName) { |
| this.databaseOutServerName = databaseOutServerName; |
| } |
| |
| public String getDatabasePassword() { |
| return databasePassword; |
| } |
| |
| public void setDatabasePassword(String databasePassword) { |
| this.databasePassword = databasePassword; |
| } |
| |
| public String getDatabasePdbName() { |
| return databasePdbName; |
| } |
| |
| public void setDatabasePdbName(String databasePdbName) { |
| this.databasePdbName = databasePdbName; |
| } |
| |
| public Integer getDatabasePort() { |
| return databasePort; |
| } |
| |
| public void setDatabasePort(Integer databasePort) { |
| this.databasePort = databasePort; |
| } |
| |
| public String getDatabaseUrl() { |
| return databaseUrl; |
| } |
| |
| public void setDatabaseUrl(String databaseUrl) { |
| this.databaseUrl = databaseUrl; |
| } |
| |
| public String getDatabaseUser() { |
| return databaseUser; |
| } |
| |
| public void setDatabaseUser(String databaseUser) { |
| this.databaseUser = databaseUser; |
| } |
| |
| public String getDatatypePropagateSourceType() { |
| return datatypePropagateSourceType; |
| } |
| |
| public void setDatatypePropagateSourceType( |
| String datatypePropagateSourceType) { |
| this.datatypePropagateSourceType = datatypePropagateSourceType; |
| } |
| |
| public String getDecimalHandlingMode() { |
| return decimalHandlingMode; |
| } |
| |
| public void setDecimalHandlingMode(String decimalHandlingMode) { |
| this.decimalHandlingMode = decimalHandlingMode; |
| } |
| |
| public Integer getErrorsMaxRetries() { |
| return errorsMaxRetries; |
| } |
| |
| public void setErrorsMaxRetries(Integer errorsMaxRetries) { |
| this.errorsMaxRetries = errorsMaxRetries; |
| } |
| |
| public String getEventProcessingFailureHandlingMode() { |
| return eventProcessingFailureHandlingMode; |
| } |
| |
| public void setEventProcessingFailureHandlingMode( |
| String eventProcessingFailureHandlingMode) { |
| this.eventProcessingFailureHandlingMode = eventProcessingFailureHandlingMode; |
| } |
| |
| public String getHeartbeatActionQuery() { |
| return heartbeatActionQuery; |
| } |
| |
| public void setHeartbeatActionQuery(String heartbeatActionQuery) { |
| this.heartbeatActionQuery = heartbeatActionQuery; |
| } |
| |
| public Integer getHeartbeatIntervalMs() { |
| return heartbeatIntervalMs; |
| } |
| |
| public void setHeartbeatIntervalMs(Integer heartbeatIntervalMs) { |
| this.heartbeatIntervalMs = heartbeatIntervalMs; |
| } |
| |
| public String getHeartbeatTopicsPrefix() { |
| return heartbeatTopicsPrefix; |
| } |
| |
| public void setHeartbeatTopicsPrefix(String heartbeatTopicsPrefix) { |
| this.heartbeatTopicsPrefix = heartbeatTopicsPrefix; |
| } |
| |
| public Boolean getIncludeSchemaChanges() { |
| return includeSchemaChanges; |
| } |
| |
| public void setIncludeSchemaChanges(Boolean includeSchemaChanges) { |
| this.includeSchemaChanges = includeSchemaChanges; |
| } |
| |
| public Boolean getIncludeSchemaComments() { |
| return includeSchemaComments; |
| } |
| |
| public void setIncludeSchemaComments(Boolean includeSchemaComments) { |
| this.includeSchemaComments = includeSchemaComments; |
| } |
| |
| public String getIncrementalSnapshotWatermarkingStrategy() { |
| return incrementalSnapshotWatermarkingStrategy; |
| } |
| |
| public void setIncrementalSnapshotWatermarkingStrategy( |
| String incrementalSnapshotWatermarkingStrategy) { |
| this.incrementalSnapshotWatermarkingStrategy = incrementalSnapshotWatermarkingStrategy; |
| } |
| |
| public String getIntervalHandlingMode() { |
| return intervalHandlingMode; |
| } |
| |
| public void setIntervalHandlingMode(String intervalHandlingMode) { |
| this.intervalHandlingMode = intervalHandlingMode; |
| } |
| |
| public Boolean getLobEnabled() { |
| return lobEnabled; |
| } |
| |
| public void setLobEnabled(Boolean lobEnabled) { |
| this.lobEnabled = lobEnabled; |
| } |
| |
| public String getLogMiningArchiveDestinationName() { |
| return logMiningArchiveDestinationName; |
| } |
| |
| public void setLogMiningArchiveDestinationName( |
| String logMiningArchiveDestinationName) { |
| this.logMiningArchiveDestinationName = logMiningArchiveDestinationName; |
| } |
| |
| public Long getLogMiningArchiveLogHours() { |
| return logMiningArchiveLogHours; |
| } |
| |
| public void setLogMiningArchiveLogHours(Long logMiningArchiveLogHours) { |
| this.logMiningArchiveLogHours = logMiningArchiveLogHours; |
| } |
| |
| public Boolean getLogMiningArchiveLogOnlyMode() { |
| return logMiningArchiveLogOnlyMode; |
| } |
| |
| public void setLogMiningArchiveLogOnlyMode( |
| Boolean logMiningArchiveLogOnlyMode) { |
| this.logMiningArchiveLogOnlyMode = logMiningArchiveLogOnlyMode; |
| } |
| |
| public Long getLogMiningArchiveLogOnlyScnPollIntervalMs() { |
| return logMiningArchiveLogOnlyScnPollIntervalMs; |
| } |
| |
| public void setLogMiningArchiveLogOnlyScnPollIntervalMs( |
| Long logMiningArchiveLogOnlyScnPollIntervalMs) { |
| this.logMiningArchiveLogOnlyScnPollIntervalMs = logMiningArchiveLogOnlyScnPollIntervalMs; |
| } |
| |
| public Long getLogMiningBatchSizeDefault() { |
| return logMiningBatchSizeDefault; |
| } |
| |
| public void setLogMiningBatchSizeDefault(Long logMiningBatchSizeDefault) { |
| this.logMiningBatchSizeDefault = logMiningBatchSizeDefault; |
| } |
| |
| public Long getLogMiningBatchSizeMax() { |
| return logMiningBatchSizeMax; |
| } |
| |
| public void setLogMiningBatchSizeMax(Long logMiningBatchSizeMax) { |
| this.logMiningBatchSizeMax = logMiningBatchSizeMax; |
| } |
| |
| public Long getLogMiningBatchSizeMin() { |
| return logMiningBatchSizeMin; |
| } |
| |
| public void setLogMiningBatchSizeMin(Long logMiningBatchSizeMin) { |
| this.logMiningBatchSizeMin = logMiningBatchSizeMin; |
| } |
| |
| public Boolean getLogMiningBufferDropOnStop() { |
| return logMiningBufferDropOnStop; |
| } |
| |
| public void setLogMiningBufferDropOnStop(Boolean logMiningBufferDropOnStop) { |
| this.logMiningBufferDropOnStop = logMiningBufferDropOnStop; |
| } |
| |
| public String getLogMiningBufferInfinispanCacheEvents() { |
| return logMiningBufferInfinispanCacheEvents; |
| } |
| |
| public void setLogMiningBufferInfinispanCacheEvents( |
| String logMiningBufferInfinispanCacheEvents) { |
| this.logMiningBufferInfinispanCacheEvents = logMiningBufferInfinispanCacheEvents; |
| } |
| |
| public String getLogMiningBufferInfinispanCacheGlobal() { |
| return logMiningBufferInfinispanCacheGlobal; |
| } |
| |
| public void setLogMiningBufferInfinispanCacheGlobal( |
| String logMiningBufferInfinispanCacheGlobal) { |
| this.logMiningBufferInfinispanCacheGlobal = logMiningBufferInfinispanCacheGlobal; |
| } |
| |
| public String getLogMiningBufferInfinispanCacheProcessedTransactions() { |
| return logMiningBufferInfinispanCacheProcessedTransactions; |
| } |
| |
| public void setLogMiningBufferInfinispanCacheProcessedTransactions( |
| String logMiningBufferInfinispanCacheProcessedTransactions) { |
| this.logMiningBufferInfinispanCacheProcessedTransactions = logMiningBufferInfinispanCacheProcessedTransactions; |
| } |
| |
| public String getLogMiningBufferInfinispanCacheSchemaChanges() { |
| return logMiningBufferInfinispanCacheSchemaChanges; |
| } |
| |
| public void setLogMiningBufferInfinispanCacheSchemaChanges( |
| String logMiningBufferInfinispanCacheSchemaChanges) { |
| this.logMiningBufferInfinispanCacheSchemaChanges = logMiningBufferInfinispanCacheSchemaChanges; |
| } |
| |
| public String getLogMiningBufferInfinispanCacheTransactions() { |
| return logMiningBufferInfinispanCacheTransactions; |
| } |
| |
| public void setLogMiningBufferInfinispanCacheTransactions( |
| String logMiningBufferInfinispanCacheTransactions) { |
| this.logMiningBufferInfinispanCacheTransactions = logMiningBufferInfinispanCacheTransactions; |
| } |
| |
| public Long getLogMiningBufferTransactionEventsThreshold() { |
| return logMiningBufferTransactionEventsThreshold; |
| } |
| |
| public void setLogMiningBufferTransactionEventsThreshold( |
| Long logMiningBufferTransactionEventsThreshold) { |
| this.logMiningBufferTransactionEventsThreshold = logMiningBufferTransactionEventsThreshold; |
| } |
| |
| public String getLogMiningBufferType() { |
| return logMiningBufferType; |
| } |
| |
| public void setLogMiningBufferType(String logMiningBufferType) { |
| this.logMiningBufferType = logMiningBufferType; |
| } |
| |
| public String getLogMiningFlushTableName() { |
| return logMiningFlushTableName; |
| } |
| |
| public void setLogMiningFlushTableName(String logMiningFlushTableName) { |
| this.logMiningFlushTableName = logMiningFlushTableName; |
| } |
| |
| public String getLogMiningQueryFilterMode() { |
| return logMiningQueryFilterMode; |
| } |
| |
| public void setLogMiningQueryFilterMode(String logMiningQueryFilterMode) { |
| this.logMiningQueryFilterMode = logMiningQueryFilterMode; |
| } |
| |
| public Boolean getLogMiningRestartConnection() { |
| return logMiningRestartConnection; |
| } |
| |
| public void setLogMiningRestartConnection(Boolean logMiningRestartConnection) { |
| this.logMiningRestartConnection = logMiningRestartConnection; |
| } |
| |
| public Long getLogMiningScnGapDetectionGapSizeMin() { |
| return logMiningScnGapDetectionGapSizeMin; |
| } |
| |
| public void setLogMiningScnGapDetectionGapSizeMin( |
| Long logMiningScnGapDetectionGapSizeMin) { |
| this.logMiningScnGapDetectionGapSizeMin = logMiningScnGapDetectionGapSizeMin; |
| } |
| |
| public Long getLogMiningScnGapDetectionTimeIntervalMaxMs() { |
| return logMiningScnGapDetectionTimeIntervalMaxMs; |
| } |
| |
| public void setLogMiningScnGapDetectionTimeIntervalMaxMs( |
| Long logMiningScnGapDetectionTimeIntervalMaxMs) { |
| this.logMiningScnGapDetectionTimeIntervalMaxMs = logMiningScnGapDetectionTimeIntervalMaxMs; |
| } |
| |
| public Long getLogMiningSessionMaxMs() { |
| return logMiningSessionMaxMs; |
| } |
| |
| public void setLogMiningSessionMaxMs(Long logMiningSessionMaxMs) { |
| this.logMiningSessionMaxMs = logMiningSessionMaxMs; |
| } |
| |
| public Long getLogMiningSleepTimeDefaultMs() { |
| return logMiningSleepTimeDefaultMs; |
| } |
| |
| public void setLogMiningSleepTimeDefaultMs(Long logMiningSleepTimeDefaultMs) { |
| this.logMiningSleepTimeDefaultMs = logMiningSleepTimeDefaultMs; |
| } |
| |
| public Long getLogMiningSleepTimeIncrementMs() { |
| return logMiningSleepTimeIncrementMs; |
| } |
| |
| public void setLogMiningSleepTimeIncrementMs( |
| Long logMiningSleepTimeIncrementMs) { |
| this.logMiningSleepTimeIncrementMs = logMiningSleepTimeIncrementMs; |
| } |
| |
| public Long getLogMiningSleepTimeMaxMs() { |
| return logMiningSleepTimeMaxMs; |
| } |
| |
| public void setLogMiningSleepTimeMaxMs(Long logMiningSleepTimeMaxMs) { |
| this.logMiningSleepTimeMaxMs = logMiningSleepTimeMaxMs; |
| } |
| |
| public Long getLogMiningSleepTimeMinMs() { |
| return logMiningSleepTimeMinMs; |
| } |
| |
| public void setLogMiningSleepTimeMinMs(Long logMiningSleepTimeMinMs) { |
| this.logMiningSleepTimeMinMs = logMiningSleepTimeMinMs; |
| } |
| |
| public String getLogMiningStrategy() { |
| return logMiningStrategy; |
| } |
| |
| public void setLogMiningStrategy(String logMiningStrategy) { |
| this.logMiningStrategy = logMiningStrategy; |
| } |
| |
| public Long getLogMiningTransactionRetentionMs() { |
| return logMiningTransactionRetentionMs; |
| } |
| |
| public void setLogMiningTransactionRetentionMs( |
| Long logMiningTransactionRetentionMs) { |
| this.logMiningTransactionRetentionMs = logMiningTransactionRetentionMs; |
| } |
| |
| public String getLogMiningUsernameExcludeList() { |
| return logMiningUsernameExcludeList; |
| } |
| |
| public void setLogMiningUsernameExcludeList( |
| String logMiningUsernameExcludeList) { |
| this.logMiningUsernameExcludeList = logMiningUsernameExcludeList; |
| } |
| |
| public String getLogMiningUsernameIncludeList() { |
| return logMiningUsernameIncludeList; |
| } |
| |
| public void setLogMiningUsernameIncludeList( |
| String logMiningUsernameIncludeList) { |
| this.logMiningUsernameIncludeList = logMiningUsernameIncludeList; |
| } |
| |
| public Integer getMaxBatchSize() { |
| return maxBatchSize; |
| } |
| |
| public void setMaxBatchSize(Integer maxBatchSize) { |
| this.maxBatchSize = maxBatchSize; |
| } |
| |
| public Integer getMaxQueueSize() { |
| return maxQueueSize; |
| } |
| |
| public void setMaxQueueSize(Integer maxQueueSize) { |
| this.maxQueueSize = maxQueueSize; |
| } |
| |
| public Long getMaxQueueSizeInBytes() { |
| return maxQueueSizeInBytes; |
| } |
| |
| public void setMaxQueueSizeInBytes(Long maxQueueSizeInBytes) { |
| this.maxQueueSizeInBytes = maxQueueSizeInBytes; |
| } |
| |
| public String getMessageKeyColumns() { |
| return messageKeyColumns; |
| } |
| |
| public void setMessageKeyColumns(String messageKeyColumns) { |
| this.messageKeyColumns = messageKeyColumns; |
| } |
| |
| public String getNotificationEnabledChannels() { |
| return notificationEnabledChannels; |
| } |
| |
| public void setNotificationEnabledChannels( |
| String notificationEnabledChannels) { |
| this.notificationEnabledChannels = notificationEnabledChannels; |
| } |
| |
| public String getNotificationSinkTopicName() { |
| return notificationSinkTopicName; |
| } |
| |
| public void setNotificationSinkTopicName(String notificationSinkTopicName) { |
| this.notificationSinkTopicName = notificationSinkTopicName; |
| } |
| |
| public String getOpenlogreplicatorHost() { |
| return openlogreplicatorHost; |
| } |
| |
| public void setOpenlogreplicatorHost(String openlogreplicatorHost) { |
| this.openlogreplicatorHost = openlogreplicatorHost; |
| } |
| |
| public Integer getOpenlogreplicatorPort() { |
| return openlogreplicatorPort; |
| } |
| |
| public void setOpenlogreplicatorPort(Integer openlogreplicatorPort) { |
| this.openlogreplicatorPort = openlogreplicatorPort; |
| } |
| |
| public String getOpenlogreplicatorSource() { |
| return openlogreplicatorSource; |
| } |
| |
| public void setOpenlogreplicatorSource(String openlogreplicatorSource) { |
| this.openlogreplicatorSource = openlogreplicatorSource; |
| } |
| |
| public Long getPollIntervalMs() { |
| return pollIntervalMs; |
| } |
| |
| public void setPollIntervalMs(Long pollIntervalMs) { |
| this.pollIntervalMs = pollIntervalMs; |
| } |
| |
| public String getPostProcessors() { |
| return postProcessors; |
| } |
| |
| public void setPostProcessors(String postProcessors) { |
| this.postProcessors = postProcessors; |
| } |
| |
| public Boolean getProvideTransactionMetadata() { |
| return provideTransactionMetadata; |
| } |
| |
| public void setProvideTransactionMetadata(Boolean provideTransactionMetadata) { |
| this.provideTransactionMetadata = provideTransactionMetadata; |
| } |
| |
| public Integer getQueryFetchSize() { |
| return queryFetchSize; |
| } |
| |
| public void setQueryFetchSize(Integer queryFetchSize) { |
| this.queryFetchSize = queryFetchSize; |
| } |
| |
| public String getRacNodes() { |
| return racNodes; |
| } |
| |
| public void setRacNodes(String racNodes) { |
| this.racNodes = racNodes; |
| } |
| |
| public Long getRetriableRestartConnectorWaitMs() { |
| return retriableRestartConnectorWaitMs; |
| } |
| |
| public void setRetriableRestartConnectorWaitMs( |
| Long retriableRestartConnectorWaitMs) { |
| this.retriableRestartConnectorWaitMs = retriableRestartConnectorWaitMs; |
| } |
| |
| public String getSchemaHistoryInternal() { |
| return schemaHistoryInternal; |
| } |
| |
| public void setSchemaHistoryInternal(String schemaHistoryInternal) { |
| this.schemaHistoryInternal = schemaHistoryInternal; |
| } |
| |
| public String getSchemaHistoryInternalFileFilename() { |
| return schemaHistoryInternalFileFilename; |
| } |
| |
| public void setSchemaHistoryInternalFileFilename( |
| String schemaHistoryInternalFileFilename) { |
| this.schemaHistoryInternalFileFilename = schemaHistoryInternalFileFilename; |
| } |
| |
| public Boolean getSchemaHistoryInternalSkipUnparseableDdl() { |
| return schemaHistoryInternalSkipUnparseableDdl; |
| } |
| |
| public void setSchemaHistoryInternalSkipUnparseableDdl( |
| Boolean schemaHistoryInternalSkipUnparseableDdl) { |
| this.schemaHistoryInternalSkipUnparseableDdl = schemaHistoryInternalSkipUnparseableDdl; |
| } |
| |
| public Boolean getSchemaHistoryInternalStoreOnlyCapturedDatabasesDdl() { |
| return schemaHistoryInternalStoreOnlyCapturedDatabasesDdl; |
| } |
| |
| public void setSchemaHistoryInternalStoreOnlyCapturedDatabasesDdl( |
| Boolean schemaHistoryInternalStoreOnlyCapturedDatabasesDdl) { |
| this.schemaHistoryInternalStoreOnlyCapturedDatabasesDdl = schemaHistoryInternalStoreOnlyCapturedDatabasesDdl; |
| } |
| |
| public Boolean getSchemaHistoryInternalStoreOnlyCapturedTablesDdl() { |
| return schemaHistoryInternalStoreOnlyCapturedTablesDdl; |
| } |
| |
| public void setSchemaHistoryInternalStoreOnlyCapturedTablesDdl( |
| Boolean schemaHistoryInternalStoreOnlyCapturedTablesDdl) { |
| this.schemaHistoryInternalStoreOnlyCapturedTablesDdl = schemaHistoryInternalStoreOnlyCapturedTablesDdl; |
| } |
| |
| public String getSchemaNameAdjustmentMode() { |
| return schemaNameAdjustmentMode; |
| } |
| |
| public void setSchemaNameAdjustmentMode(String schemaNameAdjustmentMode) { |
| this.schemaNameAdjustmentMode = schemaNameAdjustmentMode; |
| } |
| |
| public String getSignalDataCollection() { |
| return signalDataCollection; |
| } |
| |
| public void setSignalDataCollection(String signalDataCollection) { |
| this.signalDataCollection = signalDataCollection; |
| } |
| |
| public String getSignalEnabledChannels() { |
| return signalEnabledChannels; |
| } |
| |
| public void setSignalEnabledChannels(String signalEnabledChannels) { |
| this.signalEnabledChannels = signalEnabledChannels; |
| } |
| |
| public Long getSignalPollIntervalMs() { |
| return signalPollIntervalMs; |
| } |
| |
| public void setSignalPollIntervalMs(Long signalPollIntervalMs) { |
| this.signalPollIntervalMs = signalPollIntervalMs; |
| } |
| |
| public String getSkippedOperations() { |
| return skippedOperations; |
| } |
| |
| public void setSkippedOperations(String skippedOperations) { |
| this.skippedOperations = skippedOperations; |
| } |
| |
| public Long getSnapshotDelayMs() { |
| return snapshotDelayMs; |
| } |
| |
| public void setSnapshotDelayMs(Long snapshotDelayMs) { |
| this.snapshotDelayMs = snapshotDelayMs; |
| } |
| |
| public String getSnapshotEnhancePredicateScn() { |
| return snapshotEnhancePredicateScn; |
| } |
| |
| public void setSnapshotEnhancePredicateScn( |
| String snapshotEnhancePredicateScn) { |
| this.snapshotEnhancePredicateScn = snapshotEnhancePredicateScn; |
| } |
| |
| public Integer getSnapshotFetchSize() { |
| return snapshotFetchSize; |
| } |
| |
| public void setSnapshotFetchSize(Integer snapshotFetchSize) { |
| this.snapshotFetchSize = snapshotFetchSize; |
| } |
| |
| public String getSnapshotIncludeCollectionList() { |
| return snapshotIncludeCollectionList; |
| } |
| |
| public void setSnapshotIncludeCollectionList( |
| String snapshotIncludeCollectionList) { |
| this.snapshotIncludeCollectionList = snapshotIncludeCollectionList; |
| } |
| |
| public String getSnapshotLockingMode() { |
| return snapshotLockingMode; |
| } |
| |
| public void setSnapshotLockingMode(String snapshotLockingMode) { |
| this.snapshotLockingMode = snapshotLockingMode; |
| } |
| |
| public Long getSnapshotLockTimeoutMs() { |
| return snapshotLockTimeoutMs; |
| } |
| |
| public void setSnapshotLockTimeoutMs(Long snapshotLockTimeoutMs) { |
| this.snapshotLockTimeoutMs = snapshotLockTimeoutMs; |
| } |
| |
| public Integer getSnapshotMaxThreads() { |
| return snapshotMaxThreads; |
| } |
| |
| public void setSnapshotMaxThreads(Integer snapshotMaxThreads) { |
| this.snapshotMaxThreads = snapshotMaxThreads; |
| } |
| |
| public String getSnapshotMode() { |
| return snapshotMode; |
| } |
| |
| public void setSnapshotMode(String snapshotMode) { |
| this.snapshotMode = snapshotMode; |
| } |
| |
| public String getSnapshotSelectStatementOverrides() { |
| return snapshotSelectStatementOverrides; |
| } |
| |
| public void setSnapshotSelectStatementOverrides( |
| String snapshotSelectStatementOverrides) { |
| this.snapshotSelectStatementOverrides = snapshotSelectStatementOverrides; |
| } |
| |
| public String getSnapshotTablesOrderByRowCount() { |
| return snapshotTablesOrderByRowCount; |
| } |
| |
| public void setSnapshotTablesOrderByRowCount( |
| String snapshotTablesOrderByRowCount) { |
| this.snapshotTablesOrderByRowCount = snapshotTablesOrderByRowCount; |
| } |
| |
| public String getSourceinfoStructMaker() { |
| return sourceinfoStructMaker; |
| } |
| |
| public void setSourceinfoStructMaker(String sourceinfoStructMaker) { |
| this.sourceinfoStructMaker = sourceinfoStructMaker; |
| } |
| |
| public String getTableExcludeList() { |
| return tableExcludeList; |
| } |
| |
| public void setTableExcludeList(String tableExcludeList) { |
| this.tableExcludeList = tableExcludeList; |
| } |
| |
| public String getTableIncludeList() { |
| return tableIncludeList; |
| } |
| |
| public void setTableIncludeList(String tableIncludeList) { |
| this.tableIncludeList = tableIncludeList; |
| } |
| |
| public String getTimePrecisionMode() { |
| return timePrecisionMode; |
| } |
| |
| public void setTimePrecisionMode(String timePrecisionMode) { |
| this.timePrecisionMode = timePrecisionMode; |
| } |
| |
| public Boolean getTombstonesOnDelete() { |
| return tombstonesOnDelete; |
| } |
| |
| public void setTombstonesOnDelete(Boolean tombstonesOnDelete) { |
| this.tombstonesOnDelete = tombstonesOnDelete; |
| } |
| |
| public String getTopicNamingStrategy() { |
| return topicNamingStrategy; |
| } |
| |
| public void setTopicNamingStrategy(String topicNamingStrategy) { |
| this.topicNamingStrategy = topicNamingStrategy; |
| } |
| |
| public String getTopicPrefix() { |
| return topicPrefix; |
| } |
| |
| public void setTopicPrefix(String topicPrefix) { |
| this.topicPrefix = topicPrefix; |
| } |
| |
| public String getUnavailableValuePlaceholder() { |
| return unavailableValuePlaceholder; |
| } |
| |
| public void setUnavailableValuePlaceholder( |
| String unavailableValuePlaceholder) { |
| this.unavailableValuePlaceholder = unavailableValuePlaceholder; |
| } |
| } |