/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.vertx.kafka.springboot;
import java.util.Map;
import javax.annotation.Generated;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import org.apache.camel.component.vertx.kafka.configuration.VertxKafkaConfiguration;
import org.apache.camel.spi.HeaderFilterStrategy;
import org.apache.camel.spring.boot.ComponentConfigurationPropertiesCommon;
import org.springframework.boot.context.properties.ConfigurationProperties;
/**
 * Send and receive messages to/from an Apache Kafka broker using the Vert.x
 * Kafka client.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Generated("org.apache.camel.springboot.maven.SpringBootAutoConfigurationMojo")
@ConfigurationProperties(prefix = "camel.component.vertx-kafka")
public class VertxKafkaComponentConfiguration
extends
ComponentConfigurationPropertiesCommon {
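/*
 * Usage sketch (editor's example, not generated): with the camel-vertx-kafka
 * Spring Boot starter on the classpath, Spring Boot binds the fields below
 * from application.properties using relaxed binding of the field names,
 * e.g.:
 *
 *   camel.component.vertx-kafka.bootstrap-servers=localhost:9092
 *   camel.component.vertx-kafka.group-id=myGroup
 *   camel.component.vertx-kafka.auto-offset-reset=earliest
 */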
/**
* Whether to enable auto configuration of the vertx-kafka component. This
* is enabled by default.
*/
private Boolean enabled;
/**
 * Sets additional properties for either the Kafka consumer or the Kafka
 * producer in case they can't be set directly on the Camel configuration
 * (e.g. new Kafka properties that are not reflected yet in Camel
 * configurations). The properties have to be prefixed with
 * additionalProperties.. E.g.:
 * additionalProperties.transactional.id=12345&additionalProperties.schema.registry.url=http://localhost:8811/avro
*/
private Map<String, Object> additionalProperties;
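/*
 * Illustrative sketch (values are hypothetical): because this is a Map
 * property and the Kafka keys themselves contain dots, Spring Boot's
 * bracket syntax keeps each key intact, e.g.:
 *
 *   camel.component.vertx-kafka.additional-properties[transactional.id]=12345
 *   camel.component.vertx-kafka.additional-properties[schema.registry.url]=http://localhost:8811/avro
 */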
/**
* A list of host/port pairs to use for establishing the initial connection
* to the Kafka cluster. The client will make use of all servers
 * irrespective of which servers are specified here for bootstrapping; this
 * list only impacts the initial hosts used to discover the full set of
 * servers. This list should be in the form
* host1:port1,host2:port2,.... Since these servers are just used for the
* initial connection to discover the full cluster membership (which may
* change dynamically), this list need not contain the full set of servers
* (you may want more than one, though, in case a server is down).
*/
private String bootstrapServers;
/**
* Controls how the client uses DNS lookups. If set to use_all_dns_ips,
* connect to each returned IP address in sequence until a successful
* connection is established. After a disconnection, the next IP is used.
* Once all IPs have been used once, the client resolves the IP(s) from the
* hostname again (both the JVM and the OS cache DNS name lookups, however).
* If set to resolve_canonical_bootstrap_servers_only, resolve each
* bootstrap address into a list of canonical names. After the bootstrap
* phase, this behaves the same as use_all_dns_ips. If set to default
* (deprecated), attempt to connect to the first IP address returned by the
* lookup, even if the lookup returns multiple IP addresses.
*/
private String clientDnsLookup = "use_all_dns_ips";
/**
* An id string to pass to the server when making requests. The purpose of
* this is to be able to track the source of requests beyond just ip/port by
* allowing a logical application name to be included in server-side request
* logging.
*/
private String clientId;
/**
 * The component configurations. The option is an
* org.apache.camel.component.vertx.kafka.configuration.VertxKafkaConfiguration type.
*/
private VertxKafkaConfiguration configuration;
/**
* Close idle connections after the number of milliseconds specified by this
* config. The option is a long type.
*/
private Long connectionsMaxIdleMs = 540000L;
/**
 * To use a custom HeaderFilterStrategy to filter headers to and from the
 * Camel message. The option is an org.apache.camel.spi.HeaderFilterStrategy
 * type.
*/
private HeaderFilterStrategy headerFilterStrategy;
/**
* A list of classes to use as interceptors. Implementing the
* org.apache.kafka.clients.producer.ProducerInterceptor interface allows
* you to intercept (and possibly mutate) the records received by the
* producer before they are published to the Kafka cluster. By default,
* there are no interceptors.
*/
private String interceptorClasses;
/**
* The period of time in milliseconds after which we force a refresh of
* metadata even if we haven't seen any partition leadership changes to
* proactively discover any new brokers or partitions. The option is a long
* type.
*/
private Long metadataMaxAgeMs = 300000L;
/**
* A list of classes to use as metrics reporters. Implementing the
* org.apache.kafka.common.metrics.MetricsReporter interface allows plugging
* in classes that will be notified of new metric creation. The JmxReporter
* is always included to register JMX statistics.
*/
private String metricReporters;
/**
* The number of samples maintained to compute metrics.
*/
private Integer metricsNumSamples = 2;
/**
* The highest recording level for metrics.
*/
private String metricsRecordingLevel = "INFO";
/**
* The window of time a metrics sample is computed over. The option is a
* long type.
*/
private Long metricsSampleWindowMs = 30000L;
/**
 * The partition to which the record will be sent (or null if no partition
 * was specified), or the particular partition to read from if set. If the
 * header VertxKafkaConstants#PARTITION_ID is configured, it will take
 * precedence over this config.
*/
private Integer partitionId;
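/*
 * A minimal route sketch (editor's example; "myTopic" and the endpoint
 * parameters are illustrative) showing the VertxKafkaConstants#PARTITION_ID
 * header taking precedence over this config per message:
 *
 *   from("direct:start")
 *       .setHeader(VertxKafkaConstants.PARTITION_ID, constant(0))
 *       .to("vertx-kafka:myTopic?bootstrapServers=localhost:9092");
 */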
/**
* The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
* If the value is -1, the OS default will be used.
*/
private Integer receiveBufferBytes = 32768;
/**
* The maximum amount of time in milliseconds to wait when reconnecting to a
* broker that has repeatedly failed to connect. If provided, the backoff
* per host will increase exponentially for each consecutive connection
* failure, up to this maximum. After calculating the backoff increase, 20%
* random jitter is added to avoid connection storms. The option is a long
* type.
*/
private Long reconnectBackoffMaxMs = 1000L;
/**
* The base amount of time to wait before attempting to reconnect to a given
* host. This avoids repeatedly connecting to a host in a tight loop. This
* backoff applies to all connection attempts by the client to a broker. The
* option is a long type.
*/
private Long reconnectBackoffMs = 50L;
/**
* The configuration controls the maximum amount of time the client will
* wait for the response of a request. If the response is not received
* before the timeout elapses the client will resend the request if
* necessary or fail the request if retries are exhausted. This should be
* larger than replica.lag.time.max.ms (a broker configuration) to reduce
* the possibility of message duplication due to unnecessary producer
 * retries. The option is an int type.
*/
private Integer requestTimeoutMs = 30000;
/**
* The amount of time to wait before attempting to retry a failed request to
* a given topic partition. This avoids repeatedly sending requests in a
* tight loop under some failure scenarios. The option is a long type.
*/
private Long retryBackoffMs = 100L;
/**
* The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If
* the value is -1, the OS default will be used.
*/
private Integer sendBufferBytes = 131072;
/**
* Allow automatic topic creation on the broker when subscribing to or
* assigning a topic. A topic being subscribed to will be automatically
* created only if the broker allows for it using auto.create.topics.enable
* broker configuration. This configuration must be set to false when using
 * brokers older than 0.11.0.
*/
private Boolean allowAutoCreateTopics = true;
/**
* The frequency in milliseconds that the consumer offsets are
* auto-committed to Kafka if enable.auto.commit is set to true. The option
 * is an int type.
*/
private Integer autoCommitIntervalMs = 5000;
/**
 * What to do when there is no initial offset in Kafka or if the current
 * offset does not exist any more on the server (e.g. because that data has
 * been deleted): earliest: automatically reset the offset to the earliest
 * offset; latest: automatically reset the offset to the latest offset;
 * none: throw an exception to the consumer if no previous offset is found
 * for the consumer's group; anything else: throw an exception to the
 * consumer.
*/
private String autoOffsetReset = "latest";
/**
 * Allows for bridging the consumer to the Camel routing Error Handler,
 * which means any exceptions that occur while the consumer is trying to
 * pick up incoming messages, or the likes, will now be processed as a
 * message and handled by the routing Error Handler. By default the consumer
 * will use the org.apache.camel.spi.ExceptionHandler to deal with
 * exceptions, which will be logged at WARN or ERROR level and ignored.
*/
private Boolean bridgeErrorHandler = false;
/**
* Automatically check the CRC32 of the records consumed. This ensures no
* on-the-wire or on-disk corruption to the messages occurred. This check
* adds some overhead, so it may be disabled in cases seeking extreme
* performance.
*/
private Boolean checkCrcs = true;
/**
* A rack identifier for this client. This can be any string value which
* indicates where this client is physically located. It corresponds with
* the broker config 'broker.rack'
*/
private String clientRack;
/**
* Specifies the timeout (in milliseconds) for client APIs. This
* configuration is used as the default timeout for all client operations
 * that do not specify a timeout parameter. The option is an int type.
*/
private Integer defaultApiTimeoutMs = 60000;
/**
* If true the consumer's offset will be periodically committed in the
* background.
*/
private Boolean enableAutoCommit = true;
/**
* Whether internal topics matching a subscribed pattern should be excluded
* from the subscription. It is always possible to explicitly subscribe to
* an internal topic.
*/
private Boolean excludeInternalTopics = true;
/**
* The maximum amount of data the server should return for a fetch request.
* Records are fetched in batches by the consumer, and if the first record
* batch in the first non-empty partition of the fetch is larger than this
* value, the record batch will still be returned to ensure that the
 * consumer can make progress. As such, this is not an absolute maximum. The
* maximum record batch size accepted by the broker is defined via
* message.max.bytes (broker config) or max.message.bytes (topic config).
* Note that the consumer performs multiple fetches in parallel.
*/
private Integer fetchMaxBytes = 52428800;
/**
* The maximum amount of time the server will block before answering the
* fetch request if there isn't sufficient data to immediately satisfy the
 * requirement given by fetch.min.bytes. The option is an int type.
*/
private Integer fetchMaxWaitMs = 500;
/**
* The minimum amount of data the server should return for a fetch request.
* If insufficient data is available the request will wait for that much
* data to accumulate before answering the request. The default setting of 1
* byte means that fetch requests are answered as soon as a single byte of
* data is available or the fetch request times out waiting for data to
* arrive. Setting this to something greater than 1 will cause the server to
* wait for larger amounts of data to accumulate which can improve server
* throughput a bit at the cost of some additional latency.
*/
private Integer fetchMinBytes = 1;
/**
* A unique string that identifies the consumer group this consumer belongs
* to. This property is required if the consumer uses either the group
* management functionality by using subscribe(topic) or the Kafka-based
* offset management strategy.
*/
private String groupId;
/**
* A unique identifier of the consumer instance provided by the end user.
* Only non-empty strings are permitted. If set, the consumer is treated as
* a static member, which means that only one instance with this ID is
* allowed in the consumer group at any time. This can be used in
* combination with a larger session timeout to avoid group rebalances
* caused by transient unavailability (e.g. process restarts). If not set,
* the consumer will join the group as a dynamic member, which is the
* traditional behavior.
*/
private String groupInstanceId;
/**
* The expected time between heartbeats to the consumer coordinator when
* using Kafka's group management facilities. Heartbeats are used to ensure
* that the consumer's session stays active and to facilitate rebalancing
* when new consumers join or leave the group. The value must be set lower
* than session.timeout.ms, but typically should be set no higher than 1/3
* of that value. It can be adjusted even lower to control the expected time
 * for normal rebalances. The option is an int type.
*/
private Integer heartbeatIntervalMs = 3000;
/**
* Controls how to read messages written transactionally. If set to
* read_committed, consumer.poll() will only return transactional messages
 * which have been committed. If set to read_uncommitted (the default),
* consumer.poll() will return all messages, even transactional messages
* which have been aborted. Non-transactional messages will be returned
* unconditionally in either mode. Messages will always be returned in
* offset order. Hence, in read_committed mode, consumer.poll() will only
* return messages up to the last stable offset (LSO), which is the one less
* than the offset of the first open transaction. In particular any messages
* appearing after messages belonging to ongoing transactions will be
* withheld until the relevant transaction has been completed. As a result,
* read_committed consumers will not be able to read up to the high
 * watermark when there are in-flight transactions. Further, when in
 * read_committed mode, the seekToEnd method will return the LSO.
*/
private String isolationLevel = "read_uncommitted";
/**
* Deserializer class for key that implements the
* org.apache.kafka.common.serialization.Deserializer interface.
*/
private String keyDeserializer = "org.apache.kafka.common.serialization.StringDeserializer";
/**
* The maximum amount of data per-partition the server will return. Records
* are fetched in batches by the consumer. If the first record batch in the
* first non-empty partition of the fetch is larger than this limit, the
* batch will still be returned to ensure that the consumer can make
* progress. The maximum record batch size accepted by the broker is defined
* via message.max.bytes (broker config) or max.message.bytes (topic
* config). See fetch.max.bytes for limiting the consumer request size.
*/
private Integer maxPartitionFetchBytes = 1048576;
/**
* The maximum delay between invocations of poll() when using consumer group
* management. This places an upper bound on the amount of time that the
* consumer can be idle before fetching more records. If poll() is not
* called before expiration of this timeout, then the consumer is considered
* failed and the group will rebalance in order to reassign the partitions
* to another member. For consumers using a non-null group.instance.id which
* reach this timeout, partitions will not be immediately reassigned.
* Instead, the consumer will stop sending heartbeats and partitions will be
* reassigned after expiration of session.timeout.ms. This mirrors the
 * behavior of a static consumer which has shut down. The option is an int
 * type.
*/
private Integer maxPollIntervalMs = 300000;
/**
* The maximum number of records returned in a single call to poll().
*/
private Integer maxPollRecords = 500;
/**
* A list of class names or class types, ordered by preference, of supported
* partition assignment strategies that the client will use to distribute
* partition ownership amongst consumer instances when group management is
 * used. In addition to the default class specified below, you can use the
 * org.apache.kafka.clients.consumer.RoundRobinAssignor class for round
 * robin assignments of partitions to consumers. Implementing the
 * org.apache.kafka.clients.consumer.ConsumerPartitionAssignor interface
 * allows you to plug in a custom assignment strategy.
*/
private String partitionAssignmentStrategy = "org.apache.kafka.clients.consumer.RangeAssignor";
/**
 * Set if KafkaConsumer will read from a particular offset on startup. This
 * config will take precedence over the seekTo config.
*/
private Long seekToOffset;
/**
 * Set if KafkaConsumer will read from the beginning or the end on startup:
 * beginning: read from the beginning; end: read from the end.
*/
private String seekToPosition;
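/*
 * Example (illustrative values): replay a consumer from the start of the
 * topic via application.properties:
 *
 *   camel.component.vertx-kafka.seek-to-position=beginning
 *
 * or jump to a specific offset, which takes precedence:
 *
 *   camel.component.vertx-kafka.seek-to-offset=42
 */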
/**
* The timeout used to detect client failures when using Kafka's group
* management facility. The client sends periodic heartbeats to indicate its
* liveness to the broker. If no heartbeats are received by the broker
* before the expiration of this session timeout, then the broker will
* remove this client from the group and initiate a rebalance. Note that the
* value must be in the allowable range as configured in the broker
* configuration by group.min.session.timeout.ms and
 * group.max.session.timeout.ms. The option is an int type.
*/
private Integer sessionTimeoutMs = 10000;
/**
* Deserializer class for value that implements the
* org.apache.kafka.common.serialization.Deserializer interface.
*/
private String valueDeserializer = "org.apache.kafka.common.serialization.StringDeserializer";
/**
* The number of acknowledgments the producer requires the leader to have
* received before considering a request complete. This controls the
* durability of records that are sent. The following settings are allowed:
* acks=0 If set to zero then the producer will not wait for any
* acknowledgment from the server at all. The record will be immediately
* added to the socket buffer and considered sent. No guarantee can be made
* that the server has received the record in this case, and the retries
* configuration will not take effect (as the client won't generally know of
* any failures). The offset given back for each record will always be set
* to -1. acks=1 This will mean the leader will write the record to its
* local log but will respond without awaiting full acknowledgement from all
 * followers. In this case, should the leader fail immediately after
 * acknowledging the record but before the followers have replicated it, the
 * record will be lost. acks=all This means the leader will wait for the
* full set of in-sync replicas to acknowledge the record. This guarantees
* that the record will not be lost as long as at least one in-sync replica
* remains alive. This is the strongest available guarantee. This is
* equivalent to the acks=-1 setting.
*/
private String acks = "1";
/**
* The producer will attempt to batch records together into fewer requests
* whenever multiple records are being sent to the same partition. This
* helps performance on both the client and the server. This configuration
* controls the default batch size in bytes. No attempt will be made to
* batch records larger than this size. Requests sent to brokers will
* contain multiple batches, one for each partition with data available to
* be sent. A small batch size will make batching less common and may reduce
* throughput (a batch size of zero will disable batching entirely). A very
* large batch size may use memory a bit more wastefully as we will always
* allocate a buffer of the specified batch size in anticipation of
* additional records.
*/
private Integer batchSize = 16384;
/**
* The total bytes of memory the producer can use to buffer records waiting
* to be sent to the server. If records are sent faster than they can be
* delivered to the server the producer will block for max.block.ms after
 * which it will throw an exception. This setting should correspond roughly
* to the total memory the producer will use, but is not a hard bound since
* not all memory the producer uses is used for buffering. Some additional
* memory will be used for compression (if compression is enabled) as well
* as for maintaining in-flight requests.
*/
private Long bufferMemory = 33554432L;
/**
* The compression type for all data generated by the producer. The default
* is none (i.e. no compression). Valid values are none, gzip, snappy, lz4,
* or zstd. Compression is of full batches of data, so the efficacy of
* batching will also impact the compression ratio (more batching means
* better compression).
*/
private String compressionType = "none";
/**
* An upper bound on the time to report success or failure after a call to
* send() returns. This limits the total time that a record will be delayed
* prior to sending, the time to await acknowledgement from the broker (if
* expected), and the time allowed for retriable send failures. The producer
* may report failure to send a record earlier than this config if either an
* unrecoverable error is encountered, the retries have been exhausted, or
* the record is added to a batch which reached an earlier delivery
* expiration deadline. The value of this config should be greater than or
 * equal to the sum of request.timeout.ms and linger.ms. The option is an
 * int type.
*/
private Integer deliveryTimeoutMs = 120000;
/**
* When set to 'true', the producer will ensure that exactly one copy of
* each message is written in the stream. If 'false', producer retries due
* to broker failures, etc., may write duplicates of the retried message in
* the stream. Note that enabling idempotence requires
* max.in.flight.requests.per.connection to be less than or equal to 5,
* retries to be greater than 0 and acks must be 'all'. If these values are
* not explicitly set by the user, suitable values will be chosen. If
* incompatible values are set, a ConfigException will be thrown.
*/
private Boolean enableIdempotence = false;
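/*
 * A hedged "safe producer" sketch (values are illustrative, not defaults)
 * combining this flag with the related acks, retries, and in-flight
 * settings in this class:
 *
 *   camel.component.vertx-kafka.enable-idempotence=true
 *   camel.component.vertx-kafka.acks=all
 *   camel.component.vertx-kafka.max-in-flight-requests-per-connection=5
 */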
/**
* Serializer class for key that implements the
* org.apache.kafka.common.serialization.Serializer interface.
*/
private String keySerializer = "org.apache.kafka.common.serialization.StringSerializer";
/**
 * Whether the producer should be started lazily (on the first message). By
 * starting lazily you can use this to allow CamelContext and routes to
 * start up in situations where a producer may otherwise fail during
 * starting and cause the route to fail to start. By deferring this startup
 * to be lazy, the startup failure can be handled during routing of messages
 * via Camel's routing error handlers. Beware that when the first message is
 * processed, creating and starting the producer may take a little time and
 * prolong the total processing time of that message.
*/
private Boolean lazyStartProducer = false;
/**
* The producer groups together any records that arrive in between request
* transmissions into a single batched request. Normally this occurs only
* under load when records arrive faster than they can be sent out. However
* in some circumstances the client may want to reduce the number of
* requests even under moderate load. This setting accomplishes this by
 * adding a small amount of artificial delay; that is, rather than
* immediately sending out a record the producer will wait for up to the
* given delay to allow other records to be sent so that the sends can be
* batched together. This can be thought of as analogous to Nagle's
* algorithm in TCP. This setting gives the upper bound on the delay for
* batching: once we get batch.size worth of records for a partition it will
* be sent immediately regardless of this setting, however if we have fewer
* than this many bytes accumulated for this partition we will 'linger' for
* the specified time waiting for more records to show up. This setting
* defaults to 0 (i.e. no delay). Setting linger.ms=5, for example, would
* have the effect of reducing the number of requests sent but would add up
* to 5ms of latency to records sent in the absence of load. The option is a
* long type.
*/
private Long lingerMs = 0L;
/**
 * The configuration controls how long KafkaProducer.send() and
 * KafkaProducer.partitionsFor() will block. These methods can be blocked
 * either because the buffer is full or metadata is unavailable. Blocking in
 * the user-supplied serializers or partitioner will not be counted against
 * this timeout. The option is a long type.
*/
private Long maxBlockMs = 60000L;
/**
* The maximum number of unacknowledged requests the client will send on a
* single connection before blocking. Note that if this setting is set to be
* greater than 1 and there are failed sends, there is a risk of message
* re-ordering due to retries (i.e., if retries are enabled).
*/
private Integer maxInFlightRequestsPerConnection = 5;
/**
* The maximum size of a request in bytes. This setting will limit the
* number of record batches the producer will send in a single request to
* avoid sending huge requests. This is also effectively a cap on the
* maximum uncompressed record batch size. Note that the server has its own
* cap on the record batch size (after compression if compression is
* enabled) which may be different from this.
*/
private Integer maxRequestSize = 1048576;
/**
* Controls how long the producer will cache metadata for a topic that's
* idle. If the elapsed time since a topic was last produced to exceeds the
* metadata idle duration, then the topic's metadata is forgotten and the
* next access to it will force a metadata fetch request. The option is a
* long type.
*/
private Long metadataMaxIdleMs = 300000L;
/**
* Partitioner class that implements the
* org.apache.kafka.clients.producer.Partitioner interface.
*/
private String partitionerClass = "org.apache.kafka.clients.producer.internals.DefaultPartitioner";
/**
* Setting a value greater than zero will cause the client to resend any
* record whose send fails with a potentially transient error. Note that
* this retry is no different than if the client resent the record upon
* receiving the error. Allowing retries without setting
* max.in.flight.requests.per.connection to 1 will potentially change the
* ordering of records because if two batches are sent to a single
* partition, and the first fails and is retried but the second succeeds,
* then the records in the second batch may appear first. Note additionally
* that produce requests will be failed before the number of retries has
* been exhausted if the timeout configured by delivery.timeout.ms expires
* first before successful acknowledgement. Users should generally prefer to
* leave this config unset and instead use delivery.timeout.ms to control
* retry behavior.
*/
private Integer retries = 2147483647;
/**
* The TransactionalId to use for transactional delivery. This enables
* reliability semantics which span multiple producer sessions since it
* allows the client to guarantee that transactions using the same
* TransactionalId have been completed prior to starting any new
* transactions. If no TransactionalId is provided, then the producer is
* limited to idempotent delivery. If a TransactionalId is configured,
 * enable.idempotence is implied. By default the TransactionalId is not
 * configured, which means transactions cannot be used. Note that, by
 * default, transactions require a cluster of at least three brokers, which
 * is the recommended setting for production; for development you can change
 * this by adjusting the broker setting
 * transaction.state.log.replication.factor.
*/
private String transactionalId;
/**
* The maximum amount of time in ms that the transaction coordinator will
* wait for a transaction status update from the producer before proactively
 * aborting the ongoing transaction. If this value is larger than the
 * transaction.max.timeout.ms setting in the broker, the request will fail
 * with an InvalidTransactionTimeout error. The option is an int type.
*/
private Integer transactionTimeoutMs = 60000;
/**
* Serializer class for value that implements the
* org.apache.kafka.common.serialization.Serializer interface.
*/
private String valueSerializer = "org.apache.kafka.common.serialization.StringSerializer";
/**
 * Whether autowiring is enabled. This is used for automatically wiring
 * options (the option must be marked as autowired) by looking up in the
 * registry to find if there is a single instance of a matching type, which
 * then gets configured on the component. This can be used for automatically
 * configuring JDBC data sources, JMS connection factories, AWS clients,
 * etc.
*/
private Boolean autowiredEnabled = true;
/**
 * To use an existing Vert.x instance instead of creating a new one. The
 * option is an io.vertx.core.Vertx type.
*/
private Vertx vertx;
/**
 * To provide a custom set of Vert.x options for configuring Vert.x. The
 * option is an io.vertx.core.VertxOptions type.
*/
private VertxOptions vertxOptions;
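/*
 * A minimal sketch (assumption: a user-defined bean in a Spring
 * @Configuration class) of supplying a shared Vert.x instance that can be
 * autowired into this component instead of letting it create its own:
 *
 *   @Bean
 *   public Vertx vertx() {
 *       return Vertx.vertx(new VertxOptions().setWorkerPoolSize(20));
 *   }
 */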
/**
* The fully qualified name of a SASL client callback handler class that
* implements the AuthenticateCallbackHandler interface.
*/
private String saslClientCallbackHandlerClass;
/**
 * JAAS login context parameters for SASL connections in the format used by
 * JAAS configuration files (the JAAS configuration file format is described
 * in the JAAS reference documentation). The format for the value is:
 * 'loginModuleClass controlFlag (optionName=optionValue);'. For brokers,
 * the config must be prefixed with
* listener prefix and SASL mechanism name in lower-case. For example,
* listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=com.example.ScramLoginModule required;
*/
private String saslJaasConfig;
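/*
 * Illustrative client-side SASL/PLAIN setup (credentials are placeholders;
 * PlainLoginModule is the standard Kafka login module for this mechanism):
 *
 *   camel.component.vertx-kafka.security-protocol=SASL_SSL
 *   camel.component.vertx-kafka.sasl-mechanism=PLAIN
 *   camel.component.vertx-kafka.sasl-jaas-config=org.apache.kafka.common.security.plain.PlainLoginModule required username="alice" password="secret";
 */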
/**
* Kerberos kinit command path.
*/
private String saslKerberosKinitCmd = "/usr/bin/kinit";
/**
* Login thread sleep time between refresh attempts.
*/
private Long saslKerberosMinTimeBeforeRelogin = 60000L;
/**
* The Kerberos principal name that Kafka runs as. This can be defined
* either in Kafka's JAAS config or in Kafka's config.
*/
private String saslKerberosServiceName;
/**
* Percentage of random jitter added to the renewal time.
*/
private Double saslKerberosTicketRenewJitter;
/**
* Login thread will sleep until the specified window factor of time from
* last refresh to ticket's expiry has been reached, at which time it will
* try to renew the ticket.
*/
private Double saslKerberosTicketRenewWindowFactor;
/**
* The fully qualified name of a SASL login callback handler class that
* implements the AuthenticateCallbackHandler interface. For brokers, login
* callback handler config must be prefixed with listener prefix and SASL
* mechanism name in lower-case. For example,
* listener.name.sasl_ssl.scram-sha-256.sasl.login.callback.handler.class=com.example.CustomScramLoginCallbackHandler
*/
private String saslLoginCallbackHandlerClass;
/**
* The fully qualified name of a class that implements the Login interface.
* For brokers, login config must be prefixed with listener prefix and SASL
* mechanism name in lower-case. For example,
* listener.name.sasl_ssl.scram-sha-256.sasl.login.class=com.example.CustomScramLogin
*/
private String saslLoginClass;
/**
* The amount of buffer time before credential expiration to maintain when
* refreshing a credential, in seconds. If a refresh would otherwise occur
* closer to expiration than the number of buffer seconds then the refresh
* will be moved up to maintain as much of the buffer time as possible.
* Legal values are between 0 and 3600 (1 hour); a default value of 300 (5
* minutes) is used if no value is specified. This value and
* sasl.login.refresh.min.period.seconds are both ignored if their sum
* exceeds the remaining lifetime of a credential. Currently applies only to
* OAUTHBEARER.
*/
private Short saslLoginRefreshBufferSeconds = 300;
/**
* The desired minimum time for the login refresh thread to wait before
* refreshing a credential, in seconds. Legal values are between 0 and 900
* (15 minutes); a default value of 60 (1 minute) is used if no value is
* specified. This value and sasl.login.refresh.buffer.seconds are both
* ignored if their sum exceeds the remaining lifetime of a credential.
* Currently applies only to OAUTHBEARER.
*/
private Short saslLoginRefreshMinPeriodSeconds = 60;
/**
* Login refresh thread will sleep until the specified window factor
* relative to the credential's lifetime has been reached, at which time it
* will try to refresh the credential. Legal values are between 0.5 (50%)
* and 1.0 (100%) inclusive; a default value of 0.8 (80%) is used if no
* value is specified. Currently applies only to OAUTHBEARER.
*/
private Double saslLoginRefreshWindowFactor;
/**
* The maximum amount of random jitter relative to the credential's lifetime
* that is added to the login refresh thread's sleep time. Legal values are
* between 0 and 0.25 (25%) inclusive; a default value of 0.05 (5%) is used
* if no value is specified. Currently applies only to OAUTHBEARER.
*/
private Double saslLoginRefreshWindowJitter;
/**
* SASL mechanism used for client connections. This may be any mechanism for
* which a security provider is available. GSSAPI is the default mechanism.
*/
private String saslMechanism = "GSSAPI";
/**
* Protocol used to communicate with brokers. Valid values are: PLAINTEXT,
* SSL, SASL_PLAINTEXT, SASL_SSL.
*/
private String securityProtocol = "PLAINTEXT";
/**
* A list of configurable creator classes each returning a provider
* implementing security algorithms. These classes should implement the
* org.apache.kafka.common.security.auth.SecurityProviderCreator interface.
*/
private String securityProviders;
/**
* A list of cipher suites. This is a named combination of authentication,
* encryption, MAC and key exchange algorithm used to negotiate the security
* settings for a network connection using TLS or SSL network protocol. By
* default all the available cipher suites are supported.
*/
private String sslCipherSuites;
/**
* The list of protocols enabled for SSL connections. The default is
* 'TLSv1.2,TLSv1.3' when running with Java 11 or newer, 'TLSv1.2'
* otherwise. With the default value for Java 11, clients and servers will
 * prefer TLSv1.3 if both support it and fall back to TLSv1.2 otherwise
* (assuming both support at least TLSv1.2). This default should be fine for
* most cases. Also see the config documentation for ssl.protocol.
*/
private String sslEnabledProtocols = "TLSv1.2,TLSv1.3";
/**
* The endpoint identification algorithm to validate server hostname using
* server certificate.
*/
private String sslEndpointIdentificationAlgorithm = "https";
/**
* The class of type org.apache.kafka.common.security.auth.SslEngineFactory
* to provide SSLEngine objects. Default value is
 * org.apache.kafka.common.security.ssl.DefaultSslEngineFactory.
*/
private String sslEngineFactoryClass;
/**
* The algorithm used by key manager factory for SSL connections. Default
* value is the key manager factory algorithm configured for the Java
* Virtual Machine.
*/
private String sslKeymanagerAlgorithm = "SunX509";
/**
* The password of the private key in the key store file. This is optional
* for client.
*/
private String sslKeyPassword;
/**
* The location of the key store file. This is optional for client and can
* be used for two-way authentication for client.
*/
private String sslKeystoreLocation;
/**
* The store password for the key store file. This is optional for client
* and only needed if ssl.keystore.location is configured.
*/
private String sslKeystorePassword;
/**
* The file format of the key store file. This is optional for client.
*/
private String sslKeystoreType = "JKS";
/**
* The SSL protocol used to generate the SSLContext. The default is
* 'TLSv1.3' when running with Java 11 or newer, 'TLSv1.2' otherwise. This
* value should be fine for most use cases. Allowed values in recent JVMs
* are 'TLSv1.2' and 'TLSv1.3'. 'TLS', 'TLSv1.1', 'SSL', 'SSLv2' and 'SSLv3'
* may be supported in older JVMs, but their usage is discouraged due to
* known security vulnerabilities. With the default value for this config
* and 'ssl.enabled.protocols', clients will downgrade to 'TLSv1.2' if the
* server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2',
* clients will not use 'TLSv1.3' even if it is one of the values in
* ssl.enabled.protocols and the server only supports 'TLSv1.3'.
*/
private String sslProtocol = "TLSv1.3";
/**
* The name of the security provider used for SSL connections. Default value
* is the default security provider of the JVM.
*/
private String sslProvider;
/**
* The SecureRandom PRNG implementation to use for SSL cryptography
* operations.
*/
private String sslSecureRandomImplementation;
/**
* The algorithm used by trust manager factory for SSL connections. Default
* value is the trust manager factory algorithm configured for the Java
* Virtual Machine.
*/
private String sslTrustmanagerAlgorithm = "PKIX";
/**
* The location of the trust store file.
*/
private String sslTruststoreLocation;
/**
 * The password for the trust store file. If a password is not set, access
 * to the truststore is still available, but integrity checking is disabled.
*/
private String sslTruststorePassword;
/**
* The file format of the trust store file.
*/
private String sslTruststoreType = "JKS";
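/*
 * Illustrative TLS client settings tying the ssl* fields together (paths
 * and passwords are placeholders):
 *
 *   camel.component.vertx-kafka.security-protocol=SSL
 *   camel.component.vertx-kafka.ssl-truststore-location=/etc/kafka/client.truststore.jks
 *   camel.component.vertx-kafka.ssl-truststore-password=changeit
 *   camel.component.vertx-kafka.ssl-keystore-location=/etc/kafka/client.keystore.jks
 *   camel.component.vertx-kafka.ssl-keystore-password=changeit
 *   camel.component.vertx-kafka.ssl-key-password=changeit
 */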
public Map<String, Object> getAdditionalProperties() {
return additionalProperties;
}
public void setAdditionalProperties(Map<String, Object> additionalProperties) {
this.additionalProperties = additionalProperties;
}
public String getBootstrapServers() {
return bootstrapServers;
}
public void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
public String getClientDnsLookup() {
return clientDnsLookup;
}
public void setClientDnsLookup(String clientDnsLookup) {
this.clientDnsLookup = clientDnsLookup;
}
public String getClientId() {
return clientId;
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
public VertxKafkaConfiguration getConfiguration() {
return configuration;
}
public void setConfiguration(VertxKafkaConfiguration configuration) {
this.configuration = configuration;
}
public Long getConnectionsMaxIdleMs() {
return connectionsMaxIdleMs;
}
public void setConnectionsMaxIdleMs(Long connectionsMaxIdleMs) {
this.connectionsMaxIdleMs = connectionsMaxIdleMs;
}
public HeaderFilterStrategy getHeaderFilterStrategy() {
return headerFilterStrategy;
}
public void setHeaderFilterStrategy(
HeaderFilterStrategy headerFilterStrategy) {
this.headerFilterStrategy = headerFilterStrategy;
}
public String getInterceptorClasses() {
return interceptorClasses;
}
public void setInterceptorClasses(String interceptorClasses) {
this.interceptorClasses = interceptorClasses;
}
public Long getMetadataMaxAgeMs() {
return metadataMaxAgeMs;
}
public void setMetadataMaxAgeMs(Long metadataMaxAgeMs) {
this.metadataMaxAgeMs = metadataMaxAgeMs;
}
public String getMetricReporters() {
return metricReporters;
}
public void setMetricReporters(String metricReporters) {
this.metricReporters = metricReporters;
}
public Integer getMetricsNumSamples() {
return metricsNumSamples;
}
public void setMetricsNumSamples(Integer metricsNumSamples) {
this.metricsNumSamples = metricsNumSamples;
}
public String getMetricsRecordingLevel() {
return metricsRecordingLevel;
}
public void setMetricsRecordingLevel(String metricsRecordingLevel) {
this.metricsRecordingLevel = metricsRecordingLevel;
}
public Long getMetricsSampleWindowMs() {
return metricsSampleWindowMs;
}
public void setMetricsSampleWindowMs(Long metricsSampleWindowMs) {
this.metricsSampleWindowMs = metricsSampleWindowMs;
}
public Integer getPartitionId() {
return partitionId;
}
public void setPartitionId(Integer partitionId) {
this.partitionId = partitionId;
}
public Integer getReceiveBufferBytes() {
return receiveBufferBytes;
}
public void setReceiveBufferBytes(Integer receiveBufferBytes) {
this.receiveBufferBytes = receiveBufferBytes;
}
public Long getReconnectBackoffMaxMs() {
return reconnectBackoffMaxMs;
}
public void setReconnectBackoffMaxMs(Long reconnectBackoffMaxMs) {
this.reconnectBackoffMaxMs = reconnectBackoffMaxMs;
}
public Long getReconnectBackoffMs() {
return reconnectBackoffMs;
}
public void setReconnectBackoffMs(Long reconnectBackoffMs) {
this.reconnectBackoffMs = reconnectBackoffMs;
}
public Integer getRequestTimeoutMs() {
return requestTimeoutMs;
}
public void setRequestTimeoutMs(Integer requestTimeoutMs) {
this.requestTimeoutMs = requestTimeoutMs;
}
public Long getRetryBackoffMs() {
return retryBackoffMs;
}
public void setRetryBackoffMs(Long retryBackoffMs) {
this.retryBackoffMs = retryBackoffMs;
}
public Integer getSendBufferBytes() {
return sendBufferBytes;
}
public void setSendBufferBytes(Integer sendBufferBytes) {
this.sendBufferBytes = sendBufferBytes;
}
public Boolean getAllowAutoCreateTopics() {
return allowAutoCreateTopics;
}
public void setAllowAutoCreateTopics(Boolean allowAutoCreateTopics) {
this.allowAutoCreateTopics = allowAutoCreateTopics;
}
public Integer getAutoCommitIntervalMs() {
return autoCommitIntervalMs;
}
public void setAutoCommitIntervalMs(Integer autoCommitIntervalMs) {
this.autoCommitIntervalMs = autoCommitIntervalMs;
}
public String getAutoOffsetReset() {
return autoOffsetReset;
}
public void setAutoOffsetReset(String autoOffsetReset) {
this.autoOffsetReset = autoOffsetReset;
}
public Boolean getBridgeErrorHandler() {
return bridgeErrorHandler;
}
public void setBridgeErrorHandler(Boolean bridgeErrorHandler) {
this.bridgeErrorHandler = bridgeErrorHandler;
}
public Boolean getCheckCrcs() {
return checkCrcs;
}
public void setCheckCrcs(Boolean checkCrcs) {
this.checkCrcs = checkCrcs;
}
public String getClientRack() {
return clientRack;
}
public void setClientRack(String clientRack) {
this.clientRack = clientRack;
}
public Integer getDefaultApiTimeoutMs() {
return defaultApiTimeoutMs;
}
public void setDefaultApiTimeoutMs(Integer defaultApiTimeoutMs) {
this.defaultApiTimeoutMs = defaultApiTimeoutMs;
}
public Boolean getEnableAutoCommit() {
return enableAutoCommit;
}
public void setEnableAutoCommit(Boolean enableAutoCommit) {
this.enableAutoCommit = enableAutoCommit;
}
public Boolean getExcludeInternalTopics() {
return excludeInternalTopics;
}
public void setExcludeInternalTopics(Boolean excludeInternalTopics) {
this.excludeInternalTopics = excludeInternalTopics;
}
public Integer getFetchMaxBytes() {
return fetchMaxBytes;
}
public void setFetchMaxBytes(Integer fetchMaxBytes) {
this.fetchMaxBytes = fetchMaxBytes;
}
public Integer getFetchMaxWaitMs() {
return fetchMaxWaitMs;
}
public void setFetchMaxWaitMs(Integer fetchMaxWaitMs) {
this.fetchMaxWaitMs = fetchMaxWaitMs;
}
public Integer getFetchMinBytes() {
return fetchMinBytes;
}
public void setFetchMinBytes(Integer fetchMinBytes) {
this.fetchMinBytes = fetchMinBytes;
}
public String getGroupId() {
return groupId;
}
public void setGroupId(String groupId) {
this.groupId = groupId;
}
public String getGroupInstanceId() {
return groupInstanceId;
}
public void setGroupInstanceId(String groupInstanceId) {
this.groupInstanceId = groupInstanceId;
}
public Integer getHeartbeatIntervalMs() {
return heartbeatIntervalMs;
}
public void setHeartbeatIntervalMs(Integer heartbeatIntervalMs) {
this.heartbeatIntervalMs = heartbeatIntervalMs;
}
public String getIsolationLevel() {
return isolationLevel;
}
public void setIsolationLevel(String isolationLevel) {
this.isolationLevel = isolationLevel;
}
public String getKeyDeserializer() {
return keyDeserializer;
}
public void setKeyDeserializer(String keyDeserializer) {
this.keyDeserializer = keyDeserializer;
}
public Integer getMaxPartitionFetchBytes() {
return maxPartitionFetchBytes;
}
public void setMaxPartitionFetchBytes(Integer maxPartitionFetchBytes) {
this.maxPartitionFetchBytes = maxPartitionFetchBytes;
}
public Integer getMaxPollIntervalMs() {
return maxPollIntervalMs;
}
public void setMaxPollIntervalMs(Integer maxPollIntervalMs) {
this.maxPollIntervalMs = maxPollIntervalMs;
}
public Integer getMaxPollRecords() {
return maxPollRecords;
}
public void setMaxPollRecords(Integer maxPollRecords) {
this.maxPollRecords = maxPollRecords;
}
public String getPartitionAssignmentStrategy() {
return partitionAssignmentStrategy;
}
public void setPartitionAssignmentStrategy(
String partitionAssignmentStrategy) {
this.partitionAssignmentStrategy = partitionAssignmentStrategy;
}
public Long getSeekToOffset() {
return seekToOffset;
}
public void setSeekToOffset(Long seekToOffset) {
this.seekToOffset = seekToOffset;
}
public String getSeekToPosition() {
return seekToPosition;
}
public void setSeekToPosition(String seekToPosition) {
this.seekToPosition = seekToPosition;
}
public Integer getSessionTimeoutMs() {
return sessionTimeoutMs;
}
public void setSessionTimeoutMs(Integer sessionTimeoutMs) {
this.sessionTimeoutMs = sessionTimeoutMs;
}
public String getValueDeserializer() {
return valueDeserializer;
}
public void setValueDeserializer(String valueDeserializer) {
this.valueDeserializer = valueDeserializer;
}
public String getAcks() {
return acks;
}
public void setAcks(String acks) {
this.acks = acks;
}
public Integer getBatchSize() {
return batchSize;
}
public void setBatchSize(Integer batchSize) {
this.batchSize = batchSize;
}
public Long getBufferMemory() {
return bufferMemory;
}
public void setBufferMemory(Long bufferMemory) {
this.bufferMemory = bufferMemory;
}
public String getCompressionType() {
return compressionType;
}
public void setCompressionType(String compressionType) {
this.compressionType = compressionType;
}
public Integer getDeliveryTimeoutMs() {
return deliveryTimeoutMs;
}
public void setDeliveryTimeoutMs(Integer deliveryTimeoutMs) {
this.deliveryTimeoutMs = deliveryTimeoutMs;
}
public Boolean getEnableIdempotence() {
return enableIdempotence;
}
public void setEnableIdempotence(Boolean enableIdempotence) {
this.enableIdempotence = enableIdempotence;
}
public String getKeySerializer() {
return keySerializer;
}
public void setKeySerializer(String keySerializer) {
this.keySerializer = keySerializer;
}
public Boolean getLazyStartProducer() {
return lazyStartProducer;
}
public void setLazyStartProducer(Boolean lazyStartProducer) {
this.lazyStartProducer = lazyStartProducer;
}
public Long getLingerMs() {
return lingerMs;
}
public void setLingerMs(Long lingerMs) {
this.lingerMs = lingerMs;
}
public Long getMaxBlockMs() {
return maxBlockMs;
}
public void setMaxBlockMs(Long maxBlockMs) {
this.maxBlockMs = maxBlockMs;
}
public Integer getMaxInFlightRequestsPerConnection() {
return maxInFlightRequestsPerConnection;
}
public void setMaxInFlightRequestsPerConnection(
Integer maxInFlightRequestsPerConnection) {
this.maxInFlightRequestsPerConnection = maxInFlightRequestsPerConnection;
}
public Integer getMaxRequestSize() {
return maxRequestSize;
}
public void setMaxRequestSize(Integer maxRequestSize) {
this.maxRequestSize = maxRequestSize;
}
public Long getMetadataMaxIdleMs() {
return metadataMaxIdleMs;
}
public void setMetadataMaxIdleMs(Long metadataMaxIdleMs) {
this.metadataMaxIdleMs = metadataMaxIdleMs;
}
public String getPartitionerClass() {
return partitionerClass;
}
public void setPartitionerClass(String partitionerClass) {
this.partitionerClass = partitionerClass;
}
public Integer getRetries() {
return retries;
}
public void setRetries(Integer retries) {
this.retries = retries;
}
public String getTransactionalId() {
return transactionalId;
}
public void setTransactionalId(String transactionalId) {
this.transactionalId = transactionalId;
}
public Integer getTransactionTimeoutMs() {
return transactionTimeoutMs;
}
public void setTransactionTimeoutMs(Integer transactionTimeoutMs) {
this.transactionTimeoutMs = transactionTimeoutMs;
}
public String getValueSerializer() {
return valueSerializer;
}
public void setValueSerializer(String valueSerializer) {
this.valueSerializer = valueSerializer;
}
public Boolean getAutowiredEnabled() {
return autowiredEnabled;
}
public void setAutowiredEnabled(Boolean autowiredEnabled) {
this.autowiredEnabled = autowiredEnabled;
}
public Vertx getVertx() {
return vertx;
}
public void setVertx(Vertx vertx) {
this.vertx = vertx;
}
public VertxOptions getVertxOptions() {
return vertxOptions;
}
public void setVertxOptions(VertxOptions vertxOptions) {
this.vertxOptions = vertxOptions;
}
public String getSaslClientCallbackHandlerClass() {
return saslClientCallbackHandlerClass;
}
public void setSaslClientCallbackHandlerClass(
String saslClientCallbackHandlerClass) {
this.saslClientCallbackHandlerClass = saslClientCallbackHandlerClass;
}
public String getSaslJaasConfig() {
return saslJaasConfig;
}
public void setSaslJaasConfig(String saslJaasConfig) {
this.saslJaasConfig = saslJaasConfig;
}
public String getSaslKerberosKinitCmd() {
return saslKerberosKinitCmd;
}
public void setSaslKerberosKinitCmd(String saslKerberosKinitCmd) {
this.saslKerberosKinitCmd = saslKerberosKinitCmd;
}
public Long getSaslKerberosMinTimeBeforeRelogin() {
return saslKerberosMinTimeBeforeRelogin;
}
public void setSaslKerberosMinTimeBeforeRelogin(
Long saslKerberosMinTimeBeforeRelogin) {
this.saslKerberosMinTimeBeforeRelogin = saslKerberosMinTimeBeforeRelogin;
}
public String getSaslKerberosServiceName() {
return saslKerberosServiceName;
}
public void setSaslKerberosServiceName(String saslKerberosServiceName) {
this.saslKerberosServiceName = saslKerberosServiceName;
}
public Double getSaslKerberosTicketRenewJitter() {
return saslKerberosTicketRenewJitter;
}
public void setSaslKerberosTicketRenewJitter(
Double saslKerberosTicketRenewJitter) {
this.saslKerberosTicketRenewJitter = saslKerberosTicketRenewJitter;
}
public Double getSaslKerberosTicketRenewWindowFactor() {
return saslKerberosTicketRenewWindowFactor;
}
public void setSaslKerberosTicketRenewWindowFactor(
Double saslKerberosTicketRenewWindowFactor) {
this.saslKerberosTicketRenewWindowFactor = saslKerberosTicketRenewWindowFactor;
}
public String getSaslLoginCallbackHandlerClass() {
return saslLoginCallbackHandlerClass;
}
public void setSaslLoginCallbackHandlerClass(
String saslLoginCallbackHandlerClass) {
this.saslLoginCallbackHandlerClass = saslLoginCallbackHandlerClass;
}
public String getSaslLoginClass() {
return saslLoginClass;
}
public void setSaslLoginClass(String saslLoginClass) {
this.saslLoginClass = saslLoginClass;
}
public Short getSaslLoginRefreshBufferSeconds() {
return saslLoginRefreshBufferSeconds;
}
public void setSaslLoginRefreshBufferSeconds(
Short saslLoginRefreshBufferSeconds) {
this.saslLoginRefreshBufferSeconds = saslLoginRefreshBufferSeconds;
}
public Short getSaslLoginRefreshMinPeriodSeconds() {
return saslLoginRefreshMinPeriodSeconds;
}
public void setSaslLoginRefreshMinPeriodSeconds(
Short saslLoginRefreshMinPeriodSeconds) {
this.saslLoginRefreshMinPeriodSeconds = saslLoginRefreshMinPeriodSeconds;
}
public Double getSaslLoginRefreshWindowFactor() {
return saslLoginRefreshWindowFactor;
}
public void setSaslLoginRefreshWindowFactor(
Double saslLoginRefreshWindowFactor) {
this.saslLoginRefreshWindowFactor = saslLoginRefreshWindowFactor;
}
public Double getSaslLoginRefreshWindowJitter() {
return saslLoginRefreshWindowJitter;
}
public void setSaslLoginRefreshWindowJitter(
Double saslLoginRefreshWindowJitter) {
this.saslLoginRefreshWindowJitter = saslLoginRefreshWindowJitter;
}
public String getSaslMechanism() {
return saslMechanism;
}
public void setSaslMechanism(String saslMechanism) {
this.saslMechanism = saslMechanism;
}
public String getSecurityProtocol() {
return securityProtocol;
}
public void setSecurityProtocol(String securityProtocol) {
this.securityProtocol = securityProtocol;
}
public String getSecurityProviders() {
return securityProviders;
}
public void setSecurityProviders(String securityProviders) {
this.securityProviders = securityProviders;
}
public String getSslCipherSuites() {
return sslCipherSuites;
}
public void setSslCipherSuites(String sslCipherSuites) {
this.sslCipherSuites = sslCipherSuites;
}
public String getSslEnabledProtocols() {
return sslEnabledProtocols;
}
public void setSslEnabledProtocols(String sslEnabledProtocols) {
this.sslEnabledProtocols = sslEnabledProtocols;
}
public String getSslEndpointIdentificationAlgorithm() {
return sslEndpointIdentificationAlgorithm;
}
public void setSslEndpointIdentificationAlgorithm(
String sslEndpointIdentificationAlgorithm) {
this.sslEndpointIdentificationAlgorithm = sslEndpointIdentificationAlgorithm;
}
public String getSslEngineFactoryClass() {
return sslEngineFactoryClass;
}
public void setSslEngineFactoryClass(String sslEngineFactoryClass) {
this.sslEngineFactoryClass = sslEngineFactoryClass;
}
public String getSslKeymanagerAlgorithm() {
return sslKeymanagerAlgorithm;
}
public void setSslKeymanagerAlgorithm(String sslKeymanagerAlgorithm) {
this.sslKeymanagerAlgorithm = sslKeymanagerAlgorithm;
}
public String getSslKeyPassword() {
return sslKeyPassword;
}
public void setSslKeyPassword(String sslKeyPassword) {
this.sslKeyPassword = sslKeyPassword;
}
public String getSslKeystoreLocation() {
return sslKeystoreLocation;
}
public void setSslKeystoreLocation(String sslKeystoreLocation) {
this.sslKeystoreLocation = sslKeystoreLocation;
}
public String getSslKeystorePassword() {
return sslKeystorePassword;
}
public void setSslKeystorePassword(String sslKeystorePassword) {
this.sslKeystorePassword = sslKeystorePassword;
}
public String getSslKeystoreType() {
return sslKeystoreType;
}
public void setSslKeystoreType(String sslKeystoreType) {
this.sslKeystoreType = sslKeystoreType;
}
public String getSslProtocol() {
return sslProtocol;
}
public void setSslProtocol(String sslProtocol) {
this.sslProtocol = sslProtocol;
}
public String getSslProvider() {
return sslProvider;
}
public void setSslProvider(String sslProvider) {
this.sslProvider = sslProvider;
}
public String getSslSecureRandomImplementation() {
return sslSecureRandomImplementation;
}
public void setSslSecureRandomImplementation(
String sslSecureRandomImplementation) {
this.sslSecureRandomImplementation = sslSecureRandomImplementation;
}
public String getSslTrustmanagerAlgorithm() {
return sslTrustmanagerAlgorithm;
}
public void setSslTrustmanagerAlgorithm(String sslTrustmanagerAlgorithm) {
this.sslTrustmanagerAlgorithm = sslTrustmanagerAlgorithm;
}
public String getSslTruststoreLocation() {
return sslTruststoreLocation;
}
public void setSslTruststoreLocation(String sslTruststoreLocation) {
this.sslTruststoreLocation = sslTruststoreLocation;
}
public String getSslTruststorePassword() {
return sslTruststorePassword;
}
public void setSslTruststorePassword(String sslTruststorePassword) {
this.sslTruststorePassword = sslTruststorePassword;
}
public String getSslTruststoreType() {
return sslTruststoreType;
}
public void setSslTruststoreType(String sslTruststoreType) {
this.sslTruststoreType = sslTruststoreType;
}
}