################################ GENERAL #####################################

# By default kvrocks listens for connections from all the network interfaces
# available on the server. It is possible to listen to just one or multiple
# interfaces using the "bind" configuration directive, followed by one or
# more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1
bind 0.0.0.0

# Accept connections on the specified port, default is 6666.
port 6668

# Close the connection after a client is idle for N seconds (0 to disable)
timeout 0
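
# For example, since the timeout above is expressed in seconds, closing
# connections that have been idle for more than five minutes would be:
# timeout 300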

# The number of worker threads. Increasing or decreasing it affects performance.
workers 8

# The number of replication worker threads. Increasing or decreasing it affects
# the replication performance.
# Default is 1.
repl-workers 1

# The log level. The value should be one of INFO, WARNING, ERROR, FATAL.
# Default is INFO.
loglevel INFO

# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
# Note that kvrocks will write a pid file when daemonized (see "pidfile" below).
daemonize yes

# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running kvrocks.
#
# This can be left commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since kvrocks is pretty fast, an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password, otherwise it will be very easy to break.
#
requirepass foobared
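
# Clients then authenticate before issuing other commands, for example with a
# Redis-compatible client such as redis-cli (shown here only as an illustration):
# redis-cli -p 6668 AUTH foobared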

# If the master is password protected (using the "requirepass" configuration
# directive above) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
masterauth foobared

# Master-slave replication checks that the db name matches; if not, the slave
# refuses to sync the db from the master. Don't use the default value: set
# db-name to identify the cluster.
db-name change.me.db

# The working directory
#
# The DB will be written inside this directory
# Note that you must specify a directory here, not a file name.
dir /tmp/kvrocks-slave

# The backup directory
#
# The DB will be written inside this directory
# Note that you must specify a directory here, not a file name.
# backup-dir /tmp/kvrocks/backup

# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
# default. You can specify a custom pid file location here.
# pidfile /var/run/kvrocks.pid

# You can configure a slave instance to accept writes or not. Writing against
# a slave instance may be useful to store some ephemeral data (because data
# written to a slave will be deleted after a resync with the master), but
# may also cause problems if clients write to it because of a
# misconfiguration.
slave-read-only yes

# The slave priority is an integer number published by Kvrocks in the INFO output.
# It is used by Redis Sentinel in order to select a slave to promote into a
# master if the master is no longer working correctly.
#
# A slave with a low priority number is considered better for promotion, so
# for instance if there are three slaves with priority 10, 100, 25, Sentinel will
# pick the one with priority 10, that is the lowest.
#
# However a special priority of 0 marks the slave as not able to perform the
# role of master, so a slave with priority of 0 will never be selected by
# Redis Sentinel for promotion.
#
# By default the priority is 100.
slave-priority 100
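
# For example, to make sure this slave is never selected for promotion by
# Redis Sentinel:
# slave-priority 0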

# TCP listen() backlog.
#
# In high requests-per-second environments you need a high backlog in order
# to avoid slow client connection issues. Note that the Linux kernel
# will silently truncate it to the value of /proc/sys/net/core/somaxconn, so
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
# in order to get the desired effect.
tcp-backlog 511
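
# On Linux, raising the kernel limits mentioned above could look like the
# following sketch (adapt the values to your environment):
# sysctl -w net.core.somaxconn=511
# sysctl -w net.ipv4.tcp_max_syn_backlog=511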

# The listening address used for the replication channel; it accepts the same
# format as "bind".
#
# Examples:
#
# repl-bind 192.168.1.100 10.0.0.1
# repl-bind 127.0.0.1
repl-bind 0.0.0.0

# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
# another kvrocks server. A few things to understand ASAP about kvrocks replication.
#
# 1) Kvrocks replication is asynchronous, but you can configure a master to
#    stop accepting writes if it appears to be not connected with at least
#    a given number of slaves.
# 2) Kvrocks slaves are able to perform a partial resynchronization with the
#    master if the replication link is lost for a relatively small amount of
#    time. You may want to configure the replication backlog size (see the next
#    sections of this file) with a sensible value depending on your needs.
# 3) Replication is automatic and does not need user intervention. After a
#    network partition slaves automatically try to reconnect to masters
#    and resynchronize with them.
#
# slaveof <masterip> <masterport>
slaveof 127.0.0.1 6666

# The maximum allowed rate (in MB/s) that should be used by replication.
# If the rate exceeds max-replication-mb, replication will slow down.
# Default: 0 (i.e. no limit)
max-replication-mb 0
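
# For example, to cap replication traffic at 100 MB/s:
# max-replication-mb 100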

# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
# If the rate exceeds max-io-mb, I/O will slow down.
# 0 means no limit.
# Default: 500
max-io-mb 500

# The maximum allowed space (in GB) that should be used by RocksDB.
# If the total size of the SST files exceeds max-db-size, writes to RocksDB will fail.
# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization
# Default: 0 (i.e. no limit)
max-db-size 0
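
# For example, to make writes fail once the SST files grow past 16 GB:
# max-db-size 16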

# The maximum number of backups to keep. A server cron runs every minute to check the
# number of current backups and purges the oldest ones when the limit is exceeded.
# If max-backup-to-keep is 0, no backups are kept.
max-backup-to-keep 1

# The maximum number of hours to keep a backup. If max-backup-keep-hours is 0, backups
# are never purged by age.
# Default is 168 (one week).
max-backup-keep-hours 168
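
# For example, to keep at most 3 backups and purge any backup older than one day:
# max-backup-to-keep 3
# max-backup-keep-hours 24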


################################## SLOW LOG ###################################

# The Kvrocks Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Kvrocks
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.

# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 100000

# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
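
# Slow log entries can then be inspected and cleared at runtime, for example with
# a Redis-compatible client (SLOWLOG RESET is mentioned above; SLOWLOG GET is
# assumed to be available as well):
# redis-cli -p 6668 SLOWLOG GET 10
# redis-cli -p 6668 SLOWLOG RESET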

################################## CRON ###################################

# Compaction scheduler: automatically compact the DB at the scheduled times.
# The time expression format is the same as crontab (currently only * and integers are supported).
# e.g. compact-cron 0 3 * * * 0 4 * * *
# would compact the db at 3am and 4am every day
compact-cron 0 3 * * *

# Backup scheduler: automatically back up the DB at the scheduled times.
# The time expression format is the same as compact-cron.
# e.g. bgsave-cron 0 3 * * * 0 4 * * *
# would back up the db at 3am and 4am every day
# bgsave-cron 0 4 * * *

################################ ROCKSDB #####################################

# Specify the capacity of the metadata column family block cache. A larger block
# cache may make requests faster since more keys can be cached. The maximum size
# is 200*1024.
# The unit is MiB, default is 4096.
rocksdb.metadata_block_cache_size 4096

# Specify the capacity of the subkey column family block cache. A larger block
# cache may make requests faster since more keys can be cached. The maximum size
# is 200*1024.
# The unit is MiB, default is 8192.
rocksdb.subkey_block_cache_size 8192

# Number of open files that can be used by the DB. You may need to
# increase this if your database has a large working set. Value -1 means
# files opened are always kept open. You can estimate number of files based
# on target_file_size_base and target_file_size_multiplier for level-based
# compaction. For universal-style compaction, you can usually set it to -1.
rocksdb.max_open_files 8096

# Amount of data to build up in memory (backed by an unsorted log
# on disk) before converting to a sorted on-disk file.
#
# Larger values increase performance, especially during bulk loads.
# Up to max_write_buffer_number write buffers may be held in memory
# at the same time,
# so you may wish to adjust this parameter to control memory usage.
# Also, a larger write buffer will result in a longer recovery time
# the next time the database is opened.
#
# Note that write_buffer_size is enforced per column family.
# See db_write_buffer_size for sharing memory across column families.

# The unit is MB, default is 256.
rocksdb.write_buffer_size 256

# The maximum number of write buffers that are built up in memory.
# The default and the minimum number is 2, so that when 1 write buffer
# is being flushed to storage, new writes can continue to the other
# write buffer.
# If max_write_buffer_number > 3, writing will be slowed down to
# options.delayed_write_rate if we are writing to the last write buffer
# allowed.
rocksdb.max_write_buffer_number 2
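
# With the values above, each column family may hold up to
# 2 write buffers * 256 MB = 512 MB of memtable data in memory at the same time.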

# Maximum number of concurrent background compaction jobs, submitted to
# the default LOW priority thread pool.
rocksdb.max_background_compactions 2

# Maximum number of concurrent background memtable flush jobs, submitted by
# default to the HIGH priority thread pool. If the HIGH priority thread pool
# is configured to have zero threads, flush jobs will share the LOW priority
# thread pool with compaction jobs.
rocksdb.max_background_flushes 2

# This value represents the maximum number of threads that will
# concurrently perform a compaction job by breaking it into multiple,
# smaller ones that are run simultaneously.
# Default: 1 (i.e. no subcompactions)
rocksdb.max_sub_compactions 1

# Specify the compression to use.
# Accepted values: "no", "snappy"
# Default is snappy.
rocksdb.compression snappy

################################ NAMESPACE #####################################
namespace.test change.me
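
# Namespaces are declared one per line as "namespace.{name} {token}". A second
# namespace (hypothetical name and token shown) would be added as:
# namespace.demo another.token.change.me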