#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------IMPORTANT---------------------------------------------#
# 1. Note that the system will automatically create a heartbeat port for each metadata service #
# and each data service. The default metadata heartbeat port is internal_meta_port + 1, and #
# the default data heartbeat port is internal_data_port + 1. So when you configure these two #
# items and seed_nodes, remember to reserve a port for each heartbeat service. #
# 2. If open_server_rpc_port is set to true, the RPC port of the server module will be opened #
# and set to rpc_port (in iotdb-engine.properties) + 1, so this port should also be reserved. #
#-------------------------------------------IMPORTANT---------------------------------------------#
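#
# For illustration only (an example derived from the rules above, not additional settings):
# with internal_meta_port=9003 and internal_data_port=40010, the node also binds 9004 (the
# metadata heartbeat) and 40011 (the data heartbeat). A 3-node cluster on one host can thus
# space its meta ports two apart (9003, 9005, 9007), leaving 9004 and 9006 free for the
# heartbeat services, which matches the default seed_nodes below.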
# IP used for communication between cluster nodes, e.g. heartbeats, raft logs and snapshots.
internal_ip=127.0.0.1
# port for metadata service
internal_meta_port=9003
# port for data service
internal_data_port=40010
# whether to open the RPC port of the server module (for debugging purposes)
# if true, the rpc_port of the single server will be changed to rpc_port (in iotdb-engine.properties) + 1
# open_server_rpc_port=false
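# For example, assuming the default rpc_port of 6667 in iotdb-engine.properties, enabling this
# would move the single server's RPC port to 6668, so 6668 should also be kept free.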
# comma-separated {IP/DOMAIN}:internal_meta_port pairs. When used by start-node.sh(.bat),
# this configuration lists the nodes that will form the initial cluster; every node that runs
# start-node.sh(.bat) must have the same seed_nodes, or building the initial cluster will fail.
# WARNING: once the initial cluster is built, this should not be changed before the environment
# is cleaned.
# When used by add-node.sh(.bat), this lists the nodes to which the request to join the cluster
# will be sent. As any node can respond to such a request, this can be any nodes that are
# already in the cluster; they do not need to be the nodes used to build the initial cluster
# with start-node.sh(.bat). Several nodes will be picked randomly to send the request, and the
# number of nodes picked depends on the number of retries.
seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007
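# An illustrative example (hosts A, B and C are hypothetical): a 3-node cluster spread over
# three hosts could keep internal_meta_port=9003 on every node and set, identically on all
# three nodes, seed_nodes=A:9003,B:9003,C:9003 before running start-node.sh(.bat).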
# whether to use thrift compressed protocol for internal communications. If you want to change
# compression settings for external clients, please modify 'rpc_thrift_compression_enable' in
# 'iotdb-engine.properties'.
# WARNING: this must be consistent across all nodes in the cluster
# rpc_thrift_compression_enable=false
# max client connections created by thrift
# this limit applies separately to data/meta/client connections, so it does not cap the
# total number of connections on the node
# max_concurrent_client_num=10000
# number of replicas for one partition
default_replica_num=1
# cluster name used to identify different clusters
# all nodes in one cluster must have the same cluster_name
# cluster_name=default
# connection timeout (ms) among raft nodes
# connection_timeout_ms=20000
# write operation timeout threshold (ms); this applies only to internal communications,
# not to the whole operation.
# write_operation_timeout_ms=30000
# read operation timeout threshold (ms); this applies only to internal communications,
# not to the whole operation.
# read_operation_timeout_ms=30000
# catch-up timeout threshold (ms). This is used when a follower falls too far behind the
# leader, in which case the leader sends logs (or a snapshot) to the follower.
# NOTICE: sending a snapshot may take minutes, so this parameter should be larger than the
# time the snapshot takes to send.
# catch_up_timeout_ms=300000
# whether to use batch append entries in log catch up
# use_batch_in_catch_up=true
# the minimum number of committed logs kept in memory; after each log deletion, at most this
# number of logs will remain in memory. Increasing the number reduces the chance of using
# snapshots in catch-ups, but also increases the memory footprint
# min_num_of_logs_in_mem=1000
# maximum number of committed logs in memory; when reached, a log deletion is triggered.
# Increasing the number reduces the chance of using snapshots in catch-ups, but also increases
# the memory footprint
# max_num_of_logs_in_mem=2000
# maximum memory size of committed logs in memory (in bytes); when reached, a log deletion is
# triggered. Increasing the number reduces the chance of using snapshots in catch-ups, but
# also increases the memory footprint; the default is 512MB
# max_memory_size_for_raft_log=536870912
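# As a quick sanity check of the default above: 512MB = 512 * 1024 * 1024 = 536870912 bytes.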
# deletion check period (in seconds) of committed logs
# log_deletion_check_interval_second=-1
# Whether creating schema automatically is enabled; this overrides the setting in iotdb-engine.properties
# enable_auto_create_schema=true
# consistency level; three consistency levels are supported: strong, mid, and weak.
# Strong consistency means the server first tries to synchronize with the leader to get the
# newest data; if that fails (times out), it directly reports an error to the user.
# Mid consistency means the server first tries to synchronize with the leader, but if that
# fails (times out), it gives up and uses the data it has cached locally.
# Weak consistency does not synchronize with the leader and simply uses the local data
# consistency_level=mid
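# For example, under mid consistency a read that cannot synchronize with the leader within the
# timeout is served from local data, provided the local log lag does not exceed
# max_read_log_lag (configured below).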
# Whether to use asynchronous server
# is_use_async_server=false
# Whether to use asynchronous applier
# is_use_async_applier=true
# whether raft log persistence is enabled
# is_enable_raft_log_persistence=true
# raft logs are flushed to disk once this many log entries have accumulated;
# at most the latest flush_raft_log_threshold operations may be lost if the node crashes
# flush_raft_log_threshold=10000
# Size of the log buffer in each RaftMember's LogManager (in bytes).
# raft_log_buffer_size=16777216
# The maximum number of raft log index entries kept in memory per raft group;
# these indexes are used to locate the logs on disk
# max_raft_log_index_size_in_memory=10000
# The maximum size (in bytes) of each raft log file persisted on disk for each raft group.
# The default size is 1GB
# max_raft_log_persist_data_size_per_file=1073741824
# The maximum number of persisted raft log files on disk per raft group, so the disk space
# occupied by each raft group's logs approximately equals
# max_raft_log_persist_data_size_per_file * max_number_of_persist_raft_log_files
# max_number_of_persist_raft_log_files=5
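# With the defaults above, the logs of one raft group occupy at most roughly
# 1073741824 * 5 = 5368709120 bytes (about 5GB) on disk.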
# The maximum number of logs saved on the disk
# max_persist_raft_log_number_on_disk=1000000
# whether to use the logs persisted on disk to catch up when no logs are found in memory;
# if set to false, a snapshot is used to catch up when no logs are found in memory.
# enable_use_persist_log_on_disk_to_catch_up=true
# The number of logs read from disk at one time, mainly used to control memory usage.
# This value multiplied by the log size approximates the amount of memory used by one read of logs from disk.
# max_number_of_logs_per_fetch_on_disk=1000
# When the consistency level is set to mid, a query fails if the local log lag exceeds max_read_log_lag.
# The default value is 1000
# max_read_log_lag=1000
# Maximum number of clients in the ClientPool that one member keeps for one node.
# max_client_pernode_permember_number=1000
# If the number of connections created for a node exceeds max_client_pernode_permember_number,
# a request waits this long (ms) for another connection to be released; if the wait times out,
# a new connection is created anyway.
# wait_client_timeout_ms=5000
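# For illustration with the defaults: once a member holds 1000 clients for a node, the next
# request waits up to 5000 ms for one to be released before creating an extra connection.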