#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------IMPORTANT---------------------------------------------#
# 1. Note that the system will automatically create a heartbeat port for each metadata service    #
#    and data service. The default metadata heartbeat port is internal_meta_port + 1, and the     #
#    default data heartbeat port is internal_data_port + 1. So when you configure these two      #
#    items and seed_nodes, be sure to reserve a port for each heartbeat service.                  #
# 2. If open_server_rpc_port is set to true, the server module's RPC port will be opened and     #
#    set to rpc_port (in iotdb-engine.properties) + 1, so this port should also be reserved.     #
#-------------------------------------------IMPORTANT---------------------------------------------#
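# For example, with the defaults below (internal_meta_port=9003, internal_data_port=40010), the
# metadata heartbeat service takes port 9004 and the data heartbeat service takes port 40011, so
# neither 9004 nor 40011 may be assigned to anything else.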
# used for communication between cluster nodes, e.g. heartbeats, raft logs, snapshots, etc.
# if this parameter is commented out, the IP bound to the hostname will be used.
internal_ip=127.0.0.1
# port for metadata service
internal_meta_port=9003
# port for data service
internal_data_port=40010
# port for cluster info API, 6567 by default
#cluster_info_public_port=6567
# whether to open the RPC port of the server module (for debugging purposes)
# if true, the rpc_port of the single server will be changed to rpc_port (in iotdb-engine.properties) + 1
# open_server_rpc_port=false
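# As an illustration (assuming rpc_port keeps its usual default of 6667 in iotdb-engine.properties),
# setting open_server_rpc_port=true would move the single server's RPC port to 6668, so 6668 must
# then be kept free as well.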
# comma-separated {IP/DOMAIN}:internal_meta_port pairs. When used by start-datanode.sh(.bat),
# this configuration means the nodes that will form the initial cluster. Every node that uses
# start-datanode.sh(.bat) should have the same seed_nodes, or the building of the initial cluster
# will fail. WARNING: once the initial cluster is built, this should not be changed before the
# environment is cleaned.
# When used by add-node.sh(.bat), this means the nodes to which the request to join the cluster
# will be sent. As all nodes can respond to such a request, this configuration can be any nodes
# already in the cluster; it does not need to be the nodes that were used to build the initial
# cluster by start-datanode.sh(.bat). Several of these nodes will be picked randomly to send the
# request; the number of nodes picked depends on the number of retries.
#seed_nodes=127.0.0.1:9003,127.0.0.1:9005,127.0.0.1:9007
seed_nodes=127.0.0.1:9003
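# As a sketch, a three-machine cluster (the host names below are placeholders) would put the same
# line on every node, e.g.:
# seed_nodes=node1:9003,node2:9003,node3:9003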
# whether to use the Thrift compressed protocol for internal communications. If you want to change
# compression settings for external clients, please modify 'rpc_thrift_compression_enable' in
# 'iotdb-engine.properties'.
# WARNING: this must be consistent across all nodes in the cluster
# rpc_thrift_compression_enable=false
# number of replicas for one partition
default_replica_num=1
# the number of sub-raft groups for multi-raft
multi_raft_factor=1
# cluster name used to identify different clusters
# cluster_name must be the same on every node of one cluster
# cluster_name=default
# Thrift socket and connection timeout between raft nodes, in milliseconds.
# NOTE: the timeout of connection used for sending heartbeats and requesting votes
# will be adjusted to min(heartbeat_interval_ms, connection_timeout_ms).
# connection_timeout_ms=20000
# write operation timeout threshold (ms), this is only for internal communications,
# not for the whole operation.
# write_operation_timeout_ms=30000
# read operation timeout threshold (ms), this is only for internal communications,
# not for the whole operation.
# read_operation_timeout_ms=30000
# the time interval (ms) between two rounds of heartbeat broadcast by one raft group leader.
# It is recommended to set this to 1/10 of election_timeout_ms, but larger than one RTT between any two nodes.
# heartbeat_interval_ms=1000
# The election timeout of a follower, or the time an elector waits for requested votes, in milliseconds.
# election_timeout_ms=20000
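# For example, following the 1/10 recommendation above, election_timeout_ms=20000 would pair with
# heartbeat_interval_ms=2000, provided one RTT between any two nodes is well below 2000 ms.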
# catch-up timeout threshold (ms). This is used when a follower falls too far behind the leader,
# in which case the leader sends logs (or a snapshot) to the follower.
# NOTICE: sending a snapshot may take minutes,
# so this parameter should be larger than the time a snapshot takes.
# catch_up_timeout_ms=300000
# whether to use batch append entries in log catch-up
# use_batch_in_catch_up=true
# the minimum number of committed logs in memory; after each log deletion, at most this number of
# logs will remain in memory. Increasing the number reduces the chance of using a snapshot in
# catch-ups, but also increases the memory footprint
# min_num_of_logs_in_mem=1000
# maximum number of committed logs in memory; when reached, a log deletion will be triggered.
# Increasing the number reduces the chance of using a snapshot in catch-ups, but also increases
# the memory footprint
# max_num_of_logs_in_mem=2000
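# For example, with min_num_of_logs_in_mem=1000 and max_num_of_logs_in_mem=2000, a deletion is
# triggered once 2000 committed logs accumulate in memory, after which at most 1000 remain.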
# Ratio of write memory allocated for the raft log, 0.2 by default.
# Increasing the number reduces the memory allocated for the write process in IoTDB, but also
# increases the memory footprint of the raft log, which reduces the chance of using a snapshot in catch-ups
# raft_log_memory_proportion=0.2
# deletion check period, in seconds, of the submitted logs
# log_deletion_check_interval_second=-1
# Whether creating schema automatically is enabled, this will replace the one in iotdb-engine.properties
# enable_auto_create_schema=true
# consistency level; three consistency levels are supported now: strong, mid, and weak.
# Strong consistency means the server first tries to synchronize with the leader to get the
# newest data; if that fails (times out), an error is reported directly to the user.
# Mid consistency means the server first tries to synchronize with the leader,
# but if that fails (times out), it gives up and just uses the current data it has cached before.
# Weak consistency does not synchronize with the leader and simply uses the local data
# consistency_level=mid
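# For example, choose consistency_level=strong when a read must never return stale data, accepting
# that the read fails whenever the leader cannot be reached in time; choose weak when the lowest
# read latency matters more than freshness.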
# Whether to use asynchronous server
# is_use_async_server=false
# Whether to use asynchronous applier
# is_use_async_applier=true
# is raft log persistence enabled
# is_enable_raft_log_persistence=true
# When the raft log reaches a certain number of entries, it will be flushed to disk.
# It is possible to lose at most flush_raft_log_threshold operations
# flush_raft_log_threshold=10000
# Size of log buffer in each RaftMember's LogManager(in byte).
# raft_log_buffer_size=16777216
# The maximum number of raft log indexes stored in memory per raft group.
# These indexes are used to locate the logs on disk
# max_raft_log_index_size_in_memory=10000
# If the leader finds too many uncommitted raft logs, it will wait for a short period of
# time before appending new raft logs
# uncommitted_raft_log_num_for_reject_threshold=500
# If a follower finds too many committed but unapplied raft logs, it will reject the raft
# logs sent by the leader
# unapplied_raft_log_num_for_reject_threshold=500
# The maximum size (in bytes) of each raft log file saved on disk for each raft group.
# The default size is 1GB
# max_raft_log_persist_data_size_per_file=1073741824
# The maximum number of persisted raft log files on disk per raft group,
# so the disk space taken by each raft group's log approximately equals
# max_raft_log_persist_data_size_per_file * max_number_of_persist_raft_log_files
# max_number_of_persist_raft_log_files=5
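# For example, with the defaults max_raft_log_persist_data_size_per_file=1073741824 (1GB) and
# max_number_of_persist_raft_log_files=5, each raft group's persisted log takes up at most about 5GB.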
# The maximum number of logs saved on the disk
# max_persist_raft_log_number_on_disk=1000000
# whether to use the persisted logs on disk to catch up when no logs are found in memory; if set
# to false, a snapshot will be used to catch up instead.
# enable_use_persist_log_on_disk_to_catch_up=false
# The number of logs read on the disk at one time, which is mainly used to control the memory usage.
# This value multiplied by the log size is about the amount of memory used to read logs from the disk at one time.
# max_number_of_logs_per_fetch_on_disk=1000
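# For example, taking an average log size of 64KB (an illustrative figure, not a measured one),
# max_number_of_logs_per_fetch_on_disk=1000 would read about 64MB into memory per fetch.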
# When the consistency level is set to mid, a query will fail if the log lag exceeds max_read_log_lag.
# The default value is 1000
# max_read_log_lag=1000
# When a follower tries to sync its logs with the leader, the sync will fail if the log lag exceeds max_sync_log_lag.
# The default value is 100000
# max_sync_log_lag=100000
# Max number of clients in a ClientPool of a member for one node. When the number of clients in
# the ClientPool exceeds this parameter, the ClientPool blocks the thread that requests a client
# for up to wait_client_timeout_ms.
# max_client_pernode_permember_number=1000
# Max number of idle clients in a ClientPool of a member for one node. When the number of idle
# clients in the ClientPool exceeds this parameter, the ClientPool destroys a client when it is
# returned.
# max_idle_client_pernode_permember_number=500
# If the number of connections created for a node exceeds max_client_pernode_permember_number,
# the requesting thread waits up to this long for other connections to be released; on timeout,
# a new connection will be created.
# wait_client_timeout_ms=5000
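# Taken together with the two settings above, the defaults mean: a member keeps at most 1000
# clients per node, destroys returned clients once more than 500 sit idle, and blocks a borrower
# for up to 5000 ms before creating an extra connection.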
# ---------------------------Experimental Features---------------------------------------
# Features below are for experimental purposes, so DO NOT CHANGE them unless you are absolutely
# certain of what you are doing.
# Use an asynchronous queue to sequence commands. This may reduce thread contention when the
# client concurrency is high.
# use_async_sequencing=false
# Use a sliding window in followers to store out-of-order entries.
# May reduce some waiting time during log replication but increase memory footprint.
# use_follower_sliding_window=false
# Enable write requests to be returned as soon as they are received by a quorum, but not necessarily
# appended or committed.
# Up to max_num_of_logs_in_mem requests may be lost when the client and the leader fail
# simultaneously, so be extremely careful before turning it on.
# Currently, this is compatible only when clients use IoTDB Java Session.
# enable_weak_acceptance=false
# The number of threads that will be used to send logs to a follower. This increases parallelism
# in log replication, but be careful when there are many replication groups or the number of
# replicas is high, as thread switching may become costly.
# dispatcher_binding_thread_num=16
# Whether to broadcast raft logs indirectly, relayed through other nodes rather than sent by the
# leader alone.
use_indirect_broadcasting=true
# Whether to apply further optimization to indirect broadcasting.
optimize_indirect_broadcasting=false