[apps..default]
run = true
count = 1
[apps.meta]
type = meta
name = meta
ports = @META_PORT@
pools = THREAD_POOL_DEFAULT,THREAD_POOL_META_SERVER,THREAD_POOL_META_STATE,THREAD_POOL_FD,THREAD_POOL_DLOCK,THREAD_POOL_BLOCK_SERVICE
[apps.replica]
type = replica
name = replica
ports = @REPLICA_PORT@
pools = THREAD_POOL_DEFAULT,THREAD_POOL_REPLICATION_LONG,THREAD_POOL_REPLICATION,THREAD_POOL_FD,THREAD_POOL_LOCAL_APP,THREAD_POOL_BLOCK_SERVICE,THREAD_POOL_COMPACT,THREAD_POOL_INGESTION,THREAD_POOL_SLOG,THREAD_POOL_PLOG
[apps.collector]
name = collector
type = collector
ports = 34101
pools = THREAD_POOL_DEFAULT,THREAD_POOL_REPLICATION
[core]
tool = nativerun
toollets = profiler
enable_default_app_mimic = true
logging_start_level = LOG_LEVEL_DEBUG
[block_service.local_service]
type = local_service
args = ../block_service/local_service
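# Note: local_service is the block service backed by the local filesystem; the path given in
# args is (presumably) used as its root directory for data such as cold backup files.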
[tools.simple_logger]
short_header = false
stderr_start_level = LOG_LEVEL_ERROR
[threadpool..default]
worker_count = 4
worker_priority = THREAD_xPRIORITY_NORMAL
partitioned = false
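# Note: the settings in [threadpool..default] are inherited by every [threadpool.*] section below
# unless overridden there. partitioned = true (per rDSN thread pool semantics, as assumed here)
# hashes tasks onto fixed worker queues, so tasks sharing a hash key execute serially.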
[threadpool.THREAD_POOL_DEFAULT]
name = default
# The worker count in THREAD_POOL_DEFAULT must be >= 5.
# In the info collector server there are four timer tasks (LPC_PEGASUS_APP_STAT_TIMER, LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER,
# LPC_DETECT_AVAILABLE and LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER), and each of them occupies a thread in THREAD_POOL_DEFAULT.
# Each timer task issues an RPC to the meta server and blocks waiting for the RPC's callback, which must also run on a
# thread in THREAD_POOL_DEFAULT. If worker_count <= 4, the four timer tasks can occupy all of the worker threads and the
# callbacks never get a thread to run on. That is a deadlock: the timer tasks wait for their callbacks to execute, while
# the callbacks wait for the timer tasks to release a thread. Hence 4 timer threads + at least 1 thread for callbacks = 5.
worker_count = 5
[threadpool.THREAD_POOL_REPLICATION]
name = replica
partitioned = true
worker_count = 2
[threadpool.THREAD_POOL_META_STATE]
name = meta_state
partitioned = true
worker_count = 1
[threadpool.THREAD_POOL_DLOCK]
name = dist_lock
partitioned = true
worker_count = 1
[threadpool.THREAD_POOL_FD]
name = fd
worker_count = 2
[threadpool.THREAD_POOL_LOCAL_APP]
name = local_app
worker_count = 2
[threadpool.THREAD_POOL_REPLICATION_LONG]
name = rep_long
worker_count = 2
[threadpool.THREAD_POOL_BLOCK_SERVICE]
name = block_service
worker_count = 1
[threadpool.THREAD_POOL_COMPACT]
name = compact
worker_count = 1
[threadpool.THREAD_POOL_INGESTION]
name = ingestion
partitioned = false
worker_count = 2
[threadpool.THREAD_POOL_SLOG]
name = slog
worker_count = 1
[threadpool.THREAD_POOL_PLOG]
name = plog
partitioned = true
worker_count = 4
[meta_server]
server_list = @LOCAL_IP@:34601,@LOCAL_IP@:34602,@LOCAL_IP@:34603
cluster_root = /pegasus/onebox/@LOCAL_IP@
distributed_lock_service_type = distributed_lock_service_zookeeper
distributed_lock_service_parameters = /pegasus/onebox/@LOCAL_IP@
meta_state_service_type = meta_state_service_zookeeper
stable_rs_min_running_seconds = 0
server_load_balancer_type = greedy_load_balancer
min_live_node_count_for_unfreeze = 1
cold_backup_disabled = false
recover_from_replica_server = false
[replication]
mutation_2pc_min_replica_count = 1
cold_backup_root = onebox
cluster_name = onebox
[meta_server.apps.@APP_NAME@]
app_name = @APP_NAME@
app_type = pegasus
partition_count = @PARTITION_COUNT@
[meta_server.apps.stat]
app_name = stat
app_type = pegasus
partition_count = 4
[pegasus.server]
perf_counter_enable_logging = false
# The sink that metrics are reported to. If no value is given, no sink is used.
# Options:
# - falcon
# - prometheus
perf_counter_sink = prometheus
# The HTTP port exposed to Prometheus for pulling metrics from the Pegasus server.
prometheus_port = @PROMETHEUS_PORT@
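# For reference, a minimal scrape job in an external prometheus.yml (not part of this file;
# host and port are assumptions) could pull metrics from this endpoint:
#   - job_name: 'pegasus'
#     static_configs:
#       - targets: ['@LOCAL_IP@:@PROMETHEUS_PORT@']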
[pegasus.collector]
available_detect_app = @APP_NAME@
available_detect_alert_script_dir = ./package/bin
usage_stat_app = stat
enable_detect_hotkey = false
[pegasus.clusters]
onebox = @LOCAL_IP@:34601,@LOCAL_IP@:34602,@LOCAL_IP@:34603
onebox2 = 0.0.0.0:35601
# The group of clusters participating in duplication.
# Each cluster is assigned a unique cluster id in [1, 127] to identify which cluster
# a write originates from.
[duplication-group]
onebox = 1
onebox2 = 2
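# Example (hypothetical): to add a third cluster to the duplication group, list its meta
# servers under [pegasus.clusters] and assign it an unused id in [1, 127] here, e.g.
#   onebox3 = 3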
[zookeeper]
hosts_list = 127.0.0.1:22181
timeout_ms = 60000
logfile = zoo.log
[task..default]
is_trace = false
is_profile = false
allow_inline = false
fast_execution_in_network_thread = false
rpc_call_header_format = NET_HDR_DSN
rpc_call_channel = RPC_CHANNEL_TCP
rpc_timeout_milliseconds = 5000
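# The per-task sections below override these defaults for specific task codes, e.g. enabling
# profiling (is_profile = true) for the client-facing RRDB RPCs and the replication prepare RPCs.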
[task.RPC_PREPARE]
is_profile = true
[task.RPC_PREPARE_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_PUT]
is_profile = true
profiler::size.request.server = true
[task.RPC_RRDB_RRDB_PUT_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_MULTI_PUT]
is_profile = true
profiler::size.request.server = true
[task.RPC_RRDB_RRDB_MULTI_PUT_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_REMOVE]
is_profile = true
[task.RPC_RRDB_RRDB_REMOVE_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_MULTI_REMOVE]
is_profile = true
[task.RPC_RRDB_RRDB_MULTI_REMOVE_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_INCR]
is_profile = true
[task.RPC_RRDB_RRDB_INCR_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_CHECK_AND_SET]
is_profile = true
[task.RPC_RRDB_RRDB_CHECK_AND_SET_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_CHECK_AND_MUTATE]
is_profile = true
[task.RPC_RRDB_RRDB_CHECK_AND_MUTATE_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_GET]
is_profile = true
profiler::size.response.server = true
[task.RPC_RRDB_RRDB_GET_ACK]
is_profile = true
[task.RPC_RRDB_RRDB_MULTI_GET]
is_profile = true
profiler::size.response.server = true