#/**
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
## Bookie settings
# Max file size of an entry log file, in bytes
# A new entry log file will be created when the old one reaches this size limit
logSizeLimit=1073741823
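# For reference: 1073741823 bytes is one byte under 1 GiB, so entry logs roll at
# roughly 1 GiB. An illustrative smaller roll size (assumed value, commented out):
# logSizeLimit=536870912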
# Max file size of a journal file, in megabytes
# A new journal file will be created when the old one reaches this size limit
#
journalMaxSizeMB=2048
# Max number of old journal files to keep
# Keeping a number of old journal files helps data recovery in special cases
#
journalMaxBackups=5
# Interval between garbage collection runs, in milliseconds
# Since garbage collection runs in the background, too-frequent gc
# will hurt performance. It is better to use a longer gc
# interval if there is enough disk capacity.
#
# gc every 20 minutes (even if there is nothing to gc, it scans entry log files
# to build the ledger mapping for the next gc cycle; this helps if we have a pretty
# high write volume)
gcWaitTime=1200000
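# Illustrative alternative (assumption: ample disk headroom and moderate write
# volume), commented out so the 20-minute value above stays in effect:
# gcWaitTime=3600000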
# run minor compaction every hour
minorCompactionInterval=3600
minorCompactionThreshold=0.2
# disable major compaction
majorCompactionInterval=0
# reduce major compaction threshold to a low value to prevent bad force compaction behavior
majorCompactionThreshold=0.3
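# How these settings interact: every minorCompactionInterval seconds (1 hour here),
# entry log files whose surviving data is below minorCompactionThreshold (20%) are
# rewritten and reclaimed. With majorCompactionInterval=0, major compaction never
# runs, so only near-empty logs are compacted. An illustrative daily major
# compaction instead (assumed values, commented out):
# majorCompactionInterval=86400
# majorCompactionThreshold=0.5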
# disk usage
diskUsageThreshold=0.97
# increase warn threshold to avoid bad force compaction behavior
diskUsageWarnThreshold=0.96
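# Effect of the values above: a ledger disk is considered full at 97% usage, at
# which point writes stop (and, with readOnlyModeEnabled=true below, the bookie
# turns read-only); crossing the 96% warn threshold triggers forced gc/compaction
# first. Illustrative lower watermarks for smaller disks (assumed values,
# commented out):
# diskUsageThreshold=0.90
# diskUsageWarnThreshold=0.85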
# Interval between flushes of ledger index pages to disk, in milliseconds
# Flushing index files introduces a lot of random disk I/O.
# If the journal dir and ledger dirs are on separate devices,
# flushing will not affect performance. But if the journal dir
# and ledger dirs share a device, performance degrades significantly
# with too-frequent flushing. You can consider increasing the flush interval
# to get better performance, but you will spend more time on bookie
# server restart after a failure.
#
flushInterval=1000
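# Illustrative tuning (assumption: journal dir and ledger dirs share one device,
# and a longer restart after a crash is acceptable), commented out:
# flushInterval=10000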
# ZooKeeper client session timeout in milliseconds
# The bookie server will exit if it receives SESSION_EXPIRED because it
# was partitioned off from ZooKeeper for longer than the session timeout.
# JVM garbage collection pauses and disk I/O can cause SESSION_EXPIRED.
# Increasing this value can help avoid the issue.
zkTimeout=60000
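# Note: the effective session timeout is negotiated with the ZooKeeper ensemble
# and is capped by its maxSessionTimeout, so raising zkTimeout alone may not be
# enough (assumption about the ensemble's settings). Illustrative higher value,
# commented out:
# zkTimeout=120000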
## NIO Server settings
# This setting is used to enable/disable Nagle's algorithm, which is a means of
# improving the efficiency of TCP/IP networks by reducing the number of packets
# that need to be sent over the network.
# If you are sending many small messages, such that more than one can fit in
# a single IP packet, setting serverTcpNoDelay to false to enable Nagle's algorithm
# can provide better performance.
# Default value is true.
#
serverTcpNoDelay=true
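# Illustrative alternative for workloads dominated by many small adds (assumption
# about traffic shape), commented out so the default above stays in effect:
# serverTcpNoDelay=false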
## ledger cache settings
# Max number of ledger index files that can be opened in a bookie server
# If the number of ledger index files reaches this limit, the bookie
# server starts to swap some ledgers from memory to disk.
# Too-frequent swapping will affect performance. You can tune this number
# to gain performance according to your requirements.
openFileLimit=20000
# Size of an index page in the ledger cache, in bytes
# A larger index page improves the performance of writing pages to disk,
# which is efficient when you have a small number of ledgers and these
# ledgers have similar numbers of entries.
# If you have a large number of ledgers and each ledger has fewer entries,
# a smaller index page improves memory usage.
pageSize=8192
# How many index pages are kept in the ledger cache
# If the number of index pages reaches this limit, the bookie server
# starts to swap some ledgers from memory to disk. You can increase
# this value when swapping becomes more frequent. But make sure
# pageLimit*pageSize is not more than the JVM max memory limit,
# otherwise you will get an OutOfMemoryError.
# In general, increasing pageLimit while using a smaller index page gives
# better performance in the case of a large number of ledgers with fewer entries.
# If pageLimit is -1, the bookie server will use 1/3 of the JVM memory to compute
# the limit on the number of index pages.
pageLimit=131072
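# Worked example for the values above: pageLimit * pageSize = 131072 * 8192 bytes
# = 1 GiB of ledger index pages held in memory at most; make sure the JVM heap
# leaves room for this. An illustrative smaller cache (assumed value, commented out):
# pageLimit=32768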
# If all configured ledger directories are full, serve only read requests from clients.
# If readOnlyModeEnabled=true, then when all ledger disks are full the bookie will be
# converted to read-only mode and serve only read requests. Otherwise the bookie will be shut down.
readOnlyModeEnabled=true
# Bookie Journal Settings
writeBufferSizeBytes=524288
journalFlushWhenQueueEmpty=false
journalRemoveFromPageCache=true
journalAdaptiveGroupWrites=true
journalMaxGroupWaitMSec=15
journalBufferedEntriesThreshold=180
journalBufferedWritesThreshold=262144
journalMaxGroupedEntriesToCommit=200
journalPreAllocSizeMB=4
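# How the group-commit knobs above interact (roughly): with adaptive group writes
# enabled and journalFlushWhenQueueEmpty=false, a journal force-write is issued when
# the oldest buffered add has waited journalMaxGroupWaitMSec (15 ms), or
# journalBufferedEntriesThreshold (180 entries) or journalBufferedWritesThreshold
# (256 KiB) of data has accumulated, whichever comes first. Illustrative
# lower-latency settings (assumed values, commented out):
# journalMaxGroupWaitMSec=5
# journalBufferedWritesThreshold=65536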
# Sorted Ledger Storage Settings
sortedLedgerStorageEnabled=true
skipListSizeLimit=67108864
skipListArenaChunkSize=2097152
skipListArenaMaxAllocSize=131072
fileInfoCacheInitialCapacity=10000
fileInfoMaxIdleTime=3600
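# For reference: the skip list (the write cache used by sorted ledger storage)
# flushes to entry logs once it holds skipListSizeLimit = 64 MiB of entries,
# allocating memory in skipListArenaChunkSize = 2 MiB chunks; entries larger than
# skipListArenaMaxAllocSize = 128 KiB are allocated outside the arena.
# fileInfoMaxIdleTime is in seconds, so cached file info is evicted after an hour
# of inactivity. Illustrative larger write cache (assumed value, commented out):
# skipListSizeLimit=134217728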
# Bookie Threads Settings
numAddWorkerThreads=24
numJournalCallbackThreads=48
numReadWorkerThreads=72
numLongPollWorkerThreads=72
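# Sizing note: the thread counts above are tuned for a large machine; a rough
# starting point is to scale them with the number of CPU cores (an assumption,
# not a BookKeeper requirement). Illustrative values for a smaller host,
# commented out:
# numAddWorkerThreads=8
# numJournalCallbackThreads=16
# numReadWorkerThreads=24
# numLongPollWorkerThreads=24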