#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#################### System Configuration ##################
##Optional. Location where CarbonData will create the store and write the data in its own format.
##If not specified, the spark.sql.warehouse.dir path is used.
#carbon.storelocation
#Base directory for Data files
#carbon.ddl.base.hdfs.url
#Path where the bad records are stored
#carbon.badRecords.location
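##Illustrative example values for the paths above (the hostname and paths below are placeholders, not defaults; adjust to your environment):
#carbon.storelocation=hdfs://namenode:9000/carbon/store
#carbon.ddl.base.hdfs.url=hdfs://namenode:9000/carbon/data
#carbon.badRecords.location=/opt/carbon/badrecords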
#################### Performance Configuration ##################
######## DataLoading Configuration ########
#File read buffer size used during sorting (in MB). MIN=1, MAX=100
carbon.sort.file.buffer.size=10
#Number of cores to be used while data loading
carbon.number.of.cores.while.loading=2
#Record count to sort and write to temp intermediate files
carbon.sort.size=100000
#Use the xxHash algorithm for hash key calculation in hashmaps
carbon.enableXXHash=true
#Enable prefetch of data during merge sort while reading data from sort temp files during data loading
#carbon.merge.sort.prefetch=true
######## Alter Partition Configuration ########
#Number of cores to be used while altering partitions
carbon.number.of.cores.while.alterPartition=2
######## Compaction Configuration ########
#Number of cores to be used while compacting
carbon.number.of.cores.while.compacting=2
#For minor compaction: number of segments to be merged in stage 1, and number of compacted segments to be merged in stage 2.
carbon.compaction.level.threshold=4,3
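#For example, with 4,3: stage 1 merges every 4 loaded segments into one compacted segment, and stage 2 merges every 3 such compacted segments into a further compacted segment.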
#Default size (in MB) for major compaction to be triggered
carbon.major.compaction.size=1024
#################### Extra Configuration ##################
##Timestamp format of input data used for timestamp data type.
#carbon.timestamp.format=yyyy-MM-dd HH:mm:ss
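##For example, with the default format above, an input value such as 2016-01-01 12:30:45 is parsed as a timestamp (the sample value is illustrative).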
######## Dataload Configuration ########
##File write buffer size used during sorting.
#carbon.sort.file.write.buffer.size=16384
##Locking mechanism for data loading on a table
#carbon.lock.type=LOCALLOCK
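##LOCALLOCK locks on the local file system; distributed deployments may use other lock types such as HDFSLOCK (named here as a typical alternative, not taken from this template).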
##Minimum number of intermediate files after which the intermediate sort merge is started.
#carbon.sort.intermediate.files.limit=20
##Space reserved (in percentage) for writing block metadata in the CarbonData file
#carbon.block.meta.size.reserved.percentage=10
##CSV read buffer size (in bytes).
#carbon.csv.read.buffersize.byte=1048576
##Number of threads used for reading intermediate files during final merging.
#carbon.merge.sort.reader.thread=3
##To enable/disable custom CarbonData block distribution
#carbon.custom.block.distribution=false
######## Compaction Configuration ########
##To specify the number of segments to be preserved from compaction
#carbon.numberof.preserve.segments=0
##Number of days within which loaded segments are considered for compaction
#carbon.allowed.compaction.days=0
##To enable auto compaction during data loading
#carbon.enable.auto.load.merge=false
######## Query Configuration ########
##Maximum time allowed for one query to be executed.
#max.query.execution.time=60
##The min/max feature is added to enhance query performance. To disable this feature, set it to false.
#carbon.enableMinMax=true
######## Global Dictionary Configurations ########
##The date to be considered as the start date for calculating timestamps.
#carbon.cutOffTimestamp
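##Illustrative example (placeholder value, not a default): setting carbon.cutOffTimestamp as below makes 2000-01-01 the reference start date for timestamp calculation.
#carbon.cutOffTimestamp=2000-01-01 00:00:00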
##The granularity to which timestamps (i.e. millis) are converted: SECOND, MINUTE, HOUR or DAY.
#carbon.timegranularity=SECOND
##Number of rows prefetched in the sort step
#carbon.prefetch.buffersize=1000