| # |
| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, |
| # software distributed under the License is distributed on an |
| # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| # KIND, either express or implied. See the License for the |
| # specific language governing permissions and limitations |
| # under the License. |
| # |
| #################### |
| ### enable Tri |
| #################### |
| # MinMax, MinMaxLTTB, M4, LTTB, ILTS, SimPiece, SC, FSW, Uniform, DWT, Visval |
| enable_Tri="" |
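| # For illustration only: to enable one of the algorithms listed above (assuming the name is accepted verbatim), e.g. |
| # enable_Tri="MinMax" |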
| |
| # segment error threshold for SimPiece, SC, FSW |
| epsilon=100 |
| |
| auto_p1n=false |
| |
| p1t=0 |
| p1v=0 |
| |
| pnt=100 |
| pnv=100 |
| |
| # for MinMaxLTTB |
| #rps=2 |
| |
| # for ILTS |
| numIterations=4 |
| acc_avg=false |
| acc_rectangle=false |
| acc_convex=false |
| acc_iterRepeat=false |
| |
| #################### |
| ### enable CPV |
| #################### |
| enable_CPV=false |
| |
| #################### |
| ### Web Page Configuration |
| #################### |
| |
| # enable_metric_service=false |
| |
| # metrics_port=8181 |
| |
| # query_cache_size_in_metric=50 |
| |
| #################### |
| ### RPC Configuration |
| #################### |
| |
| rpc_address=0.0.0.0 |
| |
| rpc_port=6667 |
| |
| # rpc_thrift_compression_enable=false |
| |
| # if true, a snappy based compression method will be called before sending data by the network |
| # rpc_advanced_compression_enable=false |
| |
| # rpc_max_concurrent_client_num=65535 |
| |
| # thrift max frame size, 512MB by default |
| # thrift_max_frame_size=536870912 |
| |
| # thrift init buffer size |
| # thrift_init_buffer_size=1024 |
| |
| #################### |
| ### Write Ahead Log Configuration |
| #################### |
| |
| # Whether the write ahead log is enabled |
| # enable_wal=true |
| |
| # A switch to drop out-of-order data |
| # Out-of-order data will impact the aggregation query a lot. Users who do not care about losing some out-of-order data may enable this. |
| # enable_discard_out_of_order_data=false |
| |
| # When this number of buffered write ahead log entries is reached, they will be flushed to disk |
| # It is possible to lose at most flush_wal_threshold operations |
| # flush_wal_threshold=10000 |
| |
| # The period (in milliseconds) at which the write ahead log is forced to be written to disk |
| # If force_wal_period_in_ms = 0, the write ahead log is forced to disk after each refreshment |
| # Setting this parameter to 0 may slow down ingestion on slow disks. |
| # force_wal_period_in_ms=100 |
| |
| #################### |
| ### Directory Configuration |
| #################### |
| |
| # system dir |
| # If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/system). |
| # If it is absolute, system will save the data in the exact location it points to. |
| # If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. |
| # For Windows platform |
| # If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. |
| # system_dir=data\\system |
| # For Linux platform |
| # If its prefix is "/", then the path is absolute. Otherwise, it is relative. |
| # system_dir=data/system |
| |
| |
| # data dirs |
| # If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data/data). |
| # If it is absolute, system will save the data in the exact location it points to. |
| # If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. |
| # Note: If data_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. |
| # For Windows platform |
| # If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. |
| # data_dirs=data\\data |
| # For Linux platform |
| # If its prefix is "/", then the path is absolute. Otherwise, it is relative. |
| # data_dirs=data/data |
| |
| |
| # multi_dir_strategy |
| # The strategy is used to choose a directory from data_dirs for the system to store a new tsfile. |
| # System provides four strategies to choose from, or user can create his own strategy by extending org.apache.iotdb.db.conf.directories.strategy.DirectoryStrategy. |
| # The info of the four strategies is as follows: |
| # 1. SequenceStrategy: the system will choose the directory in sequence. |
| # 2. MaxDiskUsableSpaceFirstStrategy: the system will choose the directory whose disk has the maximum space. |
| # 3. MinFolderOccupiedSpaceFirstStrategy: the system will choose the directory whose folder has the minimum occupied space. |
| # 4. RandomOnDiskUsableSpaceStrategy: the system will randomly choose a directory based on the usable space of the disks. The more usable space, the greater the chance of being chosen. |
| # Set the property to one of the strategy names above to apply the corresponding strategy. |
| # If this property is unset, system will use MaxDiskUsableSpaceFirstStrategy as the default strategy. |
| # For this property, fully-qualified class name (including package name) and simple class name are both acceptable. |
| # multi_dir_strategy=MaxDiskUsableSpaceFirstStrategy |
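| # Illustrative sketch, assuming data_dirs accepts a comma-separated list of directories for the strategy to choose from: |
| # data_dirs=data/data1,data/data2 |
| # multi_dir_strategy=SequenceStrategy |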
| |
| |
| # wal dir |
| # If this property is unset, system will save the data in the default relative path directory under the IoTDB folder(i.e., %IOTDB_HOME%/data). |
| # If it is absolute, system will save the data in the exact location it points to. |
| # If it is relative, system will save the data in the relative path directory it indicates under the IoTDB folder. |
| # Note: If wal_dir is assigned an empty string(i.e.,zero-size), it will be handled as a relative path. |
| # For Windows platform |
| # If its prefix is a drive specifier followed by "\\", or if its prefix is "\\\\", then the path is absolute. Otherwise, it is relative. |
| # wal_dir=data\\wal |
| # For Linux platform |
| # If its prefix is "/", then the path is absolute. Otherwise, it is relative. |
| # wal_dir=data/wal |
| |
| |
| # TsFile storage file system. Currently, TsFiles can be stored in the LOCAL file system or HDFS. |
| # tsfile_storage_fs=LOCAL |
| |
| # If using HDFS, the absolute file path of Hadoop core-site.xml should be configured |
| # core_site_path=/etc/hadoop/conf/core-site.xml |
| |
| # If using HDFS, the absolute file path of Hadoop hdfs-site.xml should be configured |
| # hdfs_site_path=/etc/hadoop/conf/hdfs-site.xml |
| |
| # If using HDFS, the Hadoop IP can be configured. If there is more than one hdfs_ip, Hadoop HA is used |
| # hdfs_ip=localhost |
| |
| # If using HDFS, hadoop port can be configured |
| # hdfs_port=9000 |
| |
| # If there is more than one hdfs_ip, Hadoop HA is used. Below are the configurations for HA |
| # If using Hadoop HA, nameservices of hdfs can be configured |
| # dfs_nameservices=hdfsnamespace |
| |
| # If using Hadoop HA, namenodes under dfs nameservices can be configured |
| # dfs_ha_namenodes=nn1,nn2 |
| |
| # If using Hadoop HA, automatic failover can be enabled or disabled |
| # dfs_ha_automatic_failover_enabled=true |
| |
| # If using Hadoop HA and enabling automatic failover, the proxy provider can be configured |
| # dfs_client_failover_proxy_provider=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider |
| |
| # If using kerberos to authenticate hdfs, this should be true |
| # hdfs_use_kerberos=false |
| |
| # Full path of kerberos keytab file |
| # kerberos_keytab_file_path=/path |
| |
| # Kerberos principal |
| # kerberos_principal=your principal |
| |
| |
| #################### |
| ### Storage Engine Configuration |
| #################### |
| |
| # Use this value to set timestamp precision as "ms", "us" or "ns". |
| # Once the precision has been set, it cannot be changed. |
| timestamp_precision=ms |
| |
| # Default TTL (in ms) for storage groups whose TTL is not set by statements. If not set (the default), |
| # the TTL will be unlimited. |
| # Notice: if this property is changed, previously created storage groups that have no TTL will |
| # also be affected. Negative values are accepted, which means you can only insert future |
| # data. |
| # default_ttl=36000000 |
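| # For reference, 36000000 ms is 10 hours; to keep roughly 30 days of data one could set, for example: |
| # default_ttl=2592000000 |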
| |
| # The size of the log buffer in each log node (in bytes). Due to the double buffer mechanism, |
| # if WAL is enabled and the size of the inserted plan is greater than one-half of this parameter, |
| # then the insert plan will be rejected by WAL. |
| # If set to a value smaller than 0, the default value 16777216 is used |
| # wal_buffer_size=16777216 |
| |
| # When an unsequence TsFile's file size (in bytes) exceeds this, the TsFile is forced closed. |
| # A large value may cause the memTable size to be smaller |
| unseq_tsfile_size=1073741824 |
| |
| # When a sequence TsFile's file size (in bytes) exceeds this, the TsFile is forced closed. |
| # A large value may cause the memTable size to be smaller |
| seq_tsfile_size=1073741824 |
| |
| # Size of the log buffer for each metadata operation plan (in bytes). |
| # If the size of a metadata operation plan is larger than this parameter, it will be rejected by MManager |
| # If set to a value smaller than 0, the default value 1024*1024 is used |
| # mlog_buffer_size=1048576 |
| |
| # When a memTable's size (in byte) exceeds this, the memtable is flushed to disk. The default threshold is 1 GB. |
| # memtable_size_threshold=1073741824 |
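| # For reference, 1073741824 bytes is 1 GiB; a memory-constrained deployment might use, for example, 256 MiB: |
| # memtable_size_threshold=268435456 |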
| |
| # Whether to enable timed flushing of sequence TsFiles' memtables. |
| # Datatype: boolean |
| # enable_timed_flush_seq_memtable=false |
| |
| # If a memTable's created time is older than current time minus this, the memtable will be flushed to disk. |
| # Only check sequence tsfiles' memtables. |
| # The default flush interval is 60 * 60 * 1000. (unit: ms) |
| # Datatype: long |
| # seq_memtable_flush_interval_in_ms=3600000 |
| |
| # The interval to check whether sequence memtables need flushing. |
| # The default flush check interval is 10 * 60 * 1000. (unit: ms) |
| # Datatype: long |
| # seq_memtable_flush_check_interval_in_ms=600000 |
| |
| # Whether to enable timed flushing of unsequence TsFiles' memtables. |
| # Datatype: boolean |
| # enable_timed_flush_unseq_memtable=true |
| |
| # If a memTable's created time is older than current time minus this, the memtable will be flushed to disk. |
| # Only check unsequence tsfiles' memtables. |
| # The default flush interval is 60 * 60 * 1000. (unit: ms) |
| # Datatype: long |
| # unseq_memtable_flush_interval_in_ms=3600000 |
| |
| # The interval to check whether unsequence memtables need flushing. |
| # The default flush check interval is 10 * 60 * 1000. (unit: ms) |
| # Datatype: long |
| # unseq_memtable_flush_check_interval_in_ms=600000 |
| |
| # Whether to enable timed closing of TsFiles. |
| # Datatype: boolean |
| # enable_timed_close_tsfile=true |
| |
| # If a TsfileProcessor's last working memtable flush time is older than current time minus this and its working memtable is null, the TsfileProcessor will be closed. |
| # The default close interval is 60 * 60 * 1000. (unit: ms) |
| # Datatype: long |
| # close_tsfile_interval_after_flushing_in_ms=3600000 |
| |
| # The interval to check whether tsfiles need closing. |
| # The default close check interval is 10 * 60 * 1000. (unit: ms) |
| # Datatype: long |
| # close_tsfile_check_interval_in_ms=600000 |
| |
| # When the average point number of timeseries in memtable exceeds this, the memtable is flushed to disk. The default threshold is 10000. |
| avg_series_point_number_threshold=10000 |
| |
| # How many threads can concurrently flush. When <= 0, use CPU core number. |
| # concurrent_flush_thread=0 |
| |
| # How many threads can concurrently query. When <= 0, use CPU core number. |
| # concurrent_query_thread=8 |
| |
| # whether IoTDB takes over memory management from the JVM when serializing a memtable as bytes in memory |
| # (i.e., whether to use ChunkBufferPool); value: true or false |
| # chunk_buffer_pool_enable=false |
| |
| # The amount of data iterated over each time in the server (the number of data rows, that is, the number of different timestamps). |
| # batch_size=100000 |
| |
| # max size for tag and attribute of one time series |
| # the unit is byte |
| # tag_attribute_total_size=700 |
| |
| # interval, in number of records, for force flushing tag and attribute records to disk |
| # When this number of tag and attribute records is reached, they will be force flushed to disk |
| # It is possible to lose at most tag_attribute_flush_interval records |
| # tag_attribute_flush_interval=1000 |
| |
| # In one insert (one device, one timestamp, multiple measurements), |
| # if partial insert is enabled, a failure in one measurement will not impact the other measurements |
| # enable_partial_insert=true |
| |
| # Whether to enable MTree snapshot. Default false from 0.11.0 on. |
| # enable_mtree_snapshot=false |
| |
| # The minimum number of mlog.txt lines between creating a checkpoint and saving a snapshot of the MTree. |
| # Only takes effect when enable_mtree_snapshot=true. Unit: number of lines |
| # mtree_snapshot_interval=100000 |
| |
| # Threshold interval of MTree modification. Unit: seconds. Default: 1 hour (3600 seconds) |
| # If the time since the last modification is less than this threshold, an MTree snapshot will not be created |
| # Only takes effect when enable_mtree_snapshot=true. |
| # mtree_snapshot_threshold_time=3600 |
| |
| # number of virtual storage groups per user-defined storage group |
| # a virtual storage group is the unit of parallelism in memory as all ingestions in one virtual storage group are serialized |
| # recommended value is [virtual storage group number] = [CPU core number] / [user-defined storage group number] |
| # virtual_storage_group_num = 1 |
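| # Illustrative calculation of the recommendation above: with 16 CPU cores and 4 user-defined storage groups, 16 / 4 = 4, so one might set: |
| # virtual_storage_group_num = 4 |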
| |
| # Level of TimeIndex, which records the start time and end time of TsFileResource. Currently, |
| # DEVICE_TIME_INDEX and FILE_TIME_INDEX are supported, and it cannot be changed after it is first set. |
| # time_index_level=DEVICE_TIME_INDEX |
| |
| #################### |
| ### Memory Control Configuration |
| #################### |
| |
| # Whether to enable memory control |
| # enable_mem_control=true |
| |
| # Memory Allocation Ratio: Write, Read, Schema and Free Memory. |
| # The parameter form is a:b:c:d, where a, b, c and d are integers. For example: 1:1:1:1, 6:2:1:1 |
| # If you have a high write load and a low read load, adjust it to, for example, 6:1:1:2 |
| # write_read_schema_free_memory_proportion=4:3:1:2 |
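| # Illustrative arithmetic, assuming a 10 GB memory budget for the sake of the example: 4:3:1:2 gives 4 GB to write, 3 GB to read, 1 GB to schema and 2 GB free. |
| # A write-heavy workload could instead use, for example: |
| # write_read_schema_free_memory_proportion=6:1:1:2 |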
| |
| # primitive array size (length of each array) in array pool |
| # primitive_array_size=32 |
| |
| # Ratio of write memory for invoking flush disk, 0.4 by default |
| # If you have extremely high write load (like batch=1000), it can be set lower than the default value like 0.2 |
| # flush_proportion=0.4 |
| |
| # Ratio of write memory allocated for buffered arrays, 0.6 by default |
| # buffered_arrays_memory_proportion=0.6 |
| |
| # Ratio of write memory for rejecting insertion, 0.8 by default |
| # If you have extremely high write load (like batch=1000) and the physical memory size is large enough, |
| # it can be set higher than the default value like 0.9 |
| # reject_proportion=0.8 |
| |
| # If the memory (in bytes) of a storage group increases by more than this threshold, report it to the system. The default value is 16MB |
| # storage_group_report_threshold=16777216 |
| |
| # maximum allowed number of deduplicated paths in one query |
| # this is just an advised value; the real limit is the smaller of this value and the one we calculate |
| # max_deduplicated_path_num=1000 |
| |
| # When an insertion is rejected, waiting period (in ms) to check the system again, 50 by default. |
| # If the insertion has been rejected and the read load is low, it can be set larger. |
| # check_period_when_insert_blocked=50 |
| |
| # When the waiting time (in ms) of an insertion exceeds this, throw an exception. 10000 by default. |
| # If the insertion has been rejected and the read load is low, it can be set larger |
| # max_waiting_time_when_insert_blocked=10000 |
| |
| # estimated metadata size (in bytes) of one timeseries in the MTree |
| # estimated_series_size=300 |
| |
| # size of ioTaskQueue. The default value is 10 |
| # io_task_queue_size_for_flushing=10 |
| |
| #################### |
| ### Upgrade Configurations |
| #################### |
| |
| # When old version (0.9.x/v1) data exists, how many threads will be set up to perform upgrade tasks, 1 by default. |
| # Set to 1 when less than or equal to 0. |
| # upgrade_thread_num=1 |
| |
| |
| #################### |
| ### Query Configurations |
| #################### |
| |
| # the default time period used in fill queries, in ms; -1 (the default) means infinite past time |
| # default_fill_interval=-1 |
| |
| # The max execution time of a query. Unit: ms |
| # query_timeout_threshold=60000 |
| |
| #################### |
| ### Merge Configurations |
| #################### |
| # LEVEL_COMPACTION, NO_COMPACTION |
| compaction_strategy=NO_COMPACTION |
| |
| # Works when the compaction_strategy is LEVEL_COMPACTION. |
| # Whether to merge unseq files into seq files or not. |
| enable_unseq_compaction=false |
| |
| # Start compaction task at this delay, unit is ms |
| # compaction_interval=30000 |
| |
| # Works when the compaction_strategy is LEVEL_COMPACTION. |
| # The max seq file num of each level. |
| # When the number of files in one level exceeds this, |
| # the files in this level will be merged into one and moved to the upper level. |
| # seq_file_num_in_each_level=6 |
| |
| # Works when the compaction_strategy is LEVEL_COMPACTION. |
| # The max num of seq level. |
| # seq_level_num=3 |
| |
| # Works when compaction_strategy is LEVEL_COMPACTION. |
| # The max unseq file num of each level. |
| # When the number of files in one level exceeds this, |
| # the files in this level will be merged into one and moved to the upper level. |
| # unseq_file_num_in_each_level=10 |
| |
| # Works when the compaction_strategy is LEVEL_COMPACTION. |
| # The max num of unseq level. |
| # unseq_level_num=1 |
| |
| # Works when compaction_strategy is LEVEL_COMPACTION. |
| # The max open file num in each unseq compaction task. |
| # We use the unseq file num as the open file num |
| # This parameter has to be much smaller than the max open file num per process permitted by the operating system (65535 on most systems) |
| # Datatype: int |
| # max_select_unseq_file_num_in_each_unseq_compaction=2000 |
| |
| # Works when the compaction_strategy is LEVEL_COMPACTION. |
| # When the average point number of chunks in the target file reaches this, merge the file to the top level. |
| # During a merge, if a chunk has fewer points than this parameter, the chunk will be |
| # merged with its succeeding chunks even if it is not overflowed, until the merged chunks reach |
| # this threshold, and then the new chunk will be flushed. |
| # When less than 0, this mechanism is disabled. |
| # merge_chunk_point_number=100000 |
| |
| # Works when the compaction_strategy is LEVEL_COMPACTION. |
| # When point number of a page reaches this, use "append merge" instead of "deserialize merge". |
| # merge_page_point_number=100 |
| |
| # How many threads will be used to perform merge tasks |
| # Datatype: int |
| # merge_thread_num=1 |
| |
| # How many threads will be set up to perform unseq merge chunk sub-tasks, 4 by default. |
| # Set to 1 when less than or equal to 0. |
| # merge_chunk_subthread_num=4 |
| |
| # If one merge file selection runs for more than this time, it will be ended and its current |
| # selection will be used as the final selection. Unit: ms. |
| # When < 0, the time is unbounded. |
| # merge_fileSelection_time_budget=30000 |
| |
| # How much memory may be used in ONE merge task (in byte), 10% of maximum JVM memory by default. |
| # This is only a rough estimation, starting from a relatively small value to avoid OOM. |
| # Each new merge thread may take such memory, so merge_thread_num * merge_memory_budget is the |
| # total memory estimation of merge. |
| # merge_memory_budget=2147483648 |
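| # Illustrative estimation of the rule above: with merge_thread_num=1 and merge_memory_budget=2147483648 (2 GiB), |
| # the total merge memory estimate is roughly 1 * 2 GiB = 2 GiB. |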
| |
| # When set to true, if some crashed merges are detected during system rebooting, such merges will |
| # be continued; otherwise, the unfinished parts of such merges will not be continued while the |
| # finished parts remain as they are. |
| # If rebooting feels too slow, set this to false; false by default |
| # continue_merge_after_reboot=false |
| |
| # When set to true, all unseq merges become full merges (the whole SeqFiles are rewritten regardless of how |
| # much they are overflowed). This may increase merge overhead depending on how much the SeqFiles |
| # are overflowed. |
| # force_full_merge=true |
| |
| # How many threads will be set up to perform compaction, 10 by default. |
| # Set to 1 when less than or equal to 0. |
| # compaction_thread_num=10 |
| |
| # The limit of write throughput (MB per second) that merge can reach |
| # merge_write_throughput_mb_per_sec=8 |
| |
| #################### |
| ### Metadata Cache Configuration |
| #################### |
| |
| # whether to cache metadata (ChunkMetadata and TimeSeriesMetadata) or not. |
| # meta_data_cache_enable=true |
| # Read memory allocation ratio: ChunkCache, TimeSeriesMetadataCache, memory used for constructing QueryDataSet, and free memory used in query. |
| # The parameter form is a:b:c:d, where a, b, c and d are integers. For example: 1:1:1:1, 1:2:3:4 |
| # chunk_timeseriesmeta_free_memory_proportion=1:2:3:4 |
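| # Illustrative arithmetic, assuming 1 GB of read memory for the example: the 1:2:3:4 split gives about 100 MB to ChunkCache, |
| # 200 MB to TimeSeriesMetadataCache, 300 MB to QueryDataSet construction and 400 MB free. |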
| |
| # cache size for MManager. |
| # This cache is used to improve insert speed: all path checks and TSDataType will be cached in MManager with the corresponding Path. |
| # metadata_node_cache_size=300000 |
| |
| #################### |
| ### LAST Cache Configuration |
| #################### |
| |
| # Whether to enable LAST cache |
| # enable_last_cache=true |
| |
| #################### |
| ### Statistics Monitor configuration |
| #################### |
| |
| # Set enable_stat_monitor true (or false) to enable (or disable) the StatMonitor that stores statistics info. |
| # enable_stat_monitor=false |
| |
| # Set enable_monitor_series_write true (or false) to enable (or disable) the writing of monitor time series |
| # enable_monitor_series_write=false |
| |
| #################### |
| ### WAL Direct Buffer Pool Configuration |
| #################### |
| # the interval to trim the wal pool |
| # wal_pool_trim_interval_ms=10000 |
| |
| # the max number of WAL bytebuffers that can be allocated for each time partition; if there is no unseq data you can set it to 4. |
| # it should be an even number |
| # max_wal_bytebuffer_num_for_each_partition=6 |
| |
| #################### |
| ### External sort Configuration |
| #################### |
| # Whether external sort is enabled |
| # enable_external_sort=true |
| |
| # The maximum number of simultaneous chunk reads for a single time series. |
| # If the number of simultaneous chunk reads is greater than external_sort_threshold, external sorting is used. |
| # When external_sort_threshold increases, the number of chunks sorted at the same time in memory may increase, occupying more memory. |
| # When external_sort_threshold decreases, external sorting is triggered more often, which increases the time cost. |
| # external_sort_threshold=1000 |
| |
| |
| #################### |
| ### Sync Server Configuration |
| #################### |
| |
| # Whether to open the sync_server_port for receiving data from the sync client; closed by default |
| # is_sync_enable=false |
| |
| # Sync server port to listen |
| # sync_server_port=5555 |
| |
| # IP white list of the Sync client. |
| # Use network segments to express IP ranges, for example: 192.168.0.0/16 |
| # If there is more than one IP segment, separate them with commas |
| # The default allows all IPs to sync |
| # ip_white_list=0.0.0.0/0 |
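| # Illustrative example with two segments, separated by a comma as described above: |
| # ip_white_list=192.168.0.0/16,10.0.0.0/8 |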
| |
| #################### |
| ### performance statistic configuration |
| #################### |
| |
| # Whether performance statistics of sub-modules are enabled |
| # enable_performance_stat=false |
| # The interval for displaying statistics results, in ms. |
| # performance_stat_display_interval=60000 |
| # The memory used for performance_stat, in KB. |
| # performance_stat_memory_in_kb=20 |
| # Whether performance tracing is enabled |
| # enable_performance_tracing=false |
| |
| # Uncomment the following fields to configure the tracing root directory. |
| # For Windows platform, the path is as follows: |
| # tracing_dir=data\\tracing |
| # For Linux platform |
| # If its prefix is "/", then the path is absolute. Otherwise, it is relative. |
| # tracing_dir=data/tracing |
| |
| #################### |
| ### Configurations for watermark module |
| #################### |
| # watermark_module_opened=false |
| # watermark_secret_key=IoTDB*2019@Beijing |
| # watermark_bit_string=100101110100 |
| # watermark_method=GroupBasedLSBMethod(embed_row_cycle=2,embed_lsb_num=5) |
| |
| |
| #################### |
| ### Configurations for creating schema automatically |
| #################### |
| |
| # Whether creating schema automatically is enabled |
| # enable_auto_create_schema=true |
| |
| # Storage group level when creating schema automatically is enabled |
| # e.g. root.sg0.d1.s2 |
| # we will set root.sg0 as the storage group if storage group level is 1 |
| # default_storage_group_level=1 |
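| # Illustrative example following the pattern above: with level 2 and the path root.sg0.d1.s2, root.sg0.d1 would become the storage group: |
| # default_storage_group_level=2 |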
| |
| # ALL data types: BOOLEAN, INT32, INT64, FLOAT, DOUBLE, TEXT |
| |
| # which type to register a time series as when receiving the boolean string "true" or "false" |
| # boolean_string_infer_type=BOOLEAN |
| |
| # register time series as which type when receiving an integer string "67" |
| # integer_string_infer_type=FLOAT |
| |
| # which type to register a time series as when receiving an integer string where using float may lose precision |
| # num > 2 ^ 24 |
| # long_string_infer_type=DOUBLE |
| |
| # which type to register a time series as when receiving a floating-point number string such as "6.7" |
| # floating_string_infer_type=FLOAT |
| |
| # which type to register a time series as when receiving the literal NaN. Values can be DOUBLE, FLOAT or TEXT |
| # nan_string_infer_type=DOUBLE |
| |
| |
| # BOOLEAN encoding when creating schema automatically is enabled |
| # default_boolean_encoding=RLE |
| |
| # INT32 encoding when creating schema automatically is enabled |
| # default_int32_encoding=RLE |
| |
| # INT64 encoding when creating schema automatically is enabled |
| # default_int64_encoding=RLE |
| |
| # FLOAT encoding when creating schema automatically is enabled |
| # default_float_encoding=GORILLA |
| |
| # DOUBLE encoding when creating schema automatically is enabled |
| # default_double_encoding=GORILLA |
| |
| # TEXT encoding when creating schema automatically is enabled |
| # default_text_encoding=PLAIN |
| |
| #################### |
| ### Configurations for tsfile-format |
| #################### |
| |
| write_convex_hull=false |
| |
| enableMinMaxLSM=false |
| |
| use_Statistics=true |
| |
| use_TimeIndex=false |
| |
| use_Mad=false |
| |
| use_ValueIndex=false |
| |
| error_Param=1 |
| |
| # group_size_in_byte=134217728 |
| |
| # The memory size for each series writer to pack page, default value is 64KB |
| page_size_in_byte=1073741824 |
| |
| # The maximum number of data points in a page, default 1024*1024 |
| # max_number_of_points_in_page=1048576 |
| |
| # Data type configuration |
| # Data type for input timestamp, supports INT32 or INT64 |
| # time_series_data_type=INT64 |
| |
| # Max size limitation of input string |
| # max_string_length=128 |
| |
| # Floating-point precision |
| # float_precision=2 |
| |
| # Encoder configuration |
| # Encoder of time series; supports TS_2DIFF, PLAIN, RLE (run-length encoding) and REGULAR; the default value is TS_2DIFF |
| time_encoder=PLAIN |
| |
| # Encoder of value series; default value is PLAIN. |
| # For int and long data types, TS_2DIFF, RLE (run-length encoding) and GORILLA are also supported. |
| # For float and double data types, TS_2DIFF, RLE (run-length encoding) and GORILLA are also supported. |
| # For the text data type, only PLAIN is supported. |
| # value_encoder=PLAIN |
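| # Illustrative choice for numeric value series, using one of the encoders listed above: |
| # value_encoder=GORILLA |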
| |
| # Compression configuration |
| # Data compression method, supports UNCOMPRESSED, SNAPPY or LZ4. Default value is SNAPPY |
| # compressor=SNAPPY |
| |
| # Maximum degree of a metadataIndex node, default value is 256 |
| # max_degree_of_index_node=256 |
| |
| # time interval in minutes for calculating query frequency |
| # frequency_interval_in_minute=1 |
| |
| # time cost (ms) threshold for slow queries |
| # slow_query_threshold=5000 |
| |
| #################### |
| ### MQTT Broker Configuration |
| #################### |
| |
| # whether to enable the mqtt service. |
| # enable_mqtt_service=false |
| |
| # the mqtt service binding host. |
| # mqtt_host=0.0.0.0 |
| |
| # the mqtt service binding port. |
| # mqtt_port=1883 |
| |
| # the handler pool size for handling the mqtt messages. |
| # mqtt_handler_pool_size=1 |
| |
| # the mqtt message payload formatter. |
| # mqtt_payload_formatter=json |
| |
| # max length of mqtt message in byte |
| # mqtt_max_message_size=1048576 |
| |
| #################### |
| ### Authorization Configuration |
| #################### |
| |
| # which class serves authorization. By default, it is LocalFileAuthorizer. |
| # Another choice is org.apache.iotdb.db.auth.authorizer.OpenIdAuthorizer |
| # authorizer_provider_class=org.apache.iotdb.db.auth.authorizer.LocalFileAuthorizer |
| |
| |
| # If OpenIdAuthorizer is enabled, then openID_url must be set. |
| |
| #openID_url= |
| |
| #################### |
| ### UDF Query Configuration |
| #################### |
| |
| # Used to estimate the memory usage of text fields in a UDF query. |
| # It is recommended to set this value to be slightly larger than the average length of all text |
| # records. |
| # udf_initial_byte_array_length_for_memory_control=48 |
| |
| # How much memory may be used in ONE UDF query (in MB). |
| # The upper limit is 20% of allocated memory for read. |
| # udf_memory_budget_in_mb=30.0 |
| |
| # UDF memory allocation ratio. |
| # The parameter form is a:b:c, where a, b, and c are integers. |
| # udf_reader_transformer_collector_memory_proportion=1:1:1 |
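| # Illustrative arithmetic: with udf_memory_budget_in_mb=30.0 and a 1:1:1 proportion, |
| # the reader, transformer and collector each get roughly 10 MB. |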
| |
| # Uncomment the following fields to configure the UDF root directory. |
| # For Windows platform, the path is as follows: |
| # udf_root_dir=ext\\udf |
| # For Linux platform |
| # If its prefix is "/", then the path is absolute. Otherwise, it is relative. |
| # udf_root_dir=ext/udf |
| |
| |
| # Uncomment the following fields to configure the index root directory. |
| # For Windows platform, the path is as follows: |
| # index_root_dir=data\\index |
| # For Linux platform |
| # If its prefix is "/", then the path is absolute. Otherwise, it is relative. |
| # index_root_dir=data/index |
| |
| # Whether index is enabled |
| # enable_index=false |
| |
| # How many threads can concurrently build index. When <= 0, use CPU core number. |
| # concurrent_index_build_thread=0 |
| |
| # the default size of sliding window used for the subsequence matching in index framework |
| # default_index_window_range=10 |
| |
| # buffer parameter for index processor. |
| # index_buffer_size=134217728 |
| |
| # whether to enable data partitioning. If disabled, all data belongs to partition 0 |
| # enable_partition=false |
| |
| # time range for partitioning data inside each storage group; the unit is seconds |
| # partition_interval=604800 |
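| # For reference, 604800 seconds is 7 days; daily partitions would be, for example: |
| # partition_interval=86400 |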
| |
| # concurrent_writing_time_partition=500 |
| |
| # admin username, default is root |
| # Datatype: string |
| # admin_name=root |
| |
| # admin password, default is root |
| # Datatype: string |
| # admin_password=root |