Add FLEA

diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..26651a2
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,226 @@
+---
+Language:        Cpp
+# BasedOnStyle:  Google
+AccessModifierOffset: -1
+AlignAfterOpenBracket: Align
+AlignArrayOfStructures: None
+AlignConsecutiveMacros: None
+AlignConsecutiveAssignments: None
+AlignConsecutiveBitFields: None
+AlignConsecutiveDeclarations: None
+AlignEscapedNewlines: Left
+AlignOperands:   Align
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortEnumsOnASingleLine: true
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: All
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: WithoutElse
+AllowShortLoopsOnASingleLine: true
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: Yes
+AttributeMacros:
+  - __capability
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+  AfterCaseLabel:  false
+  AfterClass:      false
+  AfterControlStatement: Never
+  AfterEnum:       false
+  AfterFunction:   false
+  AfterNamespace:  false
+  AfterObjCDeclaration: false
+  AfterStruct:     false
+  AfterUnion:      false
+  AfterExternBlock: false
+  BeforeCatch:     false
+  BeforeElse:      false
+  BeforeLambdaBody: false
+  BeforeWhile:     false
+  IndentBraces:    false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeConceptDeclarations: true
+BreakBeforeBraces: Attach
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: BeforeColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit:     80
+CommentPragmas:  '^ IWYU pragma:'
+QualifierAlignment: Leave
+CompactNamespaces: false
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: true
+DisableFormat:   false
+EmptyLineAfterAccessModifier: Never
+EmptyLineBeforeAccessModifier: LogicalBlock
+ExperimentalAutoDetectBinPacking: false
+PackConstructorInitializers: NextLine
+BasedOnStyle:    ''
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+AllowAllConstructorInitializersOnNextLine: true
+FixNamespaceComments: true
+ForEachMacros:
+  - foreach
+  - Q_FOREACH
+  - BOOST_FOREACH
+IfMacros:
+  - KJ_IF_MAYBE
+IncludeBlocks:   Regroup
+IncludeCategories:
+  - Regex:           '^<ext/.*\.h>'
+    Priority:        2
+    SortPriority:    0
+    CaseSensitive:   false
+  - Regex:           '^<.*\.h>'
+    Priority:        1
+    SortPriority:    0
+    CaseSensitive:   false
+  - Regex:           '^<.*'
+    Priority:        2
+    SortPriority:    0
+    CaseSensitive:   false
+  - Regex:           '.*'
+    Priority:        3
+    SortPriority:    0
+    CaseSensitive:   false
+IncludeIsMainRegex: '([-_](test|unittest))?$'
+IncludeIsMainSourceRegex: ''
+IndentAccessModifiers: false
+IndentCaseLabels: true
+IndentCaseBlocks: false
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentExternBlock: AfterExternBlock
+IndentRequires:  false
+IndentWidth:     4
+IndentWrappedFunctionNames: false
+InsertTrailingCommas: None
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+LambdaBodyIndentation: Signature
+MacroBlockBegin: ''
+MacroBlockEnd:   ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Never
+ObjCBlockIndentWidth: 2
+ObjCBreakBeforeNestedBlockParam: true
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakOpenParenthesis: 0
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PenaltyIndentedWhitespace: 0
+PointerAlignment: Left
+PPIndentWidth:   -1
+RawStringFormats:
+  - Language:        Cpp
+    Delimiters:
+      - cc
+      - CC
+      - cpp
+      - Cpp
+      - CPP
+      - 'c++'
+      - 'C++'
+    CanonicalDelimiter: ''
+    BasedOnStyle:    google
+  - Language:        TextProto
+    Delimiters:
+      - pb
+      - PB
+      - proto
+      - PROTO
+    EnclosingFunctions:
+      - EqualsProto
+      - EquivToProto
+      - PARSE_PARTIAL_TEXT_PROTO
+      - PARSE_TEST_PROTO
+      - PARSE_TEXT_PROTO
+      - ParseTextOrDie
+      - ParseTextProtoOrDie
+      - ParseTestProto
+      - ParsePartialTestProto
+    CanonicalDelimiter: pb
+    BasedOnStyle:    google
+ReferenceAlignment: Pointer
+ReflowComments:  true
+RemoveBracesLLVM: false
+SeparateDefinitionBlocks: Leave
+ShortNamespaceLines: 1
+SortIncludes:    CaseSensitive
+SortJavaStaticImport: Before
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCaseColon: false
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeParensOptions:
+  AfterControlStatements: true
+  AfterForeachMacros: true
+  AfterFunctionDefinitionName: false
+  AfterFunctionDeclarationName: false
+  AfterIfMacros:   true
+  AfterOverloadedOperator: false
+  BeforeNonEmptyParentheses: false
+SpaceAroundPointerQualifiers: Default
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles:  Never
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInLineCommentPrefix:
+  Minimum:         1
+  Maximum:         -1
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+BitFieldColonSpacing: Both
+Standard:        Auto
+StatementAttributeLikeMacros:
+  - Q_EMIT
+StatementMacros:
+  - Q_UNUSED
+  - QT_REQUIRE_VERSION
+TabWidth:        8
+UseCRLF:         false
+UseTab:          Never
+WhitespaceSensitiveMacros:
+  - STRINGIZE
+  - PP_STRINGIZE
+  - BOOST_PP_STRINGIZE
+  - NS_SWIFT_NAME
+  - CF_SWIFT_NAME
+...
+
diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml
index 93e24da..45298b7 100644
--- a/.github/workflows/code-coverage.yml
+++ b/.github/workflows/code-coverage.yml
@@ -19,7 +19,7 @@
     if: github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'apache/tsfile' || github.event_name == 'push'
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Cache Maven packages
         uses: actions/cache@v4
         with:
@@ -29,7 +29,7 @@
       - name: Generate code coverage reports
         run: |
           sudo apt-get install lcov
-          ./mvnw -B -P with-java,with-cpp,with-code-coverage clean verify
+          ./mvnw -B -P with-java,with-cpp,with-code-coverage clean verify -Dspotless.skip=true
           lcov --capture --directory cpp/target/build/test --output-file cpp/target/build/test/coverage.info
           lcov --remove cpp/target/build/test/coverage.info '*/tsfile/cpp/test/*' --output-file cpp/target/build/test/coverage_filtered.info
           genhtml cpp/target/build/test/coverage_filtered.info --output-directory cpp/target/build/test/coverage_report
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 4ae5499..d5023a0 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -60,7 +60,7 @@
         # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v4
+      uses: actions/checkout@v5
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index c833f27..7c47ee1 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -6,7 +6,7 @@
   greeting:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/first-interaction@v1.3.0
+    - uses: actions/first-interaction@v3.0.0
       continue-on-error: true
       with:
         repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/site-build.yaml b/.github/workflows/site-build.yaml
index a9ecf0f..e77229a 100644
--- a/.github/workflows/site-build.yaml
+++ b/.github/workflows/site-build.yaml
@@ -24,7 +24,7 @@
     if: github.event_name == 'pull_request'
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       - name: Install pnpm
         uses: pnpm/action-setup@v4
@@ -59,7 +59,7 @@
     if: github.event_name == 'workflow_dispatch' && github.ref_name == 'develop' || github.event_name == 'push'
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
 
       - name: Install pnpm
         uses: pnpm/action-setup@v4
diff --git a/.github/workflows/unit-test-cpp.yml b/.github/workflows/unit-test-cpp.yml
index 1c6e496..08c1380 100644
--- a/.github/workflows/unit-test-cpp.yml
+++ b/.github/workflows/unit-test-cpp.yml
@@ -78,7 +78,7 @@
     steps:
 
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       # Setup caching of the artifacts in the .m2 directory, so they don't have to
       # all be downloaded again for every build.
@@ -104,6 +104,19 @@
               core.setOutput('platform_suffix', ``)
             }
 
+      - name: Install clang-format
+        shell: bash
+        run: |
+          if [[ "$RUNNER_OS" == "Linux" ]]; then
+            sudo update-alternatives --install /usr/bin/clang-format clang-format /usr/bin/clang-format-17 100
+            sudo update-alternatives --set clang-format /usr/bin/clang-format-17
+          elif [[ "$RUNNER_OS" == "Windows" ]]; then
+            choco install llvm --version 17.0.6 --force
+          else
+            brew install llvm@17
+            ln -sf $(brew --prefix llvm@17)/bin/clang-format /opt/homebrew/bin/clang-format
+          fi
+
       # Run the actual maven build including all tests.
       - name: Build and test with Maven
         shell: bash
diff --git a/.github/workflows/unit-test-java.yml b/.github/workflows/unit-test-java.yml
index 2e0cbf4..bacbbd4 100644
--- a/.github/workflows/unit-test-java.yml
+++ b/.github/workflows/unit-test-java.yml
@@ -51,10 +51,10 @@
     steps:
 
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Set up JDK ${{ matrix.java }}
-        uses: actions/setup-java@v4
+        uses: actions/setup-java@v5
         with:
           distribution: corretto
           java-version: ${{ matrix.java }}
diff --git a/.github/workflows/unit-test-python.yml b/.github/workflows/unit-test-python.yml
index 44b7d56..c999359 100644
--- a/.github/workflows/unit-test-python.yml
+++ b/.github/workflows/unit-test-python.yml
@@ -49,7 +49,7 @@
     steps:
 
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       # Setup caching of the artifacts in the .m2 directory, so they don't have to
       # all be downloaded again for every build.
@@ -79,7 +79,7 @@
       - name: Build and test with Maven
         shell: bash
         run: |
-          ./mvnw${{ steps.platform_suffix.outputs.platform_suffix }} -P with-python -Denable.asan=OFF -Dbuild.type=Release clean verify
+          ./mvnw${{ steps.platform_suffix.outputs.platform_suffix }} -P with-python -Denable.asan=OFF -Dbuild.type=Release clean verify -Dspotless.skip=true
 
       - name: Upload whl Artifact
         uses: actions/upload-artifact@v4
diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml
new file mode 100644
index 0000000..bfcfbd7
--- /dev/null
+++ b/.github/workflows/unit-test.yml
@@ -0,0 +1,88 @@
+# This workflow will build a Java project with Maven
+# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
+
+name: Unit-Test
+
+on:
+  push:
+    branches:
+      - develop
+      - iotdb
+      - rc/*
+      - dev/*
+    paths-ignore:
+      - 'docs/**'
+  pull_request:
+    branches:
+      - develop
+      - iotdb
+      - rc/*
+      - dev/*
+    paths-ignore:
+      - 'docs/**'
+  # Enable manually starting builds, and allow forcing updating of SNAPSHOT dependencies.
+  workflow_dispatch:
+    inputs:
+      forceUpdates:
+        description: "Forces a snapshot update"
+        required: false
+        default: 'false'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+env:
+  MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+  GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
+
+jobs:
+  unit-test:
+    strategy:
+      fail-fast: false
+      max-parallel: 20
+      matrix:
+        java: [ 8, 17, 21 ]
+        os: [ ubuntu-latest, macos-latest, windows-latest ]
+    runs-on: ${{ matrix.os }}
+
+    steps:
+
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up JDK ${{ matrix.java }}
+        uses: actions/setup-java@v4
+        with:
+          distribution: corretto
+          java-version: ${{ matrix.java }}
+
+      # Setup caching of the artifacts in the .m2 directory, so they don't have to
+      # all be downloaded again for every build.
+      - name: Cache Maven packages
+        uses: actions/cache@v4
+        with:
+          path: ~/.m2
+          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
+          restore-keys: ${{ runner.os }}-m2-
+
+      # On Windows systems the 'mvnw' script needs an additional ".cmd" appended.
+      - name: Calculate platform suffix
+        id: platform_suffix
+        uses: actions/github-script@v7.0.1
+        env:
+          OS: ${{ matrix.os }}
+        with:
+          script: |
+            const { OS } = process.env
+            if (OS.includes("windows")) {
+              core.setOutput('platform_suffix', `.cmd`)
+            } else {
+              core.setOutput('platform_suffix', ``)
+            }
+
+      # Run the actual maven build including all unit- and integration-tests.
+      - name: Build and test with Maven
+        shell: bash
+        run: |
+          ./mvnw${{ steps.platform_suffix.outputs.platform_suffix }} -P with-java clean verify
diff --git a/.mvn/.develocity/develocity-workspace-id b/.mvn/.develocity/develocity-workspace-id
new file mode 100644
index 0000000..16a54d3
--- /dev/null
+++ b/.mvn/.develocity/develocity-workspace-id
@@ -0,0 +1 @@
+wdxgfg7cv5hitihoiryglvddnm
\ No newline at end of file
diff --git a/.mvn/gradle-enterprise.xml b/.mvn/gradle-enterprise.xml
new file mode 100644
index 0000000..cf1a9a0
--- /dev/null
+++ b/.mvn/gradle-enterprise.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+
+-->
+<gradleEnterprise xmlns="https://www.gradle.com/gradle-enterprise-maven" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://www.gradle.com/gradle-enterprise-maven https://www.gradle.com/schema/gradle-enterprise-maven.xsd">
+    <server>
+        <url>https://ge.apache.org</url>
+        <allowUntrusted>false</allowUntrusted>
+    </server>
+    <buildScan>
+        <capture>
+            <goalInputFiles>true</goalInputFiles>
+            <buildLogging>true</buildLogging>
+            <testLogging>true</testLogging>
+        </capture>
+        <backgroundBuildScanUpload>#{isFalse(env['GITHUB_ACTIONS'])}</backgroundBuildScanUpload>
+        <publish>ALWAYS</publish>
+        <publishIfAuthenticated>true</publishIfAuthenticated>
+        <obfuscation>
+            <ipAddresses>#{{'0.0.0.0'}}</ipAddresses>
+        </obfuscation>
+    </buildScan>
+    <buildCache>
+        <local>
+            <enabled>#{isFalse(env['GITHUB_ACTIONS'])}</enabled>
+        </local>
+        <remote>
+            <enabled>false</enabled>
+        </remote>
+    </buildCache>
+</gradleEnterprise>
diff --git a/README-zh.md b/README-zh.md
index 9476d4c..c8e1e73 100644
--- a/README-zh.md
+++ b/README-zh.md
@@ -26,7 +26,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 [![codecov](https://codecov.io/github/apache/tsfile/graph/badge.svg?token=0Y8MVAB3K1)](https://codecov.io/github/apache/tsfile)
@@ -119,6 +119,8 @@
 | BOOLEAN | RLE        | LZ4    |
 | TEXT    | DICTIONARY | LZ4    |
 
+我们的编码方法还包括了 FLEA,这是一种专为模式化时间序列设计的新型无损压缩算法。它独特地利用了速率最优框架内的频域分析,将数据分解为频率分量和残差分量,并通过自适应地选择量化参数来共同最小化其编码成本。通过进一步采用双区域编码策略,结合针对密集和稀疏频率数据的专用编码器以及混合残差编码器,FLEA 实现了高压缩比,尤其是在具有周期性或循环性结构的数据上。
+
 更多类型的编码和压缩方式参见[文档](https://iotdb.apache.org/zh/UserGuide/latest/Basic-Concept/Encoding-and-Compression.html)
 
 ## 开发和使用 TsFile
diff --git a/README.md b/README.md
index 9d79713..c4ca9b2 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 [![codecov](https://codecov.io/github/apache/tsfile/graph/badge.svg?token=0Y8MVAB3K1)](https://codecov.io/github/apache/tsfile)
@@ -118,6 +118,8 @@
 | BOOLEAN | RLE        | LZ4    |
 | TEXT    | DICTIONARY | LZ4    |
 
+We also include FLEA, which is a novel lossless compression algorithm designed specifically for patterned time series. It uniquely leverages frequency-domain analysis within a rate-optimal framework, decomposing data into frequency and residual components whose encoding costs are jointly minimized by adaptively selecting a quantization parameter. By further employing a bi-regional encoding strategy with specialized coders for dense and sparse frequency data, alongside a hybrid residual encoder, FLEA achieves high compression ratios, particularly on data exhibiting periodic or cyclical structures.
+
 more see [Docs](https://iotdb.apache.org/UserGuide/latest/Basic-Concept/Encoding-and-Compression.html)
 
 ## Build and Use TsFile
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 399b500..fe34172 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -19,6 +19,36 @@
 
 -->
 
+# Apache TsFile 2.1.1
+
+## Improvement/Bugfix
+* [JAVA] AbstractAlignedTimeSeriesMetadata.typeMatch always return true in #538
+* [JAVA] Ignore the null value passed in the Tablet.addValue method in #540
+* [JAVA] Implement extract time filters in #539
+* [JAVA] Init all series writer for AlignedChunkGroupWriter in #545
+* [JAVA] Check max tsfile version in #548
+* [JAVA] Include common classes in tsfile.jar to fix #501 in #510
+* [JAVA] Implement extract value filters in #554
+* [JAVA] Fix wrong Private-Package declaration (related to #551) in #556
+* [JAVA] Avoid repeated calculation of shallow size of map in #559
+* [JAVA] Refactor UnknownType to extend AbstractType in #561
+* [JAVA] Add Tablet.append in #562
+
+# Apache TsFile 2.1.0
+
+## New Feature
+- [Java] Support setting default compression by datatype(#523).
+- [Java] Support using environment variables to generate main encrypt key(#512).
+- [Java] Support estimating ram usage of measurement schema(#508).
+- [Java] Add TsFileLastReader to retrieve the last points in a TsFile(#498).
+- [Cpp/C/Python] Support TsFile Table reader and writer.
+
+## Improvement/Bugfix
+- [Java] Fix memory calculation of BinaryColumnBuilder(#530).
+- [Java] Resolved case sensitivity issue when reading column names(#518).
+- [Java] Fix npe when closing the last reader that has not been used(#513).
+- [Java] Fix float RLBE encoding loss of precision(#484).
+
 # Apache TsFile 2.0.3
 
 ## Improvement/Bugfix
diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index 75684ce..51f69e8 100755
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -20,51 +20,51 @@
 project(TsFile_CPP)
 
 cmake_policy(SET CMP0079 NEW)
-set(TsFile_CPP_VERSION 2.1.0.dev)
+set(TsFile_CPP_VERSION 2.2.0.dev)
 set(CMAKE_CXX_FLAGS "$ENV{CXXFLAGS} -Wall")
-if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -D__STDC_FORMAT_MACROS")
-endif()
+if (CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wunused -Wuninitialized -D__STDC_FORMAT_MACROS")
+endif ()
 
 message("cmake using: USE_CPP11=${USE_CPP11}")
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
 
-if(DEFINED ENV{CXX})
+if (DEFINED ENV{CXX})
     set(CMAKE_CXX_COMPILER $ENV{CXX})
     message("cmake using: CXX=${CMAKE_CXX_COMPILER}")
-endif()
+endif ()
 
-if(DEFINED ENV{CC})
-  set(CMAKE_C_COMPILER $ENV{CC})
-  message("cmake using: CC=${CMAKE_C_COMPILER}")
-endif()
+if (DEFINED ENV{CC})
+    set(CMAKE_C_COMPILER $ENV{CC})
+    message("cmake using: CC=${CMAKE_C_COMPILER}")
+endif ()
 
 message("cmake using: DEBUG_SE=${DEBUG_SE}")
 if (${DEBUG_SE})
-  add_definitions(-DDEBUG_SE=1)
-  message("add_definitions -DDEBUG_SE=1")
-endif()
+    add_definitions(-DDEBUG_SE=1)
+    message("add_definitions -DDEBUG_SE=1")
+endif ()
 
 if (${COV_ENABLED})
-  add_definitions(-DCOV_ENABLED=1)
-  message("add_definitions -DCOV_ENABLED=1")
-endif()
+    add_definitions(-DCOV_ENABLED=1)
+    message("add_definitions -DCOV_ENABLED=1")
+endif ()
 
 
 if (NOT CMAKE_BUILD_TYPE)
-  set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build." FORCE)
+    set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build." FORCE)
 endif ()
 
 message("CMAKE BUILD TYPE " ${CMAKE_BUILD_TYPE})
 if (CMAKE_BUILD_TYPE STREQUAL "Debug")
-  set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g")
+    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g")
 elseif (CMAKE_BUILD_TYPE STREQUAL "Release")
-  set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O2")
+    set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O2")
 elseif (CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo")
-  set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g")
+    set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O2 -g")
 elseif (CMAKE_BUILD_TYPE STREQUAL "MinSizeRel")
-  set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -ffunction-sections -fdata-sections -Os")
-  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections")
+    set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} -ffunction-sections -fdata-sections -Os")
+    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections")
 endif ()
 message("CMAKE DEBUG: CMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}")
 
@@ -72,20 +72,20 @@
 option(ENABLE_ASAN "Enable Address Sanitizer" OFF)
 
 if (NOT WIN32)
-  if (ENABLE_ASAN)
-    message("Address Sanitizer is enabled.")
+    if (ENABLE_ASAN)
+        message("Address Sanitizer is enabled.")
 
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address -fno-omit-frame-pointer")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address,undefined -fno-omit-frame-pointer")
 
-    if (NOT APPLE)
-      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libasan")
-      set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address -static-libasan")
+        if (NOT APPLE)
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static-libasan")
+            set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address,undefined -static-libasan -static-libubsan")
+        else ()
+            set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address,undefined")
+        endif ()
     else ()
-      set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
+        message("Address Sanitizer is disabled.")
     endif ()
-  else ()
-    message("Address Sanitizer is disabled.")
-  endif ()
 endif ()
 
 
@@ -100,11 +100,15 @@
 set(LIBRARY_INCLUDE_DIR ${PROJECT_BINARY_DIR}/include CACHE STRING "TsFile includes")
 set(THIRD_PARTY_INCLUDE ${PROJECT_BINARY_DIR}/third_party)
 
+set(SAVED_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+set(CMAKE_CXX_FLAGS "$ENV{CXXFLAGS} -Wall -std=c++11")
 add_subdirectory(third_party)
+set(CMAKE_CXX_FLAGS "${SAVED_CXX_FLAGS}")
+
 add_subdirectory(src)
 add_subdirectory(test)
 add_subdirectory(examples)
-if(TESTS_ENABLED)
+if (TESTS_ENABLED)
     add_dependencies(TsFile_Test tsfile)
-endif()
+endif ()
 
diff --git a/cpp/README-zh.md b/cpp/README-zh.md
index b1279a5..6a26f2b 100644
--- a/cpp/README-zh.md
+++ b/cpp/README-zh.md
@@ -25,7 +25,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 
diff --git a/cpp/README.md b/cpp/README.md
index b742c4d..9e13cdd 100644
--- a/cpp/README.md
+++ b/cpp/README.md
@@ -26,7 +26,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 
diff --git a/cpp/examples/c_examples/c_examples.c b/cpp/examples/c_examples/c_examples.c
new file mode 100644
index 0000000..d2398fa
--- /dev/null
+++ b/cpp/examples/c_examples/c_examples.c
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "c_examples.h"
+
+#include <fcntl.h>
+#include <malloc.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+
+
+#define HANDLE_ERROR(err_no)                  \
+    do {                                      \
+        if (err_no != 0) {                    \
+            printf("get err no: %d", err_no); \
+            return err_no;                    \
+        }                                     \
+    } while (0)
+
+ErrorCode write_tsfile() {
+    ErrorCode err_code;
+    CTsFileWriter writer;
+    if (access("c_rw.tsfile", 0) == 0) {
+        if (remove("test.tsfile") != 0) {
+            printf("Failed to delete test.tsfile file\n");
+            return -1;
+        }
+    }
+    writer = ts_writer_open("c_rw.tsfile", &err_code);
+    if (NULL == writer) {
+        return err_code;
+    }
+    ColumnSchema columnSchema;
+    columnSchema.name = "temperature";
+    columnSchema.column_def = TS_TYPE_INT32;
+    err_code =
+        tsfile_register_table_column(writer, "test_table", &columnSchema);
+    HANDLE_ERROR(err_code);
+    TableSchema tableSchema;
+    tableSchema.column_num = 3;
+    tableSchema.table_name = "test_table";
+    tableSchema.column_schema =
+        (ColumnSchema **)malloc(tableSchema.column_num * sizeof(TableSchema *));
+    tableSchema.column_schema[0] = (ColumnSchema *)malloc(sizeof(ColumnSchema));
+    tableSchema.column_schema[0]->column_def = TS_TYPE_DOUBLE;
+    tableSchema.column_schema[0]->name = "level";
+    tableSchema.column_schema[1] = (ColumnSchema *)malloc(sizeof(ColumnSchema));
+    tableSchema.column_schema[1]->column_def = TS_TYPE_BOOLEAN;
+    tableSchema.column_schema[1]->name = "up";
+    tableSchema.column_schema[2] = (ColumnSchema *)malloc(sizeof(ColumnSchema));
+    tableSchema.column_schema[2]->column_def = TS_TYPE_FLOAT;
+    tableSchema.column_schema[2]->name = "humi";
+    err_code = tsfile_register_table(writer, &tableSchema);
+    free(tableSchema.column_schema[0]);
+    free(tableSchema.column_schema[1]);
+    free(tableSchema.column_schema[2]);
+    free(tableSchema.column_schema);
+    HANDLE_ERROR(err_code);
+    printf("register table success\n");
+    TsFileRowData rowData = create_tsfile_row("test_table", 1, 4);
+    insert_data_into_tsfile_row_double(rowData, "level", 10);
+    insert_data_into_tsfile_row_float(rowData, "humi", 10.0f);
+    insert_data_into_tsfile_row_boolean(rowData, "up", true);
+    insert_data_into_tsfile_row_int32(rowData, "temperature", 10);
+    err_code = tsfile_write_row_data(writer, rowData);
+
+    rowData = create_tsfile_row("test_table", 2, 4);
+    insert_data_into_tsfile_row_double(rowData, "level", 12);
+    err_code = tsfile_write_row_data(writer, rowData);
+
+    for (int ind = 10; ind < 2000; ind++) {
+        rowData = create_tsfile_row("test_table", ind, 4);
+        insert_data_into_tsfile_row_double(rowData, "level", 12 + ind);
+        insert_data_into_tsfile_row_float(rowData, "humi", 12.0f + ind);
+        insert_data_into_tsfile_row_boolean(rowData, "up", true);
+        insert_data_into_tsfile_row_int32(rowData, "temperature", 12 + ind);
+        err_code = tsfile_write_row_data(writer, rowData);
+    }
+    printf("writer row data success\n");
+    HANDLE_ERROR(err_code);
+    HANDLE_ERROR(tsfile_flush_data(writer));
+    printf("flush data success\n");
+    HANDLE_ERROR(ts_writer_close(writer));
+    printf("close writer success\n");
+    return 0;
+}
+
+ErrorCode read_tsfile() {
+    ErrorCode err_code;
+    CTsFileReader reader;
+    reader = ts_reader_open("c_rw.tsfile", &err_code);
+    if (NULL == reader) {
+        return err_code;
+    }
+    const char *columns[] = {"temperature", "level", "up", "humi"};
+    //  TimeFilterExpression* exp = create_andquery_timefilter();
+    //  TimeFilterExpression* time_filter = create_time_filter("test_table",
+    //  "temperature", GT, 11); TimeFilterExpression* time_filter2 =
+    //  create_time_filter("test_table", "humi", GT, 10); TimeFilterExpression*
+    //  time_filter3 = create_time_filter("test_table", "level", LE, 20);
+    //  add_time_filter_to_and_query(exp, time_filter);
+    //  add_time_filter_to_and_query(exp, time_filter2);
+    //  add_time_filter_to_and_query(exp, time_filter3);
+
+    QueryDataRet ret = ts_reader_query(reader, "test_table", columns, 4, NULL);
+    printf("query success\n");
+    DataResult *result = ts_next(ret, 20);
+    if (result == NULL) {
+        printf("get result failed\n");
+        return -1;
+    }
+    print_data_result(result);
+    //  destory_time_filter_query(exp);
+    HANDLE_ERROR(destory_query_dataret(ret));
+    HANDLE_ERROR(destory_tablet(result));
+    return 0;
+}
diff --git a/cpp/examples/c_examples/demo_read.c b/cpp/examples/c_examples/demo_read.c
index d3c17a1..05cc862 100644
--- a/cpp/examples/c_examples/demo_read.c
+++ b/cpp/examples/c_examples/demo_read.c
@@ -25,7 +25,6 @@
 
 // This example shows you how to read tsfile.
 ERRNO read_tsfile() {
-
     ERRNO code = 0;
     char* table_name = "table1";
 
@@ -46,7 +45,8 @@
     int column_num = tsfile_result_set_metadata_get_column_num(metadata);
 
     for (int i = 1; i <= column_num; i++) {
-        printf("column:%s, datatype:%d\n", tsfile_result_set_metadata_get_column_name(metadata, i),
+        printf("column:%s, datatype:%d\n",
+               tsfile_result_set_metadata_get_column_name(metadata, i),
                tsfile_result_set_metadata_get_data_type(metadata, i));
     }
 
@@ -62,8 +62,9 @@
             } else {
                 switch (tsfile_result_set_metadata_get_data_type(metadata, i)) {
                     case TS_DATATYPE_BOOLEAN:
-                        printf("%d\n", tsfile_result_set_get_value_by_index_bool(
-                                         ret, i));
+                        printf(
+                            "%d\n",
+                            tsfile_result_set_get_value_by_index_bool(ret, i));
                         break;
                     case TS_DATATYPE_INT32:
                         printf("%d\n",
@@ -76,8 +77,9 @@
                                                                             i));
                         break;
                     case TS_DATATYPE_FLOAT:
-                        printf("%f\n", tsfile_result_set_get_value_by_index_float(
-                                         ret, i));
+                        printf(
+                            "%f\n",
+                            tsfile_result_set_get_value_by_index_float(ret, i));
                         break;
                     case TS_DATATYPE_DOUBLE:
                         printf("%lf\n",
diff --git a/cpp/examples/c_examples/demo_write.c b/cpp/examples/c_examples/demo_write.c
index 9f11621..326cfdc 100644
--- a/cpp/examples/c_examples/demo_write.c
+++ b/cpp/examples/c_examples/demo_write.c
@@ -17,9 +17,9 @@
  * under the License.
  */
 
-#include <stdlib.h>
 #include <stdint.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <string.h>
 
 #include "c_examples.h"
@@ -27,6 +27,14 @@
 // This example shows you how to write tsfile.
 ERRNO write_tsfile() {
     ERRNO code = 0;
+    code = set_global_compression(TS_COMPRESSION_LZ4);
+    if (code != RET_OK) {
+        return code;
+    }
+    code = set_datatype_encoding(TS_DATATYPE_INT32, TS_ENCODING_TS_2DIFF);
+    if (code != RET_OK) {
+        return code;
+    }
     char* table_name = "table1";
 
     // Create table schema to describe a table in a tsfile.
@@ -37,16 +45,16 @@
         (ColumnSchema*)malloc(sizeof(ColumnSchema) * 3);
     table_schema.column_schemas[0] =
         (ColumnSchema){.column_name = strdup("id1"),
-                     .data_type = TS_DATATYPE_STRING,
-                     .column_category = TAG};
+                       .data_type = TS_DATATYPE_STRING,
+                       .column_category = TAG};
     table_schema.column_schemas[1] =
         (ColumnSchema){.column_name = strdup("id2"),
-                     .data_type = TS_DATATYPE_STRING,
-                     .column_category = TAG};
+                       .data_type = TS_DATATYPE_STRING,
+                       .column_category = TAG};
     table_schema.column_schemas[2] =
         (ColumnSchema){.column_name = strdup("s1"),
-                     .data_type = TS_DATATYPE_INT32,
-                     .column_category = FIELD};
+                       .data_type = TS_DATATYPE_INT32,
+                       .column_category = FIELD};
 
     remove("test_c.tsfile");
     // Create a file with specify path to write tsfile.
diff --git a/cpp/pom.xml b/cpp/pom.xml
index cc62aa8..fa6b007 100644
--- a/cpp/pom.xml
+++ b/cpp/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-parent</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>tsfile-cpp</artifactId>
     <packaging>pom</packaging>
@@ -223,5 +223,56 @@
                 <coverage.enabled>ON</coverage.enabled>
             </properties>
         </profile>
+        <profile>
+            <id>.java-9-and-above</id>
+            <activation>
+                <jdk>[9,)</jdk>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>com.diffplug.spotless</groupId>
+                        <artifactId>spotless-maven-plugin</artifactId>
+                        <version>${spotless.version}</version>
+                        <configuration>
+                            <cpp>
+                                <includes>
+                                    <!-- You have to set the target manually -->
+                                    <include>bench_mark/**/*.h</include>
+                                    <include>bench_mark/**/*.cc</include>
+                                    <include>examples/**/*.h</include>
+                                    <include>examples/**/*.c</include>
+                                    <include>examples/**/*.cc</include>
+                                    <include>src/**/*.h</include>
+                                    <include>src/**/*.c</include>
+                                    <include>src/**/*.cc</include>
+                                    <include>test/**/*.h</include>
+                                    <include>test/**/*.c</include>
+                                    <include>test/**/*.cc</include>
+                                </includes>
+                                <clangFormat>
+                                    <!--pathToExe>/path/to/buf</pathToExe-->
+                                    <!-- optional: if clang-format isn't in your path -->
+                                    <version>${clang.format.version}</version>
+                                    <style>file</style>
+                                    <!-- optional: can be LLVM, Google, Chromium, Mozilla, WebKit -->
+                                </clangFormat>
+                            </cpp>
+                            <lineEndings>UNIX</lineEndings>
+                            <skip>${spotless.skip}</skip>
+                        </configuration>
+                        <executions>
+                            <execution>
+                                <id>spotless-check</id>
+                                <goals>
+                                    <goal>check</goal>
+                                </goals>
+                                <phase>validate</phase>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
     </profiles>
 </project>
diff --git a/cpp/src/common/allocator/byte_stream.h b/cpp/src/common/allocator/byte_stream.h
index 7836634..65c4b80 100644
--- a/cpp/src/common/allocator/byte_stream.h
+++ b/cpp/src/common/allocator/byte_stream.h
@@ -20,6 +20,7 @@
 #ifndef COMMON_ALLOCATOR_BYTE_STREAM_H
 #define COMMON_ALLOCATOR_BYTE_STREAM_H
 
+#include <common/constant/tsfile_constant.h>
 #include <stdio.h>
 #include <stdlib.h>
 
@@ -823,6 +824,78 @@
         ui64 = (ui64 << 8) | (buf[7] & 0xFF);
         return ret;
     }
+
+    FORCE_INLINE static int write_int_little_endian_padded_on_bit_width(
+        int32_t value, ByteStream &out, int bitWidth) {
+        int paddedByteNum = (bitWidth + 7) / 8;
+        if (paddedByteNum > 4) {
+            return E_TSFILE_CORRUPTED;
+        }
+        auto u = static_cast<uint32_t>(value);
+        for (int i = 0; i < paddedByteNum; ++i) {
+            uint8_t byte = (u >> (i * 8)) & 0xFF;
+            out.write_buf(&byte, 1);
+        }
+        return E_OK;
+    }
+
+    FORCE_INLINE static int write_int64_little_endian_padded_on_bit_width(
+        int64_t value, ByteStream &out, int bit_width) {
+        int padded_byte_num = (bit_width + 7) / 8;
+        if (padded_byte_num > 8) {
+            return E_TSFILE_CORRUPTED;
+        }
+        auto u = static_cast<uint64_t>(value);
+        for (int i = 0; i < padded_byte_num; ++i) {
+            uint8_t byte = (u >> (i * 8)) & 0xFF;
+            out.write_buf(&byte, 1);
+        }
+        return E_OK;
+    }
+
+    FORCE_INLINE static int read_int_little_endian_padded_on_bit_width(
+        ByteStream &in, int bitWidth, int32_t &out_val) {
+        int padded_byte_num = (bitWidth + 7) / 8;
+        if (padded_byte_num > 4) {
+            return E_TSFILE_CORRUPTED;
+        }
+        uint8_t buf[4] = {0};
+        uint32_t read_len = 0;
+        int ret = in.read_buf(buf, padded_byte_num, read_len);
+        if (ret != E_OK || read_len != static_cast<uint32_t>(padded_byte_num)) {
+            return E_TSFILE_CORRUPTED;
+        }
+        uint32_t result = 0;
+        for (int i = 0; i < padded_byte_num; ++i) {
+            result |= static_cast<uint32_t>(buf[i]) << (i * 8);
+        }
+        out_val = static_cast<int32_t>(result);
+        return E_OK;
+    }
+
+    FORCE_INLINE static int chunk_read_all_data(ByteStream &in, ByteStream &out,
+                                                size_t chunk_size = 4096) {
+        char *buffer = new char[chunk_size];
+        int ret = common::E_OK;
+        while (in.remaining_size() > 0) {
+            // Adjust read size based on remaining input size
+            uint32_t bytes_to_read = static_cast<uint32_t>(
+                std::min(chunk_size, static_cast<size_t>(in.remaining_size())));
+
+            uint32_t bytes_read = 0;
+            ret = in.read_buf(buffer, bytes_to_read, bytes_read);
+            if (ret != E_OK || bytes_read == 0) {
+                break;
+            }
+            if (RET_FAIL(out.write_buf(buffer, bytes_read))) {
+                ret = common::E_ENCODE_ERR;
+                break;
+            }
+        }
+        delete[] buffer;
+        return ret;
+    }
+
     // caller guarantee buffer has at least 1 byte
     FORCE_INLINE static uint8_t read_ui8(char *buffer) {
         return *(uint8_t *)buffer;
@@ -1003,14 +1076,13 @@
         return common::E_OK;
     }
     FORCE_INLINE static int write_var_int(int32_t i32, ByteStream &out) {
-        // TODO 8byte to 4byte.
-        // but in IoTDB java, it has only write_var_uint(i32)
-        int ui32 = i32 << 1;
+        uint32_t ui32 = static_cast<uint32_t>(i32) << 1;
         if (i32 < 0) {
             ui32 = ~ui32;
         }
-        return do_write_var_uint(static_cast<uint32_t>(ui32), out);
+        return do_write_var_uint(ui32, out);
     }
+
     FORCE_INLINE static int read_var_int(int32_t &i32, ByteStream &in) {
         int ret = common::E_OK;
         uint32_t ui32;
@@ -1047,6 +1119,54 @@
         }
         return ret;
     }
+
+    // If the str is nullptr, NO_STR_TO_READ will be added instead.
+    FORCE_INLINE static int write_var_char_ptr(const std::string *str,
+                                               ByteStream &out) {
+        int ret = common::E_OK;
+        if (str == nullptr) {
+            write_var_int(storage::NO_STR_TO_READ, out);
+            return ret;
+        }
+        size_t str_len = str->length();
+        if (RET_FAIL(write_var_int(str_len, out))) {
+            return ret;
+        } else if (RET_FAIL(out.write_buf(str->c_str(), str_len))) {
+            return ret;
+        }
+        return ret;
+    }
+
+    // If `str` is not a nullptr after calling `read_var_char_ptr`, it
+    // indicates that memory has been allocated and must be freed.
+    FORCE_INLINE static int read_var_char_ptr(std::string *&str,
+                                              ByteStream &in) {
+        int ret = common::E_OK;
+        int32_t len = 0;
+        int32_t read_len = 0;
+        if (RET_FAIL(read_var_int(len, in))) {
+            return ret;
+        } else {
+            if (len == storage::NO_STR_TO_READ) {
+                str = nullptr;
+                return ret;
+            } else {
+                char *tmp_buf = static_cast<char *>(malloc(len));
+                if (RET_FAIL(in.read_buf(tmp_buf, len, read_len))) {
+                    free(tmp_buf);
+                    return ret;
+                } else if (len != read_len) {
+                    free(tmp_buf);
+                    ret = E_BUF_NOT_ENOUGH;
+                } else {
+                    str = new std::string(tmp_buf, len);
+                    free(tmp_buf);
+                }
+            }
+        }
+        return ret;
+    }
+
     FORCE_INLINE static int read_var_str(std::string &str, ByteStream &in) {
         int ret = common::E_OK;
         int32_t len = 0;
diff --git a/cpp/src/common/allocator/mem_alloc.cc b/cpp/src/common/allocator/mem_alloc.cc
index a1e16d9..0cc78c8 100644
--- a/cpp/src/common/allocator/mem_alloc.cc
+++ b/cpp/src/common/allocator/mem_alloc.cc
@@ -63,45 +63,41 @@
     /* 27 */ "HASH_TABLE",
 };
 
-const uint32_t HEADER_SIZE_4B = 4;
-const uint32_t HEADER_SIZE_8B = 8;
+// Most modern CPUs (e.g., x86_64, Arm) support at least 8-byte alignment,
+// and C++ mandates that alignof(std::max_align_t) reflects the strictest
+// alignment requirement for built-in types (typically 8 or 16 bytes, especially
+// with SIMD)
+
+// To ensure that the returned memory pointer from mem_alloc is properly aligned
+// for any type, we standardize on an 8-byte header(HEADER_SIZE_8B).
+// If the actual header content is smaller, additional padding is inserted
+// automatically before the aligned payload to preserve alignment.
+// constexpr uint32_t HEADER_SIZE_4B = 4;
+constexpr size_t HEADER_PTR_SIZE = 8;
+// Default alignment is 8 bytes, sufficient for basic types.
+// If SIMD (e.g., SSE/AVX) is introduced later, increase ALIGNMENT to 16/32/64
+// as needed.
+// constexpr size_t ALIGNMENT = alignof(std::max_align_t);
+constexpr size_t ALIGNMENT = 8;
 
 void *mem_alloc(uint32_t size, AllocModID mid) {
     // use 7bit at most
     ASSERT(mid <= 127);
-
-    if (size <= 0xFFFFFF) {
-        // use 3B size + 1B mod
-        char *p = (char *)malloc(size + HEADER_SIZE_4B);
-        if (UNLIKELY(p == nullptr)) {
-            return nullptr;
-        } else {
-            uint32_t header = (size << 8) | ((uint32_t)mid);
-            *((uint32_t *)p) = header;
-            ModStat::get_instance().update_alloc(mid, size);
-            // cppcheck-suppress memleak
-            // cppcheck-suppress unmatchedSuppression
-            return p + HEADER_SIZE_4B;
-        }
-    } else {
-        char *p = (char *)malloc(size + HEADER_SIZE_8B);
-        if (UNLIKELY(p == nullptr)) {
-            std::cout << "alloc big filed for size " << size + HEADER_SIZE_4B
-                      << std::endl;
-            return nullptr;
-        } else {
-            uint64_t large_size = size;
-            uint64_t header = ((large_size) << 8) | (((uint32_t)mid) | (0x80));
-            uint32_t low4b = (uint32_t)(header & 0xFFFFFFFF);
-            uint32_t high4b = (uint32_t)(header >> 32);
-            *(uint32_t *)p = high4b;
-            *(uint32_t *)(p + 4) = low4b;
-            ModStat::get_instance().update_alloc(mid, size);
-            // cppcheck-suppress unmatchedSuppression
-            // cppcheck-suppress memleak
-            return p + HEADER_SIZE_8B;
-        }
+    static_assert(HEADER_PTR_SIZE <= ALIGNMENT,
+                  "Header must fit within alignment");
+    constexpr size_t header_size = ALIGNMENT;
+    const size_t total_size = size + header_size;
+    auto raw = static_cast<char *>(malloc(total_size));
+    if (UNLIKELY(raw == nullptr)) {
+        return nullptr;
     }
+    uint64_t data_size = size;
+    uint64_t header = (data_size << 8) | static_cast<uint32_t>(mid);
+    auto low4b = static_cast<uint32_t>(header & 0xFFFFFFFF);
+    auto high4b = static_cast<uint32_t>(header >> 32);
+    *reinterpret_cast<uint32_t *>(raw) = high4b;
+    *reinterpret_cast<uint32_t *>(raw + 4) = low4b;
+    return raw + header_size;
 }
 
 #ifndef _WIN32
@@ -132,109 +128,39 @@
 #endif
 
 void mem_free(void *ptr) {
-    // try as 4Byte header
-    char *p = (char *)ptr;
-    uint32_t header = *(uint32_t *)(p - HEADER_SIZE_4B);
-    if ((header & 0x80) == 0) {
-        // 4Byte header
-        uint32_t size = header >> 8;
-        AllocModID mid = (AllocModID)(header & 0x7F);
-        ModStat::get_instance().update_free(mid, size);
-        ::free(p - HEADER_SIZE_4B);
-    } else {
-        // 8Byte header
-        uint64_t header8b = ((uint64_t)(*(uint32_t *)(p - 4))) |
-                            ((uint64_t)(*(uint32_t *)(p - 8)) << 32);
-        AllocModID mid = (AllocModID)(header8b & 0x7F);
-        uint32_t size = (uint32_t)(header8b >> 8);
-        ModStat::get_instance().update_free(mid, size);
-        ::free(p - HEADER_SIZE_8B);
-    }
+    char *p = static_cast<char *>(ptr);
+    char *raw_ptr = p - ALIGNMENT;
+    uint64_t header =
+        static_cast<uint64_t>(*reinterpret_cast<uint32_t *>(raw_ptr + 4)) |
+        (static_cast<uint64_t>(*reinterpret_cast<uint32_t *>(raw_ptr)) << 32);
+    auto mid = static_cast<AllocModID>(header & 0x7F);
+    auto size = static_cast<uint32_t>(header >> 8);
+    ModStat::get_instance().update_free(mid, size);
+    ::free(raw_ptr);
 }
 
 void *mem_realloc(void *ptr, uint32_t size) {
-    AllocModID mid_org;
-    uint32_t size_org;
-    char *p = (char *)ptr;
-    uint32_t header_org =
-        *(uint32_t *)(p - HEADER_SIZE_4B);  // try as 4Byte header
-    if ((header_org & 0x80) == 0) {
-        // header_org is 4byte
-        size_org = header_org >> 8;
-        mid_org = (AllocModID)(header_org & 0x7F);
-        if (size <= 0xFFFFFF) {
-            p = (char *)realloc(p - HEADER_SIZE_4B, size + HEADER_SIZE_4B);
-            if (UNLIKELY(p == nullptr)) {
-                return nullptr;
-            } else {
-                uint32_t header =
-                    (size << 8) | ((uint32_t)mid_org);  // size changed
-                *((uint32_t *)p) = header;
-                ModStat::get_instance().update_alloc(
-                    mid_org, int32_t(size) - int32_t(size_org));
-                return p + HEADER_SIZE_4B;
-            }
-        } else {  // size > 0xFFFFFF, realloc(os_p, size + header_len)
-            p = (char *)realloc(p - HEADER_SIZE_4B, size + HEADER_SIZE_8B);
-            if (UNLIKELY(p == nullptr)) {
-                return nullptr;
-            } else {
-                std::memmove(p + HEADER_SIZE_8B, p + HEADER_SIZE_4B, size_org);
-                // reconstruct 8-byte header
-                uint64_t large_size = size;
-                uint64_t header =
-                    ((large_size) << 8) | (((uint32_t)mid_org) | (0x80));
-                uint32_t low4b = (uint32_t)(header & 0xFFFFFFFF);
-                uint32_t high4b = (uint32_t)(header >> 32);
-                *(uint32_t *)p = high4b;
-                *(uint32_t *)(p + 4) = low4b;
-                ModStat::get_instance().update_alloc(
-                    mid_org, int32_t(size) - int32_t(size_org));
-                return p + HEADER_SIZE_8B;
-            }
-        }
-    } else {  // header_org is 8byte
-        uint64_t header =
-            ((uint64_t)(*(uint32_t *)(p - 4))) |
-            ((uint64_t)(*(uint32_t *)(p - 8)) << 32);  // 8Byte header
-        mid_org = (AllocModID)(header & 0x7F);
-        size_org = (uint32_t)(header >> 8);
-        if (size <= 0xFFFFFF) {
-            uint32_t save_data =
-                *(uint32_t *)(p - HEADER_SIZE_8B + HEADER_SIZE_4B + size);
-            p = (char *)realloc(p - HEADER_SIZE_8B, size + HEADER_SIZE_4B);
-            if (UNLIKELY(p == nullptr)) {
-                return nullptr;
-            } else {
-                std::memmove(p + HEADER_SIZE_4B, p + HEADER_SIZE_8B,
-                             size - HEADER_SIZE_4B);
-                // reconstruct 4-byte header
-                uint32_t header4b = (size << 8) | (((uint32_t)mid_org));
-                *((uint32_t *)p) = header4b;
-                // reconstruct data
-                *(uint32_t *)((char *)p + size - 4) = save_data;
-                ModStat::get_instance().update_alloc(
-                    mid_org, int32_t(size) - int32_t(size_org));
-                return p + HEADER_SIZE_4B;
-            }
-        } else {
-            p = (char *)realloc(p - HEADER_SIZE_8B, size + HEADER_SIZE_8B);
-            if (UNLIKELY(p == nullptr)) {
-                return nullptr;
-            } else {
-                uint64_t large_size = size;
-                uint64_t header8b =
-                    ((large_size) << 8) | (((uint32_t)mid_org) | (0x80));
-                uint32_t low4b = (uint32_t)(header8b & 0xFFFFFFFF);
-                uint32_t high4b = (uint32_t)(header8b >> 32);
-                *(uint32_t *)p = high4b;
-                *(uint32_t *)(p + 4) = low4b;
-                ModStat::get_instance().update_alloc(
-                    mid_org, int32_t(size) - int32_t(size_org));
-                return p + HEADER_SIZE_8B;
-            }
-        }
+    char *p = static_cast<char *>(ptr);
+    char *raw_ptr = p - ALIGNMENT;
+    const uint64_t header =
+        static_cast<uint64_t>(*reinterpret_cast<uint32_t *>(raw_ptr + 4)) |
+        (static_cast<uint64_t>(*reinterpret_cast<uint32_t *>(raw_ptr)) << 32);
+    auto mid = static_cast<AllocModID>(header & 0x7F);
+    auto original_size = static_cast<uint32_t>(header >> 8);
+    p = static_cast<char *>(realloc(raw_ptr, size + ALIGNMENT));
+    if (UNLIKELY(p == nullptr)) {
+        return nullptr;
     }
+
+    uint64_t data_size = size;
+    uint64_t header_new = (data_size << 8) | static_cast<uint32_t>(mid);
+    auto low4b = static_cast<uint32_t>(header_new & 0xFFFFFFFF);
+    auto high4b = static_cast<uint32_t>(header_new >> 32);
+    *reinterpret_cast<uint32_t *>(p) = high4b;
+    *reinterpret_cast<uint32_t *>(p + 4) = low4b;
+    ModStat::get_instance().update_alloc(
+        mid, int32_t(size) - int32_t(original_size));
+    return p + ALIGNMENT;
 }
 
 void ModStat::init() {
@@ -247,11 +173,6 @@
 
 void ModStat::destroy() { ::free(stat_arr_); }
 
-// TODO return to SQL
-void ModStat::print_stat() {
-    //
-}
-
 BaseAllocator g_base_allocator;
 
 }  // end namespace common
\ No newline at end of file
diff --git a/cpp/src/common/allocator/my_string.h b/cpp/src/common/allocator/my_string.h
index 9f5d8a5..ef27f2d 100644
--- a/cpp/src/common/allocator/my_string.h
+++ b/cpp/src/common/allocator/my_string.h
@@ -35,11 +35,12 @@
 
     String() : buf_(nullptr), len_(0) {}
     String(char *buf, uint32_t len) : buf_(buf), len_(len) {}
-    String(const std::string& str, common::PageArena& pa) : buf_(nullptr), len_(0) {
+    String(const std::string &str, common::PageArena &pa)
+        : buf_(nullptr), len_(0) {
         dup_from(str, pa);
     }
-    String(const std::string& str) {
-        buf_ = (char*)str.c_str();
+    String(const std::string &str) {
+        buf_ = (char *)str.c_str();
         len_ = str.size();
     }
     FORCE_INLINE bool is_null() const { return buf_ == nullptr && len_ == 0; }
@@ -67,6 +68,7 @@
     FORCE_INLINE int dup_from(const String &str, common::PageArena &pa) {
         len_ = str.len_;
         if (UNLIKELY(len_ == 0)) {
+            buf_ = nullptr;
             return common::E_OK;
         }
         buf_ = pa.alloc(len_);
@@ -123,9 +125,15 @@
     // return < 0, if this < that
     // return > 0, if this > that
     FORCE_INLINE int compare(const String &that) const {
-        if (len_ == 0 || that.len_ == 0) {
+        if (len_ == 0 && that.len_ == 0) {
             return 0;
         }
+        if (len_ == 0) {
+            return -1;
+        }
+        if (that.len_ == 0) {
+            return 1;
+        }
         uint32_t min_len = std::min(len_, that.len_);
         int cmp_res = memcmp(buf_, that.buf_, min_len);
         if (cmp_res == 0) {
diff --git a/cpp/src/common/allocator/object_pool.h b/cpp/src/common/allocator/object_pool.h
new file mode 100644
index 0000000..721823a
--- /dev/null
+++ b/cpp/src/common/allocator/object_pool.h
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef COMMON_ALLOCTOR_OBJECT_POOL_H
+#define COMMON_ALLOCTOR_OBJECT_POOL_H
+
+#include "common/allocator/alloc_base.h"
+#include "common/mutex/mutex.h"
+
+namespace common {
+
+template <class T>
+class ObjectPool {
+   private:
+    struct ObjectPoolNode {
+        T data_;
+        ObjectPoolNode *next_;
+
+        ObjectPoolNode() : data_(), next_(nullptr) {}
+    };
+
+   public:
+    /*
+     * max_cache_count is a soft limitation:
+     */
+    ObjectPool(const uint32_t max_cache_count, const AllocModID mid,
+               BaseAllocator &allocator = g_base_allocator)
+        : max_cache_count_(max_cache_count),
+          cur_alloc_count_(0),
+          mid_(mid),
+          allocator_(allocator),
+          mutex_(),
+          head_(nullptr) {
+        assert(max_cache_count > 1);
+    }
+
+    ~ObjectPool() { destroy(); }
+
+    void destroy() {
+        ObjectPoolNode *cur = head_;
+        while (cur) {
+            head_ = cur->next_;
+            allocator_.free(cur);
+            cur = head_;
+            cur_alloc_count_--;
+        }
+        ASSERT(cur_alloc_count_ == 0);
+    }
+
+    T *alloc() {
+        T *ret_obj = nullptr;
+        common::MutexGuard g(mutex_);
+        if (head_) {
+            ret_obj = &(head_->data_);
+            head_ = head_->next_;
+            return ret_obj;
+        } else {
+            void *buf = allocator_.alloc(sizeof(ObjectPoolNode), mid_);
+            if (UNLIKELY(buf == nullptr)) {
+                return nullptr;
+            }
+            cur_alloc_count_++;
+            ret_obj = &(new (buf) ObjectPoolNode)->data_;
+            return ret_obj;
+        }
+    }
+
+    void free(T *obj) {
+        ASSERT(obj != nullptr);
+        common::MutexGuard g(mutex_);
+        if (cur_alloc_count_ > max_cache_count_) {
+            allocator_.free(obj);
+            cur_alloc_count_--;
+            ASSERT(cur_alloc_count_ >= 0);
+        } else {
+            ObjectPoolNode *n = (ObjectPoolNode *)obj;
+            n->next_ = head_;
+            head_ = n;
+        }
+    }
+
+    uint32_t get_cur_alloc_count() const { return cur_alloc_count_; }
+
+   private:
+    uint32_t max_cache_count_;
+    uint32_t cur_alloc_count_;
+    AllocModID mid_;
+    BaseAllocator allocator_;
+    common::Mutex mutex_;
+    ObjectPoolNode *head_;  // freelist head
+};
+
+}  // namespace common
+#endif  // COMMON_ALLOCTOR_OBJECT_POOL_H
diff --git a/cpp/src/common/allocator/page_arena.h b/cpp/src/common/allocator/page_arena.h
index b8053cf..938cac1 100644
--- a/cpp/src/common/allocator/page_arena.h
+++ b/cpp/src/common/allocator/page_arena.h
@@ -20,6 +20,8 @@
 #ifndef COMMON_ALLOCATOR_PAGE_ARENA_H
 #define COMMON_ALLOCATOR_PAGE_ARENA_H
 
+#include <cstddef>
+
 #include "alloc_base.h"
 
 namespace common {
@@ -68,20 +70,17 @@
                 (char *)this + sizeof(Page);  // equals to (char*)(this+1)
             page_end_ = cur_alloc_ + page_size;
         }
-        INLINE char *alloc(uint32_t size) {
-            if (cur_alloc_ + size > page_end_) {
+        INLINE char *alloc(uint32_t size,
+                           size_t alignment = alignof(std::max_align_t)) {
+            auto current = reinterpret_cast<uintptr_t>(cur_alloc_);
+            uintptr_t aligned = (current + alignment - 1) & ~(alignment - 1);
+            char *ret = reinterpret_cast<char *>(aligned);
+            if (ret + size > page_end_) {
                 return nullptr;
-            } else {
-                char *ret = cur_alloc_;
-                cur_alloc_ += size;
-                return ret;
-                //        char *ret = cur_alloc_;
-                //        cur_alloc_ += size;
-                //        int address = reinterpret_cast<uintptr_t>(cur_alloc_);
-                //        int new_addr = (address + 3) & (~3);
-                //        cur_alloc_ = reinterpret_cast<char *>(new_addr);
-                //        return ret;
             }
+
+            cur_alloc_ = ret + size;
+            return ret;
         }
 
        public:
diff --git a/cpp/src/common/allocator/stl_allocator.h b/cpp/src/common/allocator/stl_allocator.h
new file mode 100644
index 0000000..e858398
--- /dev/null
+++ b/cpp/src/common/allocator/stl_allocator.h
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef COMMON_ALLOCATOR_STL_ALLOCATOR_H
+#define COMMON_ALLOCATOR_STL_ALLOCATOR_H
+
+#include "alloc_base.h"
+
+namespace common {
+
+template <class T, AllocModID Mid, class TAllocator = BaseAllocator>
+class StlAllocator {
+   public:
+    typedef size_t size_type;
+    typedef ptrdiff_t difference_type;
+    typedef T *pointer;
+    typedef const T *const_pointer;
+    typedef T &reference;
+    typedef const T &const_reference;
+    typedef T value_type;
+
+    /*
+     * rebind provides a way to obtain an allocator for a different type.
+     * For example, std::list will alloc object Node<T> beside alloc object T.
+     */
+    template <class U>
+    struct rebind {
+        typedef StlAllocator<U, Mid, TAllocator> other;
+    };
+
+    StlAllocator() {}
+    StlAllocator(const StlAllocator &) {}
+
+    template <class T2, AllocModID Mid2>
+    StlAllocator(const StlAllocator<T2, Mid2, TAllocator> &) {}
+
+    StlAllocator(TAllocator base_allocator) : base_allocator_(base_allocator) {}
+
+    pointer address(reference x) const { return &x; }
+    const_pointer address(const_reference x) { return &x; }
+
+    pointer allocate(size_type n, const void *hint = 0) {
+        return (pointer)base_allocator_.alloc(n * sizeof(T), Mid);
+    };
+    void deallocate(void *p, size_type) { base_allocator_.free(p); }
+    size_type max_size() const { return uint32_t(-1); }
+
+    void construct(pointer p, const T &val) { new ((T *)p) T(val); }
+    void destroy(pointer p) { p->~T(); }
+
+   private:
+    TAllocator base_allocator_;
+};
+
+/*
+ * According to the manual, allocator is stateless.
+ * Although we define a base_allocator_ here, but base_allocator_ is also
+ * stateless. so '==' is always true and '!=' is always false. refer to
+ * https://en.cppreference.com/w/cpp/memory/allocator/operator_cmp.
+ */
+template <class T1, AllocModID Mid1, class T2, AllocModID Mid2>
+bool operator==(const StlAllocator<T1, Mid1> &a1,
+                const StlAllocator<T2, Mid2> &a2) {
+    return true;
+}
+
+template <class T1, AllocModID Mid1, class T2, AllocModID Mid2>
+bool operator!=(const StlAllocator<T1, Mid1> &a1,
+                const StlAllocator<T2, Mid2> &a2) {
+    return false;
+}
+
+}  // end namespace common
+#endif  // COMMON_ALLOCATOR_STL_ALLOCATOR_H
diff --git a/cpp/src/common/allocator/util_define.h b/cpp/src/common/allocator/util_define.h
new file mode 100644
index 0000000..0526c08
--- /dev/null
+++ b/cpp/src/common/allocator/util_define.h
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * This file defines some basic macros
+ */
+
+#ifndef COMMON_UTIL_DEFINE_H
+#define COMMON_UTIL_DEFINE_H
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+/* ======== cmake config define ======== */
+
+/* ======== unsued ======== */
+#define UNUSED(v) ((void)(v))
+
+/* ======== inline ======== */
+#ifdef __GNUC__
+#define FORCE_INLINE inline __attribute__((always_inline))
+#else
+#define FORCE_INLINE inline
+#endif  // __GNUC__
+
+#ifdef BUILD_FOR_SMALL_BINARY
+#define INLINE FORCE_INLINE
+#else
+#define INLINE
+#endif  // BUILD_FOR_SMALL_BINARY
+
+/* ======== likely ======== */
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define LIKELY(x) (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+#else
+#define LIKELY(x) (x)
+#define UNLIKELY(x) (x)
+#endif  // __GNUC__ >= 4
+
+/* ======== nullptr ======== */
+#if __cplusplus < 201103L
+#ifndef nullptr
+#define nullptr NULL
+#endif
+#define OVERRIDE
+#else
+#define OVERRIDE override
+#endif  // __cplusplus < 201103L
+
+/* ======== cache line ======== */
+#ifndef CACHE_LINE_SIZE
+#define CACHE_LINE_SIZE 64
+#endif  // CACHE_LINE_SIZE
+
+/* ======== assert ======== */
+#ifdef NDEBUG
+#define ASSERT(condition) ((void)0)
+#else
+#define ASSERT(condition) assert((condition))
+#endif  // NDEBUG
+
+/* ======== statis assert ======== */
+/*
+ * To be compatible with C++ before C++11,
+ * @msg should be a single word (use -/_ to concat)
+ * such as This_should_be_TRUE
+ */
+#if __cplusplus < 201103L
+// TODO only define this when DEBUG
+#define STATIC_ASSERT(cond, msg) \
+    typedef char static_assertion_##msg[(cond) ? 1 : -1] __attribute__((unused))
+#else
+#define STATIC_ASSERT(cond, msg) static_assert((cond), #msg)
+#endif  // __cplusplus < 201103L
+
+/* ======== atomic operation ======== */
+#define ATOMIC_FAA(val_addr, addv) \
+    __atomic_fetch_add((val_addr), (addv), __ATOMIC_SEQ_CST)
+#define ATOMIC_AAF(val_addr, addv) \
+    __atomic_add_fetch((val_addr), (addv), __ATOMIC_SEQ_CST)
+/*
+ * It implements an atomic compare and exchange operation.
+ * This compares the contents of *ptr with the contents of *expected.
+ * - If equal, the operation is a reader-modify-writer operation that writes
+ * desired into *ptr.
+ * - If they are not equal, the operation is a reader and the current contents
+ * of *ptr are written into *expected
+ */
+#define ATOMIC_CAS(val_addr, expected, desired)                            \
+    __atomic_compare_exchange_n((val_addr), (expected), (desired),         \
+                                /* weak = */ false,                        \
+                                /* success_memorder = */ __ATOMIC_SEQ_CST, \
+                                /* failure_memorder = */ __ATOMIC_SEQ_CST)
+#define ATOMIC_LOAD(val_addr) __atomic_load_n((val_addr), __ATOMIC_SEQ_CST)
+#define ATOMIC_STORE(val_addr, val) \
+    __atomic_store_n((val_addr), (val), __ATOMIC_SEQ_CST)
+
+/* ======== align ======== */
+#define ALIGNED(a) __attribute__((aligned(a)))
+#define ALIGNED_4 ALIGNED(4)
+#define ALIGNED_8 ALIGNED(8)
+
+/* ======== disallow copy and assign ======== */
+#if __cplusplus < 201103L
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+    TypeName(const TypeName&);             \
+    void operator=(const TypeName&)
+#else
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+    TypeName(const TypeName&) = delete;    \
+    TypeName& operator=(const TypeName&) = delete;
+#endif
+
+/* ======== return value check ======== */
+#define RET_FAIL(expr) UNLIKELY(common::E_OK != (ret = (expr)))
+#define RFAIL(expr) UNLIKELY(common::E_OK != (ret = (expr)))
+#define RET_SUCC(expr) LIKELY(common::E_OK != (ret = (exprt)))
+#define RSUCC(expr) LIKELY(common::E_OK != (ret = (exprt)))
+#define IS_SUCC(ret) LIKELY(common::E_OK == (ret))
+#define IS_FAIL(ret) UNLIKELY(common::E_OK != (ret))
+
+#define IS_NULL(ptr) UNLIKELY((ptr) == nullptr)
+
+/* ======== min/max ======== */
+#define UTIL_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define UTIL_MIN(a, b) ((a) > (b) ? (b) : (a))
+
+/*
+ * int64_max < 10^20
+ * consider +/- and the '\0' tail. 24 is enough
+ */
+#define INT64_TO_BASE10_MAX_LEN 24
+
+#endif  // COMMON_UTIL_DEFINE_H
diff --git a/cpp/src/common/cache/lru_cache.h b/cpp/src/common/cache/lru_cache.h
index b316558..048a16e 100644
--- a/cpp/src/common/cache/lru_cache.h
+++ b/cpp/src/common/cache/lru_cache.h
@@ -52,12 +52,11 @@
     typedef std::list<KeyValuePair<Key, Value>> list_type;
     typedef Map map_type;
     /**
-     * the maxSize is the soft limit of entries and (maxSize + elasticity) is the
-     * hard limit
-     * the cache is allowed to grow till (maxSize + elasticity) and is pruned
-     * back to maxSize entries
-     * set maxSize = 0 for an unbounded cache (but in that
-     * case, you're better off using a std::unordered_map directly anyway! :)
+     * the maxSize is the soft limit of entries and (maxSize + elasticity) is
+     * the hard limit the cache is allowed to grow till (maxSize + elasticity)
+     * and is pruned back to maxSize entries set maxSize = 0 for an unbounded
+     * cache (but in that case, you're better off using a std::unordered_map
+     * directly anyway! :)
      */
     explicit Cache(size_t maxSize = 64, size_t elasticity = 10)
         : maxSize_(maxSize), elasticity_(elasticity) {}
diff --git a/cpp/src/common/constant/tsfile_constant.h b/cpp/src/common/constant/tsfile_constant.h
index af5d70f..d3f4dec 100644
--- a/cpp/src/common/constant/tsfile_constant.h
+++ b/cpp/src/common/constant/tsfile_constant.h
@@ -16,30 +16,36 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-#include <string>
+
+#ifndef COMMON_CONSTANT_TSFILE_CONSTANT_H_
+#define COMMON_CONSTANT_TSFILE_CONSTANT_H_
 #include <regex>
+#include <string>
 
-namespace storage
-{
-    static const std::string TSFILE_SUFFIX = ".tsfile";
-    static const std::string TSFILE_HOME = "TSFILE_HOME";
-    static const std::string TSFILE_CONF = "TSFILE_CONF";
-    static const std::string PATH_ROOT = "root";
-    static const std::string TMP_SUFFIX = "tmp";
-    static const std::string PATH_SEPARATOR = ".";
-    static const char PATH_SEPARATOR_CHAR = '.';
-    static const std::string PATH_SEPARATER_NO_REGEX = "\\.";
-    static const char DOUBLE_QUOTE = '"';
-    static const char BACK_QUOTE = '`';
-    static const std::string BACK_QUOTE_STRING = "`";
-    static const std::string DOUBLE_BACK_QUOTE_STRING = "``";
- 
-    static const unsigned char TIME_COLUMN_MASK = 0x80;
-    static const unsigned char VALUE_COLUMN_MASK = 0x40;
+namespace storage {
+static const std::string TSFILE_SUFFIX = ".tsfile";
+static const std::string TSFILE_HOME = "TSFILE_HOME";
+static const std::string TSFILE_CONF = "TSFILE_CONF";
+static const std::string PATH_ROOT = "root";
+static const std::string TMP_SUFFIX = "tmp";
+static const std::string PATH_SEPARATOR = ".";
+static const char PATH_SEPARATOR_CHAR = '.';
+static const std::string PATH_SEPARATER_NO_REGEX = "\\.";
+static const char DOUBLE_QUOTE = '"';
+static const char BACK_QUOTE = '`';
+static const std::string BACK_QUOTE_STRING = "`";
+static const std::string DOUBLE_BACK_QUOTE_STRING = "``";
 
-    static const std::string TIME_COLUMN_ID = "";
- 
-    static const std::regex IDENTIFIER_PATTERN("([a-zA-Z0-9_\\u2E80-\\u9FFF]+)");
-    static const std::regex NODE_NAME_PATTERN("(\\*{0,2}[a-zA-Z0-9_\\u2E80-\\u9FFF]+\\*{0,2})");
-    static const int DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME = 3;
-} // namespace storage
+static const unsigned char TIME_COLUMN_MASK = 0x80;
+static const unsigned char VALUE_COLUMN_MASK = 0x40;
+
+static const std::string TIME_COLUMN_ID = "";
+static const int NO_STR_TO_READ = -1;
+
+static const std::regex IDENTIFIER_PATTERN("([a-zA-Z0-9_\\u2E80-\\u9FFF]+)");
+static const std::regex NODE_NAME_PATTERN(
+    "(\\*{0,2}[a-zA-Z0-9_\\u2E80-\\u9FFF]+\\*{0,2})");
+static const int DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME = 3;
+}  // namespace storage
+
+#endif
diff --git a/cpp/src/common/container/byte_buffer.h b/cpp/src/common/container/byte_buffer.h
index 967d364..becf4a4 100644
--- a/cpp/src/common/container/byte_buffer.h
+++ b/cpp/src/common/container/byte_buffer.h
@@ -107,9 +107,11 @@
 
     // for variable len value
     FORCE_INLINE char *read(uint32_t offset, uint32_t *len) {
-        // get len
-        *len = *reinterpret_cast<uint32_t *>(&data_[offset]);
-        // get value
+        uint32_t tmp;
+        // Directly memcpy to avoid potential alignment issues when casting
+        // int32_t array pointer
+        std::memcpy(&tmp, data_ + offset, sizeof(tmp));
+        *len = tmp;
         char *p = &data_[offset + variable_type_len_];
         return p;
     }
diff --git a/cpp/src/common/container/murmur_hash3.cc b/cpp/src/common/container/murmur_hash3.cc
index 14a0602..2565423 100644
--- a/cpp/src/common/container/murmur_hash3.cc
+++ b/cpp/src/common/container/murmur_hash3.cc
@@ -22,84 +22,88 @@
 
 /* ================ Murmur128Hash ================ */
 // follow Java IoTDB exactly.
-int64_t Murmur128Hash::inner_hash(const char *buf, int32_t len, int64_t seed) {
-    const int block_count = len >> 4;  // as 128-bit blocks
-    int64_t h1 = seed;
-    int64_t h2 = seed;
-    int64_t c1 = 0x87c37b91114253d5L;
-    int64_t c2 = 0x4cf5ad432745937fL;
+int64_t Murmur128Hash::inner_hash(const char* buf, int32_t len, int64_t seed) {
+    const int32_t block_count = len >> 4;
+    uint64_t h1 = static_cast<uint64_t>(seed);
+    uint64_t h2 = static_cast<uint64_t>(seed);
+    const uint64_t c1 = 0x87c37b91114253d5ULL;
+    const uint64_t c2 = 0x4cf5ad432745937fULL;
 
-    for (int i = 0; i < block_count; i++) {
-        int64_t k1 = get_block(buf, i * 2);
-        int64_t k2 = get_block(buf, i * 2 + 1);
+    // body blocks
+    for (int32_t i = 0; i < block_count; ++i) {
+        uint64_t k1 = get_block(buf, i * 2);
+        uint64_t k2 = get_block(buf, i * 2 + 1);
+
         k1 *= c1;
         k1 = rotl64(k1, 31);
         k1 *= c2;
         h1 ^= k1;
         h1 = rotl64(h1, 27);
         h1 += h2;
-        h1 = h1 * 5 + 0x52dce729;
+        h1 = h1 * 5 + 0x52dce729ULL;
+
         k2 *= c2;
         k2 = rotl64(k2, 33);
         k2 *= c1;
         h2 ^= k2;
         h2 = rotl64(h2, 31);
         h2 += h1;
-        h2 = h2 * 5 + 0x38495ab5;
+        h2 = h2 * 5 + 0x38495ab5ULL;
     }
 
-    int offset = block_count * 16;
-    int64_t k1 = 0;
-    int64_t k2 = 0;
+    // tail
+    const int32_t offset = block_count * 16;
+    uint64_t k1 = 0;
+    uint64_t k2 = 0;
     switch (len & 15) {
         case 15:
-            k2 ^= ((int64_t)(buf[offset + 14])) << 48;
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 14] << 48;
             // fallthrough
         case 14:
-            k2 ^= ((int64_t)(buf[offset + 13])) << 40;
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 13] << 40;
             // fallthrough
         case 13:
-            k2 ^= ((int64_t)(buf[offset + 12])) << 32;
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 12] << 32;
             // fallthrough
         case 12:
-            k2 ^= ((int64_t)(buf[offset + 11])) << 24;
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 11] << 24;
             // fallthrough
         case 11:
-            k2 ^= ((int64_t)(buf[offset + 10])) << 16;
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 10] << 16;
             // fallthrough
         case 10:
-            k2 ^= ((int64_t)(buf[offset + 9])) << 8;
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 9] << 8;
             // fallthrough
         case 9:
-            k2 ^= buf[offset + 8];
+            k2 ^= (uint64_t)(uint8_t)buf[offset + 8];
             k2 *= c2;
             k2 = rotl64(k2, 33);
             k2 *= c1;
             h2 ^= k2;
             // fallthrough
         case 8:
-            k1 ^= ((int64_t)buf[offset + 7]) << 56;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 7] << 56;
             // fallthrough
         case 7:
-            k1 ^= ((int64_t)buf[offset + 6]) << 48;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 6] << 48;
             // fallthrough
         case 6:
-            k1 ^= ((int64_t)buf[offset + 5]) << 40;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 5] << 40;
             // fallthrough
         case 5:
-            k1 ^= ((int64_t)buf[offset + 4]) << 32;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 4] << 32;
             // fallthrough
         case 4:
-            k1 ^= ((int64_t)buf[offset + 3]) << 24;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 3] << 24;
             // fallthrough
         case 3:
-            k1 ^= ((int64_t)buf[offset + 2]) << 16;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 2] << 16;
             // fallthrough
         case 2:
-            k1 ^= ((int64_t)buf[offset + 1]) << 8;
+            k1 ^= (uint64_t)(uint8_t)buf[offset + 1] << 8;
             // fallthrough
         case 1:
-            k1 ^= buf[offset];
+            k1 ^= (uint64_t)(uint8_t)buf[offset];
             k1 *= c1;
             k1 = rotl64(k1, 31);
             k1 *= c2;
@@ -110,18 +114,21 @@
             break;
     }
 
-    h1 ^= len;
-    h2 ^= len;
+    // finalization
+    h1 ^= static_cast<uint64_t>(len);
+    h2 ^= static_cast<uint64_t>(len);
     h1 += h2;
     h2 += h1;
+
     h1 = fmix(h1);
     h2 = fmix(h2);
+
     h1 += h2;
     h2 += h1;
-    return h1 + h2;
+    return static_cast<int64_t>(h1 + h2);
 }
 
-int64_t Murmur128Hash::get_block(const char *buf, int32_t index) {
+int64_t Murmur128Hash::get_block(const char* buf, int32_t index) {
     int block_offset = index << 3;
     int64_t res = 0;
     res += ((int64_t)(buf[block_offset + 0] & 0xFF));
diff --git a/cpp/src/common/datatype/CMakeLists.txt b/cpp/src/common/datatype/CMakeLists.txt
new file mode 100644
index 0000000..f7db3af
--- /dev/null
+++ b/cpp/src/common/datatype/CMakeLists.txt
@@ -0,0 +1,18 @@
+#[[
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+]]
diff --git a/cpp/src/common/datatype/date_converter.h b/cpp/src/common/datatype/date_converter.h
new file mode 100644
index 0000000..82d7644
--- /dev/null
+++ b/cpp/src/common/datatype/date_converter.h
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef COMMON_DATATYPE_DATE_CONVERTER_H
+#define COMMON_DATATYPE_DATE_CONVERTER_H
+
+#include <cstdint>
+#include <ctime>
+
+#include "utils/errno_define.h"
+
+namespace common {
+class DateConverter {
+   public:
+    static int date_to_int(const std::tm& tm_date, int32_t& out_int) {
+        if (tm_date.tm_year == -1 || tm_date.tm_mon == -1 ||
+            tm_date.tm_mday == -1) {
+            return E_INVALID_ARG;
+        }
+
+        const int year = tm_date.tm_year + 1900;
+        const int month = tm_date.tm_mon + 1;
+        const int day = tm_date.tm_mday;
+
+        if (year < 1000 || year > 9999 || month < 1 || month > 12 || day < 1 ||
+            day > 31) {
+            return E_INVALID_ARG;
+        }
+
+        // Normalize the tm structure and validate the date
+        std::tm tmp = tm_date;
+        tmp.tm_hour = 12;
+        tmp.tm_isdst = -1;
+        if (std::mktime(&tmp) == -1) {
+            return E_INVALID_ARG;
+        }
+
+        if (tmp.tm_year != tm_date.tm_year || tmp.tm_mon != tm_date.tm_mon ||
+            tmp.tm_mday != tm_date.tm_mday) {
+            return E_INVALID_ARG;
+        }
+
+        const int64_t result =
+            static_cast<int64_t>(year) * 10000 + month * 100 + day;
+        if (result > INT32_MAX || result < INT32_MIN) {
+            return E_OUT_OF_RANGE;
+        }
+
+        out_int = static_cast<int32_t>(result);
+        return E_OK;
+    }
+
+    static bool is_tm_ymd_equal(const std::tm& tm1, const std::tm& tm2) {
+        return tm1.tm_year == tm2.tm_year && tm1.tm_mon == tm2.tm_mon &&
+               tm1.tm_mday == tm2.tm_mday;
+    }
+
+    static int int_to_date(int32_t date_int, std::tm& out_tm) {
+        if (date_int == 0) {
+            out_tm.tm_year = out_tm.tm_mon = out_tm.tm_mday = -1;
+            return E_INVALID_ARG;
+        }
+
+        int year = date_int / 10000;
+        int month = (date_int % 10000) / 100;
+        int day = date_int % 100;
+
+        if (year < 1000 || year > 9999 || month < 1 || month > 12 || day < 1 ||
+            day > 31) {
+            return E_INVALID_ARG;
+        }
+
+        out_tm = {0};
+        out_tm.tm_year = year - 1900;
+        out_tm.tm_mon = month - 1;
+        out_tm.tm_mday = day;
+        out_tm.tm_hour = 12;
+        out_tm.tm_isdst = -1;
+
+        if (std::mktime(&out_tm) == -1) {
+            return E_INVALID_ARG;
+        }
+        if (out_tm.tm_year != year - 1900 || out_tm.tm_mon != month - 1 ||
+            out_tm.tm_mday != day) {
+            return E_INVALID_ARG;
+        }
+
+        return E_OK;
+    }
+};
+}  // namespace common
+#endif  // COMMON_DATATYPE_DATE_CONVERTER_H
diff --git a/cpp/src/common/datatype/value.h b/cpp/src/common/datatype/value.h
index 29fd570..8fb77a1 100644
--- a/cpp/src/common/datatype/value.h
+++ b/cpp/src/common/datatype/value.h
@@ -88,6 +88,7 @@
                 value_.bval_ = *(bool *)val;
                 break;
             }
+            case common::DATE:
             case common::INT32: {
                 value_.ival_ = *(int32_t *)val;
                 break;
@@ -104,6 +105,8 @@
                 value_.dval_ = *(double *)val;
                 break;
             }
+            case common::BLOB:
+            case common::STRING:
             case common::TEXT: {
                 value_.sval_ = strdup((const char *)val);
                 break;
diff --git a/cpp/src/common/db_common.h b/cpp/src/common/db_common.h
index 5fe8b4f..485a0c1 100644
--- a/cpp/src/common/db_common.h
+++ b/cpp/src/common/db_common.h
@@ -21,16 +21,18 @@
 #define COMMON_DB_COMMON_H
 
 #include <iostream>
+#include <unordered_set>
 
-#include "utils/util_define.h"
 #include "common/allocator/my_string.h"
+#include "utils/util_define.h"
 
 namespace common {
 
 /**
  * @brief Represents the data type of a measurement.
  *
- * This enumeration defines the supported data types for measurements in the system.
+ * This enumeration defines the supported data types for measurements in the
+ * system.
  */
 enum TSDataType : uint8_t {
     BOOLEAN = 0,
@@ -40,6 +42,10 @@
     DOUBLE = 4,
     TEXT = 5,
     VECTOR = 6,
+    UNKNOWN = 7,
+    TIMESTAMP = 8,
+    DATE = 9,
+    BLOB = 10,
     STRING = 11,
     NULL_TYPE = 254,
     INVALID_DATATYPE = 255
@@ -48,7 +54,8 @@
 /**
  * @brief Represents the encoding method for a measurement.
  *
- * This enumeration defines the supported encoding methods that can be applied to measurements.
+ * This enumeration defines the supported encoding methods that can be applied
+ * to measurements.
  */
 enum TSEncoding : uint8_t {
     PLAIN = 0,
@@ -62,13 +69,15 @@
     GORILLA = 8,
     ZIGZAG = 9,
     FREQ = 10,
+    SPRINTZ = 12,
     INVALID_ENCODING = 255
 };
 
 /**
  * @brief Represents the compression type for a measurement.
  *
- * This enumeration defines the supported compression methods that can be applied to measurements.
+ * This enumeration defines the supported compression methods that can be
+ * applied to measurements.
  */
 enum CompressionType : uint8_t {
     UNCOMPRESSED = 0,
@@ -132,14 +141,53 @@
     return common::STRING;
 }
 
+template <typename T>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType() {
+    return {common::INVALID_DATATYPE};
+}
+
+template <>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType<bool>() {
+    return {common::BOOLEAN};
+}
+template <>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType<int32_t>() {
+    return {common::INT32, common::DATE, common::INT64};
+}
+template <>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType<int64_t>() {
+    return {common::INT64, TIMESTAMP};
+}
+template <>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType<float>() {
+    return {common::FLOAT, common::DOUBLE};
+}
+template <>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType<double>() {
+    return {common::DOUBLE};
+}
+template <>
+FORCE_INLINE std::unordered_set<common::TSDataType>
+GetDataTypesFromTemplateType<common::String>() {
+    return {common::STRING, common::TEXT, common::BLOB};
+}
+
 FORCE_INLINE size_t get_data_type_size(TSDataType data_type) {
     switch (data_type) {
         case common::BOOLEAN:
             return 1;
+        case common::DATE:
         case common::INT32:
         case common::FLOAT:
             return 4;
         case common::INT64:
+        case common::TIMESTAMP:
         case common::DOUBLE:
             return 8;
         default:
diff --git a/cpp/src/common/device_id.h b/cpp/src/common/device_id.h
index 021cb6a..b3a173d 100644
--- a/cpp/src/common/device_id.h
+++ b/cpp/src/common/device_id.h
@@ -35,25 +35,25 @@
 
 namespace storage {
 class IDeviceID {
-public:
+   public:
     virtual ~IDeviceID() = default;
     virtual int serialize(common::ByteStream& write_stream) { return 0; }
     virtual int deserialize(common::ByteStream& read_stream) { return 0; }
     virtual std::string get_table_name() { return ""; }
     virtual int segment_num() { return 0; }
-    virtual const std::vector<std::string>& get_segments() const {
+    virtual const std::vector<std::string*>& get_segments() const {
         return empty_segments_;
     }
     virtual std::string get_device_name() const { return ""; };
-    virtual bool operator<(const IDeviceID& other) { return 0; }
+    virtual bool operator<(const IDeviceID& other) { return false; }
     virtual bool operator==(const IDeviceID& other) { return false; }
     virtual bool operator!=(const IDeviceID& other) { return false; }
 
-protected:
+   protected:
     IDeviceID() : empty_segments_() {}
 
-private:
-    const std::vector<std::string> empty_segments_;
+   private:
+    const std::vector<std::string*> empty_segments_;
 };
 
 struct IDeviceIDComparator {
@@ -64,23 +64,51 @@
 };
 
 class StringArrayDeviceID : public IDeviceID {
-public:
+   public:
     explicit StringArrayDeviceID(const std::vector<std::string>& segments)
         : segments_(formalize(segments)) {}
 
-    explicit StringArrayDeviceID(const std::string& device_id_string)
-        : segments_(split_device_id_string(device_id_string)) {}
+    explicit StringArrayDeviceID(const std::string& device_id_string) {
+        auto segments = split_device_id_string(device_id_string);
+        segments_.reserve(segments.size());
+        for (const auto& segment : segments) {
+            segments_.push_back(new std::string(segment));
+        }
+    }
+
+    explicit StringArrayDeviceID(const std::vector<std::string*>& segments) {
+        segments_.reserve(segments.size());
+        for (const auto& segment : segments) {
+            segments_.push_back(segment == nullptr ? nullptr
+                                                   : new std::string(*segment));
+        }
+    }
 
     explicit StringArrayDeviceID() : segments_() {}
 
-    ~StringArrayDeviceID() override = default;
+    ~StringArrayDeviceID() override {
+        for (const auto& segment : segments_) {
+            delete segment;
+        }
+    }
 
     std::string get_device_name() const override {
-        return segments_.empty() ? "" : std::accumulate(std::next(segments_.begin()), segments_.end(),
-                               segments_.front(),
-                               [](std::string a, const std::string& b) {
-                                   return std::move(a) + "." + b;
-                               });
+        if (segments_.empty()) {
+            return "";
+        }
+
+        std::string result(*segments_.front());
+        for (auto it = std::next(segments_.begin()); it != segments_.end();
+             ++it) {
+            result += '.';
+            if (*it != nullptr) {
+                result += **it;
+            } else {
+                result += "null";
+            }
+        }
+
+        return result;
     };
 
     int serialize(common::ByteStream& write_stream) override {
@@ -88,12 +116,12 @@
         if (RET_FAIL(common::SerializationUtil::write_var_uint(segment_num(),
                                                                write_stream))) {
             return ret;
-                                                               }
+        }
         for (const auto& segment : segments_) {
-            if (RET_FAIL(common::SerializationUtil::write_var_str(segment,
-                                                              write_stream))) {
+            if (RET_FAIL(common::SerializationUtil::write_var_char_ptr(
+                    segment, write_stream))) {
                 return ret;
-                                                              }
+            }
         }
         return ret;
     }
@@ -101,13 +129,23 @@
     int deserialize(common::ByteStream& read_stream) override {
         int ret = common::E_OK;
         uint32_t num_segments;
-        if (RET_FAIL(common::SerializationUtil::read_var_uint(num_segments, read_stream))) {
+        if (RET_FAIL(common::SerializationUtil::read_var_uint(num_segments,
+                                                              read_stream))) {
             return ret;
         }
+
+        for (auto& segment : segments_) {
+            if (segment != nullptr) {
+                delete segment;
+            }
+        }
+
         segments_.clear();
         for (uint32_t i = 0; i < num_segments; ++i) {
-            std::string segment;
-            if (RET_FAIL(common::SerializationUtil::read_var_str(segment, read_stream))) {
+            std::string* segment;
+            if (RET_FAIL(common::SerializationUtil::read_var_char_ptr(
+                    segment, read_stream))) {
+                delete segment;
                 return ret;
             }
             segments_.push_back(segment);
@@ -116,52 +154,69 @@
     }
 
     std::string get_table_name() override {
-        return segments_.empty() ? "" : segments_[0];
+        return segments_.empty() ? "" : *segments_[0];
     }
 
     int segment_num() override { return static_cast<int>(segments_.size()); }
 
-    const std::vector<std::string>& get_segments() const override {
+    const std::vector<std::string*>& get_segments() const override {
         return segments_;
     }
 
-    virtual bool operator<(const IDeviceID& other) override {
+    bool operator<(const IDeviceID& other) override {
         auto other_segments = other.get_segments();
-        return std::lexicographical_compare(segments_.begin(), segments_.end(),
-                                            other_segments.begin(),
-                                            other_segments.end());
+        return std::lexicographical_compare(
+            segments_.begin(), segments_.end(), other_segments.begin(),
+            other_segments.end(),
+            [](const std::string* a, const std::string* b) {
+                if (a == nullptr && b == nullptr) return false;  // equal
+                if (a == nullptr) return true;   // nullptr < any string
+                if (b == nullptr) return false;  // any string > nullptr
+                return *a < *b;
+            });
     }
 
     bool operator==(const IDeviceID& other) override {
         auto other_segments = other.get_segments();
         return (segments_.size() == other_segments.size()) &&
                std::equal(segments_.begin(), segments_.end(),
-                          other_segments.begin());
+                          other_segments.begin(),
+                          [](const std::string* a, const std::string* b) {
+                              if (a == nullptr && b == nullptr) return true;
+                              if (a == nullptr || b == nullptr) return false;
+                              return *a == *b;
+                          });
     }
 
     bool operator!=(const IDeviceID& other) override {
         return !(*this == other);
     }
 
-private:
-    std::vector<std::string> segments_;
+   private:
+    std::vector<std::string*> segments_;
 
-    std::vector<std::string> formalize(
+    static std::vector<std::string*> formalize(
         const std::vector<std::string>& segments) {
         auto it =
             std::find_if(segments.rbegin(), segments.rend(),
                          [](const std::string& seg) { return !seg.empty(); });
-        return std::vector<std::string>(segments.begin(), it.base());
+        std::vector<std::string> validate_segments(segments.begin(), it.base());
+        std::vector<std::string*> result;
+        result.reserve(validate_segments.size());
+        for (const auto& segment : validate_segments) {
+            result.emplace_back(new std::string(segment));
+        }
+        return result;
     }
 
-    std::vector<std::string> split_device_id_string(
+    static std::vector<std::string> split_device_id_string(
         const std::string& device_id_string) {
         auto splits =
             storage::PathNodesGenerator::invokeParser(device_id_string);
         return split_device_id_string(splits);
     }
 
-    std::vector<std::string> split_device_id_string(
+    static std::vector<std::string> split_device_id_string(
         const std::vector<std::string>& splits) {
         size_t segment_cnt = splits.size();
         std::vector<std::string> final_segments;
@@ -173,8 +228,9 @@
         if (segment_cnt == 1) {
             // "root" -> {"root"}
             final_segments.push_back(splits[0]);
-        } else if (segment_cnt < static_cast<size_t>(
-            storage::DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME + 1)) {
+        } else if (segment_cnt <
+                   static_cast<size_t>(
+                       storage::DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME + 1)) {
             // "root.a" -> {"root", "a"}
             // "root.a.b" -> {"root.a", "b"}
             std::string table_name = std::accumulate(
@@ -184,26 +240,26 @@
                 });
             final_segments.push_back(table_name);
             final_segments.push_back(splits.back());
-            } else {
-                // "root.a.b.c" -> {"root.a.b", "c"}
-                // "root.a.b.c.d" -> {"root.a.b", "c", "d"}
-                std::string table_name = std::accumulate(
-                    splits.begin(),
-                    splits.begin() + storage::DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME,
-                    std::string(), [](const std::string& a, const std::string& b) {
-                        return a.empty() ? b : a + storage::PATH_SEPARATOR + b;
-                    });
+        } else {
+            // "root.a.b.c" -> {"root.a.b", "c"}
+            // "root.a.b.c.d" -> {"root.a.b", "c", "d"}
+            std::string table_name = std::accumulate(
+                splits.begin(),
+                splits.begin() + storage::DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME,
+                std::string(), [](const std::string& a, const std::string& b) {
+                    return a.empty() ? b : a + storage::PATH_SEPARATOR + b;
+                });
 
-                final_segments.emplace_back(std::move(table_name));
-                final_segments.insert(
-                    final_segments.end(),
-                    splits.begin() + storage::DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME,
-                    splits.end());
-            }
+            final_segments.emplace_back(std::move(table_name));
+            final_segments.insert(
+                final_segments.end(),
+                splits.begin() + storage::DEFAULT_SEGMENT_NUM_FOR_TABLE_NAME,
+                splits.end());
+        }
 
         return final_segments;
     }
 };
-}
+}  // namespace storage
 
 #endif
\ No newline at end of file
diff --git a/cpp/src/common/global.cc b/cpp/src/common/global.cc
index 32b9371..21b3dad 100644
--- a/cpp/src/common/global.cc
+++ b/cpp/src/common/global.cc
@@ -59,16 +59,18 @@
         case BOOLEAN:
             return g_config_value_.boolean_encoding_type_;
         case INT32:
+        case DATE:
             return g_config_value_.int32_encoding_type_;
         case INT64:
+        case TIMESTAMP:
             return g_config_value_.int64_encoding_type_;
         case FLOAT:
             return g_config_value_.float_encoding_type_;
         case DOUBLE:
             return g_config_value_.double_encoding_type_;
         case TEXT:
-            return g_config_value_.string_encoding_type_;
         case STRING:
+        case BLOB:
             return g_config_value_.string_encoding_type_;
         case VECTOR:
             break;
@@ -100,7 +102,7 @@
 
 const char* s_encoding_names[12] = {
     "PLAIN",      "DICTIONARY", "RLE",     "DIFF",   "TS_2DIFF", "BITMAP",
-    "GORILLA_V1", "REGULAR",    "GORILLA", "ZIGZAG", "FREQ"};
+    "GORILLA_V1", "REGULAR",    "GORILLA", "ZIGZAG", "FREQ",     "SPRINTZ"};
 
 const char* s_compression_names[8] = {
     "UNCOMPRESSED", "SNAPPY", "GZIP", "LZO", "SDT", "PAA", "PLA", "LZ4",
diff --git a/cpp/src/common/global.h b/cpp/src/common/global.h
index 3f33146..564f30c 100644
--- a/cpp/src/common/global.h
+++ b/cpp/src/common/global.h
@@ -40,7 +40,8 @@
 
 FORCE_INLINE int set_global_time_encoding(uint8_t encoding) {
     ASSERT(encoding >= PLAIN && encoding <= FREQ);
-    if (encoding != TS_2DIFF && encoding != PLAIN) {
+    if (encoding != TS_2DIFF && encoding != PLAIN && encoding != GORILLA &&
+        encoding != ZIGZAG && encoding != RLE && encoding != SPRINTZ) {
         return E_NOT_SUPPORT;
     }
     g_config_value_.time_encoding_type_ = static_cast<TSEncoding>(encoding);
@@ -49,7 +50,8 @@
 
 FORCE_INLINE int set_global_time_compression(uint8_t compression) {
     ASSERT(compression >= UNCOMPRESSED && compression <= LZ4);
-    if (compression != UNCOMPRESSED && compression != LZ4) {
+    if (compression != UNCOMPRESSED && compression != SNAPPY &&
+        compression != GZIP && compression != LZO && compression != LZ4) {
         return E_NOT_SUPPORT;
     }
     g_config_value_.time_compress_type_ =
@@ -58,52 +60,52 @@
 }
 
 FORCE_INLINE int set_datatype_encoding(uint8_t data_type, uint8_t encoding) {
-    int code = E_OK;
-    TSDataType dtype = static_cast<TSDataType>(data_type);
+    const TSDataType dtype = static_cast<TSDataType>(data_type);
+    const TSEncoding encoding_type = static_cast<TSEncoding>(encoding);
+
+    // Validate input parameters
     ASSERT(dtype >= BOOLEAN && dtype <= STRING);
-    TSEncoding encoding_type = static_cast<TSEncoding>(encoding);
-    ASSERT(encoding >= PLAIN && encoding <= FREQ);
+    ASSERT(encoding >= PLAIN && encoding <= SPRINTZ);
+
+    // Check encoding support for each data type
     switch (dtype) {
         case BOOLEAN:
-            if (encoding_type != PLAIN) {
-                return E_NOT_SUPPORT;
-            }
+            if (encoding_type != PLAIN) return E_NOT_SUPPORT;
             g_config_value_.boolean_encoding_type_ = encoding_type;
             break;
+
         case INT32:
-            if (encoding_type != PLAIN && encoding_type != TS_2DIFF &&
-                encoding_type != GORILLA) {
-                return E_NOT_SUPPORT;
-            }
-            g_config_value_.int32_encoding_type_ = encoding_type;
-            break;
+        case DATE:
         case INT64:
             if (encoding_type != PLAIN && encoding_type != TS_2DIFF &&
-                encoding_type != GORILLA) {
+                encoding_type != GORILLA && encoding_type != ZIGZAG &&
+                encoding_type != RLE && encoding_type != SPRINTZ) {
                 return E_NOT_SUPPORT;
             }
-            g_config_value_.int64_encoding_type_ = encoding_type;
+            dtype == INT32
+                ? g_config_value_.int32_encoding_type_ = encoding_type
+                : g_config_value_.int64_encoding_type_ = encoding_type;
             break;
+
+        case FLOAT:
+        case DOUBLE:
+            if (encoding_type != PLAIN && encoding_type != TS_2DIFF &&
+                encoding_type != GORILLA && encoding_type != SPRINTZ) {
+                return E_NOT_SUPPORT;
+            }
+            dtype == FLOAT
+                ? g_config_value_.float_encoding_type_ = encoding_type
+                : g_config_value_.double_encoding_type_ = encoding_type;
+            break;
+
         case STRING:
-            if (encoding_type != PLAIN) {
+        case TEXT:
+            if (encoding_type != PLAIN && encoding_type != DICTIONARY) {
                 return E_NOT_SUPPORT;
             }
             g_config_value_.string_encoding_type_ = encoding_type;
             break;
-        case FLOAT:
-            if (encoding_type != PLAIN && encoding_type != TS_2DIFF &&
-                encoding_type != GORILLA) {
-                return E_NOT_SUPPORT;
-            }
-            g_config_value_.float_encoding_type_ = encoding_type;
-            break;
-        case DOUBLE:
-            if (encoding_type != PLAIN && encoding_type != TS_2DIFF &&
-                encoding_type != GORILLA) {
-                return E_NOT_SUPPORT;
-            }
-            g_config_value_.double_encoding_type_ = encoding_type;
-            break;
+
         default:
             break;
     }
@@ -112,7 +114,8 @@
 
 FORCE_INLINE int set_global_compression(uint8_t compression) {
     ASSERT(compression >= UNCOMPRESSED && compression <= LZ4);
-    if (compression != UNCOMPRESSED && compression != LZ4) {
+    if (compression != UNCOMPRESSED && compression != SNAPPY &&
+        compression != GZIP && compression != LZO && compression != LZ4) {
         return E_NOT_SUPPORT;
     }
     g_config_value_.default_compression_type_ =
@@ -120,6 +123,46 @@
     return E_OK;
 }
 
+FORCE_INLINE uint8_t get_global_time_encoding() {
+    return static_cast<uint8_t>(g_config_value_.time_encoding_type_);
+}
+
+FORCE_INLINE uint8_t get_global_time_compression() {
+    return static_cast<uint8_t>(g_config_value_.time_compress_type_);
+}
+
+FORCE_INLINE uint8_t get_datatype_encoding(uint8_t data_type) {
+    const TSDataType dtype = static_cast<TSDataType>(data_type);
+
+    // Validate input parameter
+    ASSERT(dtype >= BOOLEAN && dtype <= STRING);
+
+    switch (dtype) {
+        case BOOLEAN:
+            return static_cast<uint8_t>(g_config_value_.boolean_encoding_type_);
+        case INT32:
+            return static_cast<uint8_t>(g_config_value_.int32_encoding_type_);
+        case INT64:
+            return static_cast<uint8_t>(g_config_value_.int64_encoding_type_);
+        case FLOAT:
+            return static_cast<uint8_t>(g_config_value_.float_encoding_type_);
+        case DOUBLE:
+            return static_cast<uint8_t>(g_config_value_.double_encoding_type_);
+        case STRING:
+        case TEXT:
+            return static_cast<uint8_t>(g_config_value_.string_encoding_type_);
+        case DATE:
+            return static_cast<uint8_t>(g_config_value_.int64_encoding_type_);
+        default:
+            return static_cast<uint8_t>(
+                PLAIN);  // Return default encoding for unknown types
+    }
+}
+
+FORCE_INLINE uint8_t get_global_compression() {
+    return static_cast<uint8_t>(g_config_value_.default_compression_type_);
+}
+
 extern int init_common();
 extern bool is_timestamp_column_name(const char *time_col_name);
 extern void cols_to_json(ByteStream *byte_stream,
diff --git a/cpp/src/common/mutex/mutex.h b/cpp/src/common/mutex/mutex.h
index 264d71d..8409e15 100644
--- a/cpp/src/common/mutex/mutex.h
+++ b/cpp/src/common/mutex/mutex.h
@@ -43,7 +43,7 @@
     void unlock() {
         int ret = pthread_mutex_unlock(&mutex_);
         ASSERT(ret == 0);
-        (void) ret;
+        (void)ret;
     }
 
     bool try_lock() {
diff --git a/cpp/src/common/path.h b/cpp/src/common/path.h
index f4b6f82..8318a47 100644
--- a/cpp/src/common/path.h
+++ b/cpp/src/common/path.h
@@ -51,7 +51,8 @@
                     PathNodesGenerator::invokeParser(path_sc);
                 if (nodes.size() > 1) {
                     device_id_ = std::make_shared<StringArrayDeviceID>(
-                        std::vector<std::string>(nodes.begin(), nodes.end() - 1));
+                        std::vector<std::string>(nodes.begin(),
+                                                 nodes.end() - 1));
                     measurement_ = nodes[nodes.size() - 1];
                     full_path_ =
                         device_id_->get_device_name() + "." + measurement_;
diff --git a/cpp/src/common/record.h b/cpp/src/common/record.h
index fc3c563..370508a 100644
--- a/cpp/src/common/record.h
+++ b/cpp/src/common/record.h
@@ -24,6 +24,7 @@
 #include <vector>
 
 #include "common/allocator/my_string.h"
+#include "common/datatype/date_converter.h"
 #include "common/db_common.h"
 #include "utils/errno_define.h"
 
@@ -46,7 +47,6 @@
 struct DataPoint {
     bool isnull = false;
     std::string measurement_name_;
-    common::TSDataType data_type_;
     union {
         bool bool_val_;
         int32_t i32_val_;
@@ -58,45 +58,33 @@
     TextType text_val_;
 
     DataPoint(const std::string &measurement_name, bool b)
-        : measurement_name_(measurement_name),
-          data_type_(common::BOOLEAN),
-          text_val_() {
+        : measurement_name_(measurement_name), text_val_() {
         u_.bool_val_ = b;
     }
 
     DataPoint(const std::string &measurement_name, int32_t i32)
-        : measurement_name_(measurement_name),
-          data_type_(common::INT32),
-          text_val_() {
+        : measurement_name_(measurement_name), text_val_() {
         u_.i32_val_ = i32;
     }
 
     DataPoint(const std::string &measurement_name, int64_t i64)
-        : measurement_name_(measurement_name),
-          data_type_(common::INT64),
-          text_val_() {
+        : measurement_name_(measurement_name), text_val_() {
         u_.i64_val_ = i64;
     }
 
     DataPoint(const std::string &measurement_name, float f)
-        : measurement_name_(measurement_name),
-          data_type_(common::FLOAT),
-          text_val_() {
+        : measurement_name_(measurement_name), text_val_() {
         u_.float_val_ = f;
     }
 
     DataPoint(const std::string &measurement_name, double d)
-        : measurement_name_(measurement_name),
-          data_type_(common::DOUBLE),
-          text_val_() {
+        : measurement_name_(measurement_name), text_val_() {
         u_.double_val_ = d;
     }
 
     DataPoint(const std::string &measurement_name, common::String &str,
               common::PageArena &pa)
-        : measurement_name_(measurement_name),
-          data_type_(common::STRING),
-          text_val_() {
+        : measurement_name_(measurement_name), text_val_() {
         char *p_buf = (char *)pa.alloc(sizeof(common::String));
         u_.str_val_ = new (p_buf) common::String();
         u_.str_val_->dup_from(str, pa);
@@ -110,22 +98,18 @@
     DataPoint(const std::string &measurement_name)
         : isnull(true), measurement_name_(measurement_name) {}
     void set_i32(int32_t i32) {
-        data_type_ = common::INT32;
         u_.i32_val_ = i32;
         isnull = false;
     }
     void set_i64(int64_t i64) {
-        data_type_ = common::INT64;
         u_.i64_val_ = i64;
         isnull = false;
     }
     void set_float(float f) {
-        data_type_ = common::FLOAT;
         u_.float_val_ = f;
         isnull = false;
     }
     void set_double(double d) {
-        data_type_ = common::DOUBLE;
         u_.double_val_ = d;
         isnull = false;
     }
@@ -155,15 +139,26 @@
         points_.emplace_back(DataPoint(measurement_name, val));
         return ret;
     }
-
 };
 
-template<>
-inline int TsRecord::add_point(const std::string &measurement_name, common::String val) {
+template <>
+inline int TsRecord::add_point(const std::string &measurement_name,
+                               common::String val) {
     int ret = common::E_OK;
     points_.emplace_back(DataPoint(measurement_name, val, pa));
     return ret;
 }
 
+template <>
+inline int TsRecord::add_point(const std::string &measurement_name,
+                               std::tm val) {
+    int ret = common::E_OK;
+    int data_int;
+    if (RET_SUCC(common::DateConverter::date_to_int(val, data_int))) {
+        points_.emplace_back(DataPoint(measurement_name, data_int));
+    }
+    return ret;
+}
+
 }  // end namespace storage
 #endif  // COMMON_RECORD_H
diff --git a/cpp/src/common/row_record.h b/cpp/src/common/row_record.h
index 2e449a5..5ff5e23 100644
--- a/cpp/src/common/row_record.h
+++ b/cpp/src/common/row_record.h
@@ -23,6 +23,7 @@
 #include <vector>
 
 #include "common/allocator/my_string.h"
+#include "common/datatype/date_converter.h"
 #include "common/db_common.h"
 
 namespace storage {
@@ -30,18 +31,15 @@
     Field() : type_(common::INVALID_DATATYPE) {}
     Field(common::TSDataType type) : type_(type) {}
 
-    ~Field() {
-        free_memory();
-    }
+    ~Field() { free_memory(); }
 
     FORCE_INLINE void free_memory() {
-        if (type_ == common::TEXT && value_.sval_ != nullptr) {
-            free(value_.sval_);
-            value_.sval_ = nullptr;
-        }
-        if (type_ == common::STRING && value_.strval_ != nullptr) {
-            delete value_.strval_;
-            value_.strval_ = nullptr;
+        if (type_ == common::BLOB || type_ == common::TEXT ||
+            type_ == common::STRING) {
+            if (value_.strval_ != nullptr) {
+                delete value_.strval_;
+                value_.strval_ = nullptr;
+            }
         }
     }
 
@@ -55,9 +53,8 @@
                is_type(common::NULL_TYPE);
     }
 
-    template <class T>
-    FORCE_INLINE void set_value(common::TSDataType type, T val,
-                                common::PageArena &pa) {
+    void set_value(common::TSDataType type, void *val, size_t len,
+                   common::PageArena &pa) {
         if (val == nullptr) {
             type_ = common::NULL_TYPE;
             return;
@@ -68,10 +65,12 @@
                 value_.bval_ = *(bool *)val;
                 break;
             }
+            case common::DATE:
             case common::INT32: {
                 value_.ival_ = *(int32_t *)val;
                 break;
             }
+            case common::TIMESTAMP:
             case common::INT64: {
                 value_.lval_ = *(int64_t *)val;
                 break;
@@ -84,15 +83,14 @@
                 value_.dval_ = *(double *)val;
                 break;
             }
+            case common::TEXT:
+            case common::BLOB:
             case common::STRING: {
                 value_.strval_ = new common::String();
-                value_.strval_->dup_from(*(common::String *)val, pa);
+                value_.strval_->dup_from(
+                    std::string(static_cast<char *>(val), len), pa);
                 break;
             }
-            // case common::TEXT: {
-            //   value_.sval_ = strdup(val);
-            //   break;
-            // }
             default: {
                 assert(false);
                 std::cout << "unknown data type" << std::endl;
@@ -107,14 +105,13 @@
                 return value_.bval_;
             case common::TSDataType::INT32:
                 return value_.ival_;
+            case common::TSDataType::TIMESTAMP:
             case common::TSDataType::INT64:
                 return value_.lval_;
             case common::TSDataType::FLOAT:
                 return value_.fval_;
             case common::TSDataType::DOUBLE:
                 return value_.dval_;
-            // case common::TSDataType::TEXT :
-            //     return value_.sval_;
             default:
                 std::cout << "unknown data type" << std::endl;
                 break;
@@ -122,8 +119,18 @@
         return -1;  // when data type is unknown
     }
 
+    FORCE_INLINE std::tm get_date_value() {
+        std::tm date_value{};
+        if (type_ == common::DATE) {
+            common::DateConverter::int_to_date(value_.ival_, date_value);
+            return date_value;
+        }
+        return date_value;
+    }
+
     FORCE_INLINE common::String *get_string_value() {
-        if (type_ == common::STRING) {
+        if (type_ == common::STRING || type_ == common::TEXT ||
+            type_ == common::BLOB) {
             return value_.strval_;
         } else {
             return nullptr;
@@ -211,6 +218,7 @@
     FORCE_INLINE void reset() {
         for (uint32_t i = 0; i < col_num_; ++i) {
             if ((*fields_)[i]->type_ == common::TEXT ||
+                (*fields_)[i]->type_ == common::BLOB ||
                 (*fields_)[i]->type_ == common::STRING) {
                 (*fields_)[i]->free_memory();
             }
diff --git a/cpp/src/common/schema.h b/cpp/src/common/schema.h
index 0227aa5..06e7e7e 100644
--- a/cpp/src/common/schema.h
+++ b/cpp/src/common/schema.h
@@ -331,9 +331,7 @@
         }
     }
 
-    size_t get_column_pos_index_num() const {
-        return column_pos_index_.size();
-    }
+    size_t get_column_pos_index_num() const { return column_pos_index_.size(); }
 
     void update(ChunkGroupMeta *chunk_group_meta) {
         for (auto iter = chunk_group_meta->chunk_meta_list_.begin();
diff --git a/cpp/src/common/statistic.h b/cpp/src/common/statistic.h
index 8eb866e..d4d31b9 100644
--- a/cpp/src/common/statistic.h
+++ b/cpp/src/common/statistic.h
@@ -21,6 +21,7 @@
 #define COMMON_STATISTIC_H
 
 #include <inttypes.h>
+
 #include <sstream>
 
 #include "common/allocator/alloc_base.h"
@@ -95,6 +96,16 @@
         }                                       \
     } while (false)
 
+#define TEXT_VALUE_STAT_UPDATE(value)           \
+    do {                                        \
+        if (UNLIKELY(count_ == 0)) {            \
+            first_value_.dup_from(value, *pa_); \
+            last_value_.dup_from(value, *pa_);  \
+        } else {                                \
+            last_value_.dup_from(value, *pa_);  \
+        }                                       \
+    } while (false)
+
 #define NUM_STAT_UPDATE(time, value)    \
     do {                                \
         /* update time */               \
@@ -113,6 +124,22 @@
         count_++;                          \
     } while (false)
 
+#define TEXT_STAT_UPDATE(time, value)    \
+    do {                                 \
+        /* update time */                \
+        TIME_STAT_UPDATE((time));        \
+        /* update string value */        \
+        TEXT_VALUE_STAT_UPDATE((value)); \
+        count_++;                        \
+    } while (false)
+
+#define BLOB_STAT_UPDATE(time, value) \
+    do {                              \
+        /* update time */             \
+        TIME_STAT_UPDATE((time));     \
+        count_++;                     \
+    } while (false)
+
 #define BOOL_STAT_UPDATE(time, value)    \
     do {                                 \
         /* update time */                \
@@ -164,6 +191,11 @@
         ASSERT(false);
         return 0;
     }
+
+    int get_count() const { return count_; }
+
+    int64_t get_end_time() const { return end_time_; }
+
     virtual int deserialize_from(common::ByteStream &in) {
         int ret = common::E_OK;
         if (RET_FAIL(common::SerializationUtil::read_var_uint(
@@ -310,6 +342,66 @@
         return common::E_OK;                                           \
     } while (false)
 
+#define MERGE_TEXT_STAT_FROM(StatType, untyped_stat)                   \
+    do {                                                               \
+        if (UNLIKELY(untyped_stat == nullptr)) {                       \
+            return common::E_INVALID_ARG;                              \
+        }                                                              \
+        StatType *typed_stat = (StatType *)(untyped_stat);             \
+        if (UNLIKELY(typed_stat == nullptr)) {                         \
+            return common::E_TYPE_NOT_MATCH;                           \
+        }                                                              \
+        if (UNLIKELY(typed_stat->count_ == 0)) {                       \
+            return common::E_OK;                                       \
+        }                                                              \
+        if (count_ == 0) {                                             \
+            count_ = typed_stat->count_;                               \
+            start_time_ = typed_stat->start_time_;                     \
+            end_time_ = typed_stat->end_time_;                         \
+            first_value_.dup_from(typed_stat->first_value_, *pa_);     \
+            last_value_.dup_from(typed_stat->last_value_, *pa_);       \
+        } else {                                                       \
+            count_ += typed_stat->count_;                              \
+            if (typed_stat->start_time_ < start_time_) {               \
+                start_time_ = typed_stat->start_time_;                 \
+                first_value_.dup_from(typed_stat->first_value_, *pa_); \
+            }                                                          \
+            if (typed_stat->end_time_ > end_time_) {                   \
+                end_time_ = typed_stat->end_time_;                     \
+                last_value_.dup_from(typed_stat->last_value_, *pa_);   \
+            }                                                          \
+        }                                                              \
+        return common::E_OK;                                           \
+    } while (false)
+
+#define MERGE_BLOB_STAT_FROM(StatType, untyped_stat)       \
+    do {                                                   \
+        if (UNLIKELY(untyped_stat == nullptr)) {           \
+            return common::E_INVALID_ARG;                  \
+        }                                                  \
+        StatType *typed_stat = (StatType *)(untyped_stat); \
+        if (UNLIKELY(typed_stat == nullptr)) {             \
+            return common::E_TYPE_NOT_MATCH;               \
+        }                                                  \
+        if (UNLIKELY(typed_stat->count_ == 0)) {           \
+            return common::E_OK;                           \
+        }                                                  \
+        if (count_ == 0) {                                 \
+            count_ = typed_stat->count_;                   \
+            start_time_ = typed_stat->start_time_;         \
+            end_time_ = typed_stat->end_time_;             \
+        } else {                                           \
+            count_ += typed_stat->count_;                  \
+            if (typed_stat->start_time_ < start_time_) {   \
+                start_time_ = typed_stat->start_time_;     \
+            }                                              \
+            if (typed_stat->end_time_ > end_time_) {       \
+                end_time_ = typed_stat->end_time_;         \
+            }                                              \
+        }                                                  \
+        return common::E_OK;                               \
+    } while (false)
+
 #define MERGE_TIME_STAT_FROM(StatType, untyped_stat)       \
     do {                                                   \
         if (UNLIKELY(untyped_stat == nullptr)) {           \
@@ -395,6 +487,38 @@
         return common::E_OK;                                   \
     } while (false)
 
+#define DEEP_COPY_TEXT_STAT_FROM(StatType, untyped_stat)       \
+    do {                                                       \
+        if (UNLIKELY(untyped_stat == nullptr)) {               \
+            return common::E_INVALID_ARG;                      \
+        }                                                      \
+        StatType *typed_stat = (StatType *)(untyped_stat);     \
+        if (UNLIKELY(typed_stat == nullptr)) {                 \
+            return common::E_TYPE_NOT_MATCH;                   \
+        }                                                      \
+        count_ = typed_stat->count_;                           \
+        start_time_ = typed_stat->start_time_;                 \
+        end_time_ = typed_stat->end_time_;                     \
+        first_value_.dup_from(typed_stat->first_value_, *pa_); \
+        last_value_.dup_from(typed_stat->last_value_, *pa_);   \
+        return common::E_OK;                                   \
+    } while (false)
+
+#define DEEP_COPY_BLOB_STAT_FROM(StatType, untyped_stat)   \
+    do {                                                   \
+        if (UNLIKELY(untyped_stat == nullptr)) {           \
+            return common::E_INVALID_ARG;                  \
+        }                                                  \
+        StatType *typed_stat = (StatType *)(untyped_stat); \
+        if (UNLIKELY(typed_stat == nullptr)) {             \
+            return common::E_TYPE_NOT_MATCH;               \
+        }                                                  \
+        count_ = typed_stat->count_;                       \
+        start_time_ = typed_stat->start_time_;             \
+        end_time_ = typed_stat->end_time_;                 \
+        return common::E_OK;                               \
+    } while (false)
+
 #define DEEP_COPY_TIME_STAT_FROM(StatType, untyped_stat)   \
     do {                                                   \
         if (UNLIKELY(untyped_stat == nullptr)) {           \
@@ -430,7 +554,7 @@
         last_value_ = that.last_value_;
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         sum_value_ = 0;
         first_value_ = false;
@@ -501,7 +625,7 @@
         last_value_ = that.last_value_;
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         sum_value_ = 0;
         min_value_ = 0;
@@ -562,19 +686,19 @@
 
     std::string to_string() const {
         std::ostringstream oss;
-        oss << "{count=" << count_
-            << ", start_time=" << start_time_
-            << ", end_time=" << end_time_
-            << ", first_val=" << first_value_
-            << ", last_val=" << last_value_
-            << ", sum_value=" << sum_value_
-            << ", min_value=" << min_value_
-            << ", max_value=" << max_value_
+        oss << "{count=" << count_ << ", start_time=" << start_time_
+            << ", end_time=" << end_time_ << ", first_val=" << first_value_
+            << ", last_val=" << last_value_ << ", sum_value=" << sum_value_
+            << ", min_value=" << min_value_ << ", max_value=" << max_value_
             << "}";
         return oss.str();
     }
 };
 
+class DateStatistic : public Int32Statistic {
+    FORCE_INLINE common::TSDataType get_type() { return common::DATE; }
+};
+
 class Int64Statistic : public Statistic {
    public:
     double sum_value_;
@@ -602,7 +726,7 @@
         last_value_ = that.last_value_;
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         sum_value_ = 0;
         min_value_ = 0;
@@ -655,14 +779,10 @@
 
     std::string to_string() const {
         std::ostringstream oss;
-        oss << "{count=" << count_
-            << ", start_time=" << start_time_
-            << ", end_time=" << end_time_
-            << ", first_val=" << first_value_
-            << ", last_val=" << last_value_
-            << ", sum_value=" << sum_value_
-            << ", min_value=" << min_value_
-            << ", max_value=" << max_value_
+        oss << "{count=" << count_ << ", start_time=" << start_time_
+            << ", end_time=" << end_time_ << ", first_val=" << first_value_
+            << ", last_val=" << last_value_ << ", sum_value=" << sum_value_
+            << ", min_value=" << min_value_ << ", max_value=" << max_value_
             << "}";
         return oss.str();
     }
@@ -695,7 +815,7 @@
         last_value_ = that.last_value_;
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         sum_value_ = 0;
         min_value_ = 0;
@@ -772,7 +892,7 @@
         last_value_ = that.last_value_;
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         sum_value_ = 0;
         min_value_ = 0;
@@ -840,7 +960,7 @@
         end_time_ = that.end_time_;
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         start_time_ = 0;
         end_time_ = 0;
@@ -865,14 +985,16 @@
 
     std::string to_string() const {
         std::ostringstream oss;
-        oss << "{count=" << count_
-            << ", start_time=" << start_time_
-            << ", end_time=" << end_time_
-            << "}";
+        oss << "{count=" << count_ << ", start_time=" << start_time_
+            << ", end_time=" << end_time_ << "}";
         return oss.str();
     }
 };
 
+class TimestampStatistics : public Int64Statistic {
+    FORCE_INLINE common::TSDataType get_type() { return common::TIMESTAMP; }
+};
+
 class StringStatistic : public Statistic {
    public:
     common::String min_value_;
@@ -897,7 +1019,7 @@
         }
     }
 
-    FORCE_INLINE void reset() { 
+    FORCE_INLINE void reset() {
         count_ = 0;
         start_time_ = 0;
         end_time_ = 0;
@@ -925,27 +1047,26 @@
 
     int serialize_typed_stat(common::ByteStream &out) {
         int ret = common::E_OK;
-        if (RET_FAIL(common::SerializationUtil::write_str(
-                       first_value_, out))) {
-        } else if (RET_FAIL(common::SerializationUtil::write_str(
-                       last_value_, out))) {
-        } else if (RET_FAIL(common::SerializationUtil::write_str(
-                       min_value_, out))) {
-        } else if (RET_FAIL(common::SerializationUtil::write_str(
-                       max_value_, out))) {
+        if (RET_FAIL(common::SerializationUtil::write_str(first_value_, out))) {
+        } else if (RET_FAIL(common::SerializationUtil::write_str(last_value_,
+                                                                 out))) {
+        } else if (RET_FAIL(
+                       common::SerializationUtil::write_str(min_value_, out))) {
+        } else if (RET_FAIL(
+                       common::SerializationUtil::write_str(max_value_, out))) {
         }
         return ret;
     }
     int deserialize_typed_stat(common::ByteStream &in) {
         int ret = common::E_OK;
-        if (RET_FAIL(common::SerializationUtil::read_str(
-                       first_value_, pa_, in))) {
-        } else if (RET_FAIL(common::SerializationUtil::read_str(
-                       last_value_, pa_, in))) {
-        } else if (RET_FAIL(common::SerializationUtil::read_str(
-                       min_value_, pa_, in))) {
-        } else if (RET_FAIL(common::SerializationUtil::read_str(
-                       max_value_, pa_, in))) {
+        if (RET_FAIL(
+                common::SerializationUtil::read_str(first_value_, pa_, in))) {
+        } else if (RET_FAIL(common::SerializationUtil::read_str(last_value_,
+                                                                pa_, in))) {
+        } else if (RET_FAIL(common::SerializationUtil::read_str(min_value_, pa_,
+                                                                in))) {
+        } else if (RET_FAIL(common::SerializationUtil::read_str(max_value_, pa_,
+                                                                in))) {
         }
         return ret;
     }
@@ -960,12 +1081,134 @@
     common::PageArena *pa_;
 };
 
+class TextStatistic : public Statistic {
+   public:
+    common::String first_value_;
+    common::String last_value_;
+    TextStatistic() : first_value_(), last_value_() {
+        pa_ = new common::PageArena();
+        pa_->init(512, common::MOD_STATISTIC_OBJ);
+    }
+
+    TextStatistic(common::PageArena *pa)
+        : first_value_(), last_value_(), pa_(pa) {}
+
+    ~TextStatistic() { destroy(); }
+
+    void destroy() {
+        if (pa_) {
+            delete pa_;
+            pa_ = nullptr;
+        }
+    }
+
+    FORCE_INLINE void reset() {
+        count_ = 0;
+        start_time_ = 0;
+        end_time_ = 0;
+        first_value_ = common::String();
+        last_value_ = common::String();
+    }
+    void clone_from(const TextStatistic &that) {
+        count_ = that.count_;
+        start_time_ = that.start_time_;
+        end_time_ = that.end_time_;
+
+        first_value_.dup_from(that.first_value_, *pa_);
+        last_value_.dup_from(that.last_value_, *pa_);
+    }
+
+    FORCE_INLINE void update(int64_t time, common::String value) {
+        TEXT_STAT_UPDATE(time, value);
+    }
+
+    FORCE_INLINE common::TSDataType get_type() { return common::TEXT; }
+
+    int serialize_typed_stat(common::ByteStream &out) {
+        int ret = common::E_OK;
+        if (RET_FAIL(common::SerializationUtil::write_str(first_value_, out))) {
+        } else if (RET_FAIL(common::SerializationUtil::write_str(last_value_,
+                                                                 out))) {
+        }
+        return ret;
+    }
+    int deserialize_typed_stat(common::ByteStream &in) {
+        int ret = common::E_OK;
+        if (RET_FAIL(
+                common::SerializationUtil::read_str(first_value_, pa_, in))) {
+        } else if (RET_FAIL(common::SerializationUtil::read_str(last_value_,
+                                                                pa_, in))) {
+        }
+        return ret;
+    }
+    int merge_with(Statistic *stat) {
+        MERGE_TEXT_STAT_FROM(TextStatistic, stat);
+    }
+    int deep_copy_from(Statistic *stat) {
+        DEEP_COPY_TEXT_STAT_FROM(TextStatistic, stat);
+    }
+
+   private:
+    common::PageArena *pa_;
+};
+
+class BlobStatistic : public Statistic {
+   public:
+    BlobStatistic() {
+        pa_ = new common::PageArena();
+        pa_->init(512, common::MOD_STATISTIC_OBJ);
+    }
+
+    BlobStatistic(common::PageArena *pa) {}
+
+    ~BlobStatistic() { destroy(); }
+
+    void destroy() {
+        if (pa_) {
+            delete pa_;
+            pa_ = nullptr;
+        }
+    }
+
+    FORCE_INLINE void reset() {
+        count_ = 0;
+        start_time_ = 0;
+        end_time_ = 0;
+    }
+    void clone_from(const BlobStatistic &that) {
+        count_ = that.count_;
+        start_time_ = that.start_time_;
+        end_time_ = that.end_time_;
+    }
+
+    FORCE_INLINE void update(int64_t time, common::String value) {
+        BLOB_STAT_UPDATE(time, value);
+    }
+
+    FORCE_INLINE common::TSDataType get_type() { return common::BLOB; }
+
+    int serialize_typed_stat(common::ByteStream &out) { return common::E_OK; }
+    int deserialize_typed_stat(common::ByteStream &in) { return common::E_OK; }
+    int merge_with(Statistic *stat) {
+        MERGE_BLOB_STAT_FROM(BlobStatistic, stat);
+    }
+    int deep_copy_from(Statistic *stat) {
+        DEEP_COPY_BLOB_STAT_FROM(BlobStatistic, stat);
+    }
+
+   private:
+    common::PageArena *pa_;
+};
+
 FORCE_INLINE uint32_t get_typed_statistic_sizeof(common::TSDataType type) {
     uint32_t ret_size = 0;
     switch (type) {
         case common::BOOLEAN:
             ret_size = sizeof(BooleanStatistic);
             break;
+        case common::DATE:
+            ret_size = sizeof(DateStatistic);
+            break;
         case common::INT32:
             ret_size = sizeof(Int32Statistic);
             break;
@@ -982,7 +1225,13 @@
             ret_size = sizeof(StringStatistic);
             break;
         case common::TEXT:
-            ASSERT(false);
+            ret_size = sizeof(TextStatistic);
+            break;
+        case common::BLOB:
+            ret_size = sizeof(BlobStatistic);
+            break;
+        case common::TIMESTAMP:
+            ret_size = sizeof(TimestampStatistics);
             break;
         case common::VECTOR:
             ret_size = sizeof(TimeStatistic);
@@ -1001,6 +1250,9 @@
         case common::BOOLEAN:
             s = new (buf) BooleanStatistic;
             break;
+        case common::DATE:
+            s = new (buf) DateStatistic;
+            break;
         case common::INT32:
             s = new (buf) Int32Statistic;
             break;
@@ -1017,7 +1269,13 @@
             s = new (buf) StringStatistic;
             break;
         case common::TEXT:
-            ASSERT(false);
+            s = new (buf) TextStatistic;
+            break;
+        case common::BLOB:
+            s = new (buf) BlobStatistic;
+            break;
+        case common::TIMESTAMP:
+            s = new (buf) TimestampStatistics;
             break;
         case common::VECTOR:
             s = new (buf) TimeStatistic;
@@ -1046,6 +1304,9 @@
         case common::BOOLEAN:
             TYPED_CLONE_STATISTIC(BooleanStatistic);
             break;
+        case common::DATE:
+            TYPED_CLONE_STATISTIC(DateStatistic);
+            break;
         case common::INT32:
             TYPED_CLONE_STATISTIC(Int32Statistic);
             break;
@@ -1062,7 +1323,13 @@
             TYPED_CLONE_STATISTIC(StringStatistic);
             break;
         case common::TEXT:
-            ASSERT(false);
+            TYPED_CLONE_STATISTIC(TextStatistic);
+            break;
+        case common::BLOB:
+            TYPED_CLONE_STATISTIC(BlobStatistic);
+            break;
+        case common::TIMESTAMP:
+            TYPED_CLONE_STATISTIC(TimestampStatistics);
             break;
         case common::VECTOR:
             TYPED_CLONE_STATISTIC(TimeStatistic);
@@ -1105,6 +1372,9 @@
             case common::BOOLEAN:
                 ALLOC_STATISTIC(BooleanStatistic);
                 break;
+            case common::DATE:
+                ALLOC_STATISTIC(DateStatistic);
+                break;
             case common::INT32:
                 ALLOC_STATISTIC(Int32Statistic);
                 break;
@@ -1121,7 +1391,13 @@
                 ALLOC_STATISTIC(StringStatistic);
                 break;
             case common::TEXT:
-                ASSERT(false);
+                ALLOC_STATISTIC(TextStatistic);
+                break;
+            case common::BLOB:
+                ALLOC_STATISTIC(BlobStatistic);
+                break;
+            case common::TIMESTAMP:
+                ALLOC_STATISTIC(TimestampStatistics);
                 break;
             case common::VECTOR:
                 ALLOC_STATISTIC(TimeStatistic);
@@ -1156,11 +1432,20 @@
                 ALLOC_HEAP_STATISTIC_WITH_PA(StringStatistic);
                 break;
             case common::TEXT:
-                ASSERT(false);
+                ALLOC_HEAP_STATISTIC_WITH_PA(TextStatistic);
+                break;
+            case common::BLOB:
+                ALLOC_HEAP_STATISTIC_WITH_PA(BlobStatistic);
+                break;
+            case common::TIMESTAMP:
+                ALLOC_STATISTIC_WITH_PA(TimestampStatistics);
                 break;
             case common::VECTOR:
                 ALLOC_STATISTIC_WITH_PA(TimeStatistic);
                 break;
+            case common::DATE:
+                ALLOC_STATISTIC_WITH_PA(DateStatistic);
+                break;
             default:
                 ASSERT(false);
         }
diff --git a/cpp/src/common/tablet.cc b/cpp/src/common/tablet.cc
index 8bc801c..dcb5e06 100644
--- a/cpp/src/common/tablet.cc
+++ b/cpp/src/common/tablet.cc
@@ -21,6 +21,7 @@
 
 #include <cstdlib>
 
+#include "datatype/date_converter.h"
 #include "utils/errno_define.h"
 
 using namespace common;
@@ -52,23 +53,37 @@
             case BOOLEAN:
                 value_matrix_[c].bool_data = (bool *)malloc(
                     get_data_type_size(schema.data_type_) * max_row_num_);
+                memset(value_matrix_[c].bool_data, 0,
+                       get_data_type_size(schema.data_type_) * max_row_num_);
                 break;
+            case DATE:
             case INT32:
                 value_matrix_[c].int32_data = (int32_t *)malloc(
                     get_data_type_size(schema.data_type_) * max_row_num_);
+                memset(value_matrix_[c].int32_data, 0,
+                       get_data_type_size(schema.data_type_) * max_row_num_);
                 break;
+            case TIMESTAMP:
             case INT64:
                 value_matrix_[c].int64_data = (int64_t *)malloc(
                     get_data_type_size(schema.data_type_) * max_row_num_);
+                memset(value_matrix_[c].int64_data, 0,
+                       get_data_type_size(schema.data_type_) * max_row_num_);
                 break;
             case FLOAT:
                 value_matrix_[c].float_data = (float *)malloc(
                     get_data_type_size(schema.data_type_) * max_row_num_);
+                memset(value_matrix_[c].float_data, 0,
+                       get_data_type_size(schema.data_type_) * max_row_num_);
                 break;
             case DOUBLE:
                 value_matrix_[c].double_data = (double *)malloc(
                     get_data_type_size(schema.data_type_) * max_row_num_);
+                memset(value_matrix_[c].double_data, 0,
+                       get_data_type_size(schema.data_type_) * max_row_num_);
                 break;
+            case BLOB:
+            case TEXT:
             case STRING: {
                 value_matrix_[c].string_data =
                     (common::String *)malloc(sizeof(String) * max_row_num_);
@@ -97,9 +112,11 @@
         for (size_t c = 0; c < schema_vec_->size(); c++) {
             const MeasurementSchema &schema = schema_vec_->at(c);
             switch (schema.data_type_) {
+                case DATE:
                 case INT32:
                     free(value_matrix_[c].int32_data);
                     break;
+                case TIMESTAMP:
                 case INT64:
                     free(value_matrix_[c].int64_data);
                     break;
@@ -112,6 +129,8 @@
                 case BOOLEAN:
                     free(value_matrix_[c].bool_data);
                     break;
+                case BLOB:
+                case TEXT:
                 case STRING:
                     free(value_matrix_[c].string_data);
                     break;
@@ -201,10 +220,12 @@
             (value_matrix_[schema_index].bool_data)[row_index] =
                 static_cast<bool>(val);
             break;
+        case common::DATE:
         case common::INT32:
             value_matrix_[schema_index].int32_data[row_index] =
                 static_cast<int32_t>(val);
             break;
+        case common::TIMESTAMP:
         case common::INT64:
             value_matrix_[schema_index].int64_data[row_index] =
                 static_cast<int64_t>(val);
@@ -234,20 +255,28 @@
         ret = common::E_OUT_OF_RANGE;
     } else {
         const MeasurementSchema &schema = schema_vec_->at(schema_index);
-        if (UNLIKELY(GetDataTypeFromTemplateType<T>() != schema.data_type_)) {
-            if (GetDataTypeFromTemplateType<T>() == common::INT32 &&
-                schema.data_type_ == common::INT64) {
-                process_val(row_index, schema_index, static_cast<int64_t>(val));
-            } else if (GetDataTypeFromTemplateType<T>() == common::FLOAT &&
-                       schema.data_type_ == common::DOUBLE) {
-                process_val(row_index, schema_index, static_cast<double>(val));
-            } else {
-                ASSERT(false);
-                return E_TYPE_NOT_MATCH;
-            }
-        } else {
-            process_val(row_index, schema_index, val);
+        auto dic = GetDataTypesFromTemplateType<T>();
+        if (dic.find(schema.data_type_) == dic.end()) {
+            return E_TYPE_NOT_MATCH;
         }
+        process_val(row_index, schema_index, val);
+    }
+    return ret;
+}
+
+template <>
+int Tablet::add_value(uint32_t row_index, uint32_t schema_index, std::tm val) {
+    if (err_code_ != E_OK) {
+        return err_code_;
+    }
+    int ret = common::E_OK;
+    if (UNLIKELY(schema_index >= schema_vec_->size())) {
+        ASSERT(false);
+        ret = common::E_OUT_OF_RANGE;
+    }
+    int32_t date_int;
+    if (RET_SUCC(common::DateConverter::date_to_int(val, date_int))) {
+        process_val(row_index, schema_index, date_int);
     }
     return ret;
 }
@@ -280,7 +309,7 @@
     if (err_code_ != E_OK) {
         return err_code_;
     }
-    SchemaMapIterator find_iter = schema_map_.find(measurement_name);
+    SchemaMapIterator find_iter = schema_map_.find(to_lower(measurement_name));
     if (LIKELY(find_iter == schema_map_.end())) {
         ret = E_INVALID_ARG;
     } else {
@@ -334,22 +363,36 @@
 }
 
 std::shared_ptr<IDeviceID> Tablet::get_device_id(int i) const {
-    std::vector<std::string> id_array;
-    id_array.push_back(insert_target_name_);
+    std::vector<std::string *> id_array;
+    id_array.push_back(new std::string(insert_target_name_));
     for (auto id_column_idx : id_column_indexes_) {
         common::TSDataType data_type = INVALID_DATATYPE;
         void *value_ptr = get_value(i, id_column_idx, data_type);
+        if (value_ptr == nullptr) {
+            id_array.push_back(nullptr);
+            continue;
+        }
         common::String str;
         switch (data_type) {
             case STRING:
                 str = *static_cast<common::String *>(value_ptr);
-                id_array.push_back(str.to_std_string());
+                if (str.buf_ == nullptr || str.len_ == 0) {
+                    id_array.push_back(new std::string());
+                } else {
+                    id_array.push_back(new std::string(str.buf_, str.len_));
+                }
                 break;
             default:
                 break;
         }
     }
-    return std::make_shared<StringArrayDeviceID>(id_array);
+    auto res = std::make_shared<StringArrayDeviceID>(id_array);
+    for (auto &id : id_array) {
+        if (id != nullptr) {
+            delete id;
+        }
+    }
+    return res;
 }
 
 }  // end namespace storage
\ No newline at end of file
diff --git a/cpp/src/common/tablet.h b/cpp/src/common/tablet.h
index b036cca..b30fc51 100644
--- a/cpp/src/common/tablet.h
+++ b/cpp/src/common/tablet.h
@@ -38,10 +38,12 @@
 class TabletColIterator;
 
 /**
- * @brief Represents a collection of data rows with associated metadata for insertion into a table.
+ * @brief Represents a collection of data rows with associated metadata for
+ * insertion into a table.
  *
- * This class is used to manage and organize data that will be inserted into a specific target table.
- * It handles the storage of timestamps and values, along with their associated metadata such as column names and types.
+ * This class is used to manage and organize data that will be inserted into a
+ * specific target table. It handles the storage of timestamps and values, along
+ * with their associated metadata such as column names and types.
  */
 class Tablet {
     struct ValueMatrixEntry {
@@ -106,7 +108,8 @@
                        [](const std::string &name, common::TSDataType type) {
                            return MeasurementSchema(name, type);
                        });
-        schema_vec_ = std::make_shared<std::vector<MeasurementSchema>>(measurement_vec);
+        schema_vec_ =
+            std::make_shared<std::vector<MeasurementSchema>>(measurement_vec);
         err_code_ = init();
     }
 
@@ -124,7 +127,8 @@
         schema_vec_ = std::make_shared<std::vector<MeasurementSchema>>();
         for (size_t i = 0; i < column_names.size(); i++) {
             schema_vec_->emplace_back(
-                MeasurementSchema(column_names[i], data_types[i], common::get_value_encoder(data_types[i]),
+                MeasurementSchema(column_names[i], data_types[i],
+                                  common::get_value_encoder(data_types[i]),
                                   common::get_default_compressor()));
         }
         set_column_categories(column_categories);
@@ -134,34 +138,33 @@
     /**
      * @brief Constructs a Tablet object with the given parameters.
      *
-     * @param column_names A vector containing the names of the columns in the tablet.
-     *                     Each name corresponds to a column in the target table.
+     * @param column_names A vector containing the names of the columns in the
+     * tablet. Each name corresponds to a column in the target table.
      * @param data_types A vector containing the data types of each column.
      *                   These must match the schema of the target table.
-     * @param max_rows The maximum number of rows that this tablet can hold. Defaults to DEFAULT_MAX_ROWS.
+     * @param max_rows The maximum number of rows that this tablet can hold.
+     * Defaults to DEFAULT_MAX_ROWS.
      */
     Tablet(const std::vector<std::string> &column_names,
-       const std::vector<common::TSDataType> &data_types,
-       uint32_t max_rows = DEFAULT_MAX_ROWS)
-    : max_row_num_(max_rows),
-      cur_row_size_(0),
-      timestamps_(nullptr),
-      value_matrix_(nullptr),
-      bitmaps_(nullptr) {
+           const std::vector<common::TSDataType> &data_types,
+           uint32_t max_rows = DEFAULT_MAX_ROWS)
+        : max_row_num_(max_rows),
+          cur_row_size_(0),
+          timestamps_(nullptr),
+          value_matrix_(nullptr),
+          bitmaps_(nullptr) {
         schema_vec_ = std::make_shared<std::vector<MeasurementSchema>>();
         for (size_t i = 0; i < column_names.size(); i++) {
-            schema_vec_->emplace_back(
-                column_names[i], data_types[i], common::get_value_encoder(data_types[i]),
-                                  common::get_default_compressor());
+            schema_vec_->emplace_back(column_names[i], data_types[i],
+                                      common::get_value_encoder(data_types[i]),
+                                      common::get_default_compressor());
         }
         err_code_ = init();
     }
 
     ~Tablet() { destroy(); }
 
-    const std::string& get_table_name() const{
-        return insert_target_name_;
-    }
+    const std::string &get_table_name() const { return insert_target_name_; }
     void set_table_name(const std::string &table_name) {
         insert_target_name_ = table_name;
     }
@@ -171,8 +174,8 @@
     /**
      * @brief Adds a timestamp to the specified row.
      *
-     * @param row_index The index of the row to which the timestamp will be added.
-     *                  Must be less than the maximum number of rows.
+     * @param row_index The index of the row to which the timestamp will be
+     * added. Must be less than the maximum number of rows.
      * @param timestamp The timestamp value to add.
      * @return Returns 0 on success, or a non-zero error code on failure.
      */
@@ -181,12 +184,14 @@
     void *get_value(int row_index, uint32_t schema_index,
                     common::TSDataType &data_type) const;
     /**
-     * @brief Template function to add a value of type T to the specified row and column.
+     * @brief Template function to add a value of type T to the specified row
+     * and column.
      *
      * @tparam T The type of the value to add.
      * @param row_index The index of the row to which the value will be added.
      *                  Must be less than the maximum number of rows.
-     * @param schema_index The index of the column schema corresponding to the value being added.
+     * @param schema_index The index of the column schema corresponding to the
+     * value being added.
      * @param val The value to add.
      * @return Returns 0 on success, or a non-zero error code on failure.
      */
@@ -197,13 +202,14 @@
         const std::vector<common::ColumnCategory> &column_categories);
     std::shared_ptr<IDeviceID> get_device_id(int i) const;
     /**
-     * @brief Template function to add a value of type T to the specified row and column by name.
+     * @brief Template function to add a value of type T to the specified row
+     * and column by name.
      *
      * @tparam T The type of the value to add.
      * @param row_index The index of the row to which the value will be added.
      *                  Must be less than the maximum number of rows.
-     * @param measurement_name The name of the column to which the value will be added.
-     *                         Must match one of the column names provided during construction.
+     * @param measurement_name The name of the column to which the value will be
+     * added. Must match one of the column names provided during construction.
      * @param val The value to add.
      * @return Returns 0 on success, or a non-zero error code on failure.
      */
@@ -211,7 +217,8 @@
     int add_value(uint32_t row_index, const std::string &measurement_name,
                   T val);
 
-    FORCE_INLINE const std::string &get_column_name(uint32_t column_index) const {
+    FORCE_INLINE const std::string &get_column_name(
+        uint32_t column_index) const {
         return schema_vec_->at(column_index).measurement_name_;
     }
 
@@ -219,7 +226,7 @@
         schema_vec_->at(column_index).measurement_name_ = name;
     }
 
-    const std::map<std::string, int>& get_schema_map() const {
+    const std::map<std::string, int> &get_schema_map() const {
         return schema_map_;
     }
 
diff --git a/cpp/src/common/tsblock/tsblock.cc b/cpp/src/common/tsblock/tsblock.cc
index f290e3f..c6a675c 100644
--- a/cpp/src/common/tsblock/tsblock.cc
+++ b/cpp/src/common/tsblock/tsblock.cc
@@ -51,9 +51,11 @@
 int TsBlock::build_vector(common::TSDataType type, uint32_t row_count) {
     Vector *vec;
     int ret = 0;
-    if (LIKELY(type != common::TEXT && type != common::STRING)) {
+    if (LIKELY(type != common::TEXT && type != common::STRING &&
+               type != common::BLOB)) {
         vec = new FixedLengthVector(type, row_count, get_len(type), this);
-    } else if (type == common::TEXT || type == common::STRING) {
+    } else if (type == common::TEXT || type == common::STRING ||
+               type == common::BLOB) {
         vec = new VariableLengthVector(
             type, row_count, DEFAULT_RESERVED_SIZE_OF_TEXT + TEXT_LEN, this);
     } else {
@@ -72,24 +74,24 @@
     switch (type) {
         case common::INT64: {
             int64_t ival = *reinterpret_cast<int64_t *>(val);
-            strval = to_string(ival);
+            strval = std::to_string(ival);
             break;
         }
         case common::INT32: {
             int32_t ival = *reinterpret_cast<int32_t *>(val);
-            strval = to_string(ival);
+            strval = std::to_string(ival);
             break;
         }
         case common::FLOAT: {
             float ival = *reinterpret_cast<float *>(
                 val);  // cppcheck-suppress invalidPointerCast
-            strval = to_string(ival);
+            strval = std::to_string(ival);
             break;
         }
         case common::DOUBLE: {
             double ival = *reinterpret_cast<double *>(
                 val);  // cppcheck-suppress invalidPointerCast
-            strval = to_string(ival);
+            strval = std::to_string(ival);
             break;
         }
         case common::BOOLEAN: {
@@ -246,7 +248,8 @@
         if (is_null) {
             out << "NULL";
         } else {
-            ColumnSchema &col_schema = tsblock_->tuple_desc_->get_column_schema(i);
+            ColumnSchema &col_schema =
+                tsblock_->tuple_desc_->get_column_schema(i);
             switch (col_schema.data_type_) {
                 case common::BOOLEAN: {
                     out << *static_cast<bool *>(value);
diff --git a/cpp/src/common/tsblock/tsblock.h b/cpp/src/common/tsblock/tsblock.h
index 47f110c..a0e9439 100644
--- a/cpp/src/common/tsblock/tsblock.h
+++ b/cpp/src/common/tsblock/tsblock.h
@@ -202,8 +202,16 @@
 
     FORCE_INLINE uint32_t get_col_row_count() { return column_row_count_; }
     FORCE_INLINE uint32_t get_column_index() { return column_index_; }
-    FORCE_INLINE int fill(const char *value, uint32_t len,
-                           uint32_t end_index) {
+    FORCE_INLINE int fill_null(uint32_t end_index) {
+        while (column_row_count_ < end_index) {
+            if (!add_row()) {
+                return E_INVALID_ARG;
+            }
+            append_null();
+        }
+        return E_OK;
+    }
+    FORCE_INLINE int fill(const char *value, uint32_t len, uint32_t end_index) {
         while (column_row_count_ < end_index) {
             if (!add_row()) {
                 return E_INVALID_ARG;
@@ -249,6 +257,13 @@
         }
     }
 
+    FORCE_INLINE void next(size_t ind) const {
+        ASSERT(row_id_ < tsblock_->row_count_);
+        tsblock_->vectors_[ind]->update_offset();
+    }
+
+    FORCE_INLINE void update_row_id() { row_id_++; }
+
     FORCE_INLINE char *read(uint32_t column_index, uint32_t *__restrict len,
                             bool *__restrict null) {
         ASSERT(column_index < column_count_);
@@ -278,8 +293,10 @@
     FORCE_INLINE bool end() const { return row_id_ >= tsblock_->row_count_; }
 
     FORCE_INLINE void next() {
+        if (!vec_->is_null(row_id_)) {
+            vec_->update_offset();
+        }
         ++row_id_;
-        vec_->update_offset();
     }
 
     FORCE_INLINE bool has_null() { return vec_->has_null(); }
diff --git a/cpp/src/common/tsblock/tuple_desc.cc b/cpp/src/common/tsblock/tuple_desc.cc
index be0abf7..f0550eb 100644
--- a/cpp/src/common/tsblock/tuple_desc.cc
+++ b/cpp/src/common/tsblock/tuple_desc.cc
@@ -28,10 +28,12 @@
                 total_len += sizeof(bool);
                 break;
             }
+            case common::DATE:
             case common::INT32: {
                 total_len += sizeof(int32_t);
                 break;
             }
+            case common::TIMESTAMP:
             case common::INT64: {
                 total_len += sizeof(int64_t);
                 break;
@@ -44,14 +46,12 @@
                 total_len += sizeof(double);
                 break;
             }
+            case common::TEXT:
+            case common::BLOB:
             case common::STRING: {
                 total_len += DEFAULT_RESERVED_SIZE_OF_STRING + STRING_LEN;
                 break;
             }
-            case common::TEXT: {
-                total_len += DEFAULT_RESERVED_SIZE_OF_TEXT + TEXT_LEN;
-                break;
-            }
             default: {
                 // log_err("TsBlock::BuildVector unknown type %d",
                 // static_cast<int>(column_list_[i].type_));
@@ -68,9 +68,11 @@
         case common::BOOLEAN: {
             return sizeof(bool);
         }
+        case common::DATE:
         case common::INT32: {
             return sizeof(int32_t);
         }
+        case common::TIMESTAMP:
         case common::INT64: {
             return sizeof(int64_t);
         }
diff --git a/cpp/src/common/tsblock/tuple_desc.h b/cpp/src/common/tsblock/tuple_desc.h
index 98bd341..85ba130 100644
--- a/cpp/src/common/tsblock/tuple_desc.h
+++ b/cpp/src/common/tsblock/tuple_desc.h
@@ -71,6 +71,11 @@
         return column_list_[index].data_type_;
     }
 
+    FORCE_INLINE common::ColumnCategory get_column_category(
+        const uint32_t index) const {
+        return column_list_[index].column_category_;
+    }
+
     FORCE_INLINE std::string get_column_name(uint32_t index) {
         return column_list_[index].column_name_;
     }
diff --git a/cpp/src/common/tsblock/vector/variable_length_vector.h b/cpp/src/common/tsblock/vector/variable_length_vector.h
index bec599a..23ebb75 100644
--- a/cpp/src/common/tsblock/vector/variable_length_vector.h
+++ b/cpp/src/common/tsblock/vector/variable_length_vector.h
@@ -61,6 +61,7 @@
             *null = nulls_.test(rowid);
         } else {
             *null = false;
+            *len = 0;
         }
         if (LIKELY(!(*null))) {
             char *result = values_.read(offset_, len);
diff --git a/cpp/src/common/tsfile_common.cc b/cpp/src/common/tsfile_common.cc
index 17c3d47..31c718b 100644
--- a/cpp/src/common/tsfile_common.cc
+++ b/cpp/src/common/tsfile_common.cc
@@ -29,8 +29,7 @@
 namespace storage {
 
 const char *MAGIC_STRING_TSFILE = "TsFile";
-const int MAGIC_STRING_TSFILE_LEN = 6;
-const char VERSION_NUM_BYTE = 0x04;//0x03;
+const char VERSION_NUM_BYTE = 0x04;  // 0x03;
 const char CHUNK_GROUP_HEADER_MARKER = 0;
 const char CHUNK_HEADER_MARKER = 1;
 const char ONLY_ONE_PAGE_CHUNK_HEADER_MARKER = 5;
@@ -104,13 +103,14 @@
             chunk_meta_iter_++;
         }
         if (!tmp.empty()) {
-            tsm_chunk_meta_info_[chunk_group_meta_iter_.get()
-                                     ->device_id_] = tmp;
+            tsm_chunk_meta_info_[chunk_group_meta_iter_.get()->device_id_] =
+                tmp;
         }
 
         chunk_group_meta_iter_++;
     }
-    if (!tsm_chunk_meta_info_.empty() && !tsm_chunk_meta_info_.begin()->second.empty()) {
+    if (!tsm_chunk_meta_info_.empty() &&
+        !tsm_chunk_meta_info_.begin()->second.empty()) {
         tsm_measurement_iter_ = tsm_chunk_meta_info_.begin()->second.begin();
     }
     tsm_device_iter_ = tsm_chunk_meta_info_.begin();
@@ -121,7 +121,8 @@
     return tsm_device_iter_ != tsm_chunk_meta_info_.end();
 }
 
-int TSMIterator::get_next(std::shared_ptr<IDeviceID> &ret_device_id, String &ret_measurement_name,
+int TSMIterator::get_next(std::shared_ptr<IDeviceID> &ret_device_id,
+                          String &ret_measurement_name,
                           TimeseriesIndex &ret_ts_index) {
     int ret = E_OK;
     SimpleList<ChunkMeta *> chunk_meta_list_of_this_ts(
@@ -153,7 +154,6 @@
     ret_ts_index.set_data_type(data_type);
     ret_ts_index.init_statistic(data_type);
 
-
     SimpleList<ChunkMeta *>::Iterator ts_chunk_meta_iter =
         chunk_meta_list_of_this_ts.begin();
     for (;
@@ -199,9 +199,10 @@
     }
 
     common::SerializationUtil::write_var_int(tsfile_properties_.size(), out);
-    for (const auto& tsfile_property : tsfile_properties_) {
+    for (const auto &tsfile_property : tsfile_properties_) {
         common::SerializationUtil::write_var_str(tsfile_property.first, out);
-        common::SerializationUtil::write_var_str(tsfile_property.second, out);
+        common::SerializationUtil::write_var_char_ptr(tsfile_property.second,
+                                                      out);
     }
 
     return out.total_size() - start_idx;
@@ -250,24 +251,26 @@
     int32_t tsfile_properties_size = 0;
     common::SerializationUtil::read_var_int(tsfile_properties_size, in);
     for (int i = 0; i < tsfile_properties_size; i++) {
-        std::string key, value;
+        std::string key, *value;
         common::SerializationUtil::read_var_str(key, in);
-        common::SerializationUtil::read_var_str(value, in);
-        tsfile_properties_.emplace(key, std::move(value));
+        common::SerializationUtil::read_var_char_ptr(value, in);
+        tsfile_properties_.emplace(key, value);
     }
     return ret;
 }
 
 /* ================ MetaIndexNode ================ */
-int MetaIndexNode::binary_search_children(std::shared_ptr<IComparable> key, bool exact_search,
-                                          std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
-                                          int64_t &ret_end_offset) {
+int MetaIndexNode::binary_search_children(
+    std::shared_ptr<IComparable> key, bool exact_search,
+    std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
+    int64_t &ret_end_offset) {
 #if DEBUG_SE
     std::cout << "MetaIndexNode::binary_search_children start, name=" << key
               << ", exact_search=" << exact_search
               << ", children_.size=" << children_.size() << std::endl;
     for (int i = 0; i < (int)children_.size(); i++) {
-        std::cout << "Iterating children: " << children_[i]->get_name() << std::endl;
+        std::cout << "Iterating children: " << children_[i]->get_name()
+                  << std::endl;
     }
 #endif
     bool is_aligned = false;
@@ -298,7 +301,7 @@
                 break;
             } else if (cmp > 0) {  // children_[m] > name
                 h = m;
-            } else {               // children_[m] < name
+            } else {  // children_[m] < name
                 l = m;
             }
         }
diff --git a/cpp/src/common/tsfile_common.h b/cpp/src/common/tsfile_common.h
index 878fc4d..dd22ca4 100644
--- a/cpp/src/common/tsfile_common.h
+++ b/cpp/src/common/tsfile_common.h
@@ -40,7 +40,7 @@
 namespace storage {
 
 extern const char *MAGIC_STRING_TSFILE;
-extern const int MAGIC_STRING_TSFILE_LEN;
+constexpr int MAGIC_STRING_TSFILE_LEN = 6;
 extern const char VERSION_NUM_BYTE;
 extern const char CHUNK_GROUP_HEADER_MARKER;
 extern const char CHUNK_HEADER_MARKER;
@@ -621,10 +621,10 @@
                  common::String &ret_measurement_name,
                  TimeseriesIndex &ret_ts_index);
 
-private:
-  common::SimpleList<ChunkGroupMeta *> &chunk_group_meta_list_;
-  common::SimpleList<ChunkGroupMeta *>::Iterator chunk_group_meta_iter_;
-  common::SimpleList<ChunkMeta *>::Iterator chunk_meta_iter_;
+   private:
+    common::SimpleList<ChunkGroupMeta *> &chunk_group_meta_list_;
+    common::SimpleList<ChunkGroupMeta *>::Iterator chunk_group_meta_iter_;
+    common::SimpleList<ChunkMeta *>::Iterator chunk_meta_iter_;
 
     // timeseries measurenemnt chunk meta info
     // map <device_name, <measurement_name, vector<chunk_meta>>>
@@ -651,10 +651,10 @@
     virtual int compare(const IComparable &other) {
         if (this->operator<(other)) {
             return -1;
-        } else if (this->operator>(other)) {
-            return 1;
-        } else {
+        } else if (this->operator==(other)) {
             return 0;
+        } else {
+            return 1;
         }
     }
     virtual std::string to_string() const = 0;
@@ -670,24 +670,22 @@
         const auto *other_device =
             dynamic_cast<const DeviceIDComparable *>(&other);
         if (!other_device) throw std::runtime_error("Incompatible comparison");
-        return device_id_->get_device_name() <
-               other_device->device_id_->get_device_name();
+        return *device_id_ < *other_device->device_id_;
     }
 
     bool operator>(const IComparable &other) const override {
         const auto *other_device =
             dynamic_cast<const DeviceIDComparable *>(&other);
         if (!other_device) throw std::runtime_error("Incompatible comparison");
-        return device_id_->get_device_name() >
-               other_device->device_id_->get_device_name();
+        return *device_id_ != *other_device->device_id_ &&
+               !(*device_id_ < *other_device->device_id_);
     }
 
     bool operator==(const IComparable &other) const override {
         const auto *other_device =
             dynamic_cast<const DeviceIDComparable *>(&other);
         if (!other_device) throw std::runtime_error("Incompatible comparison");
-        return device_id_->get_device_name() ==
-               other_device->device_id_->get_device_name();
+        return *device_id_ == *other_device->device_id_;
     }
 
     std::string to_string() const override {
@@ -768,6 +766,12 @@
 
     ~DeviceMetaIndexEntry() override = default;
 
+    static void self_deleter(DeviceMetaIndexEntry *ptr) {
+        if (ptr) {
+            ptr->~DeviceMetaIndexEntry();
+        }
+    }
+
     int serialize_to(common::ByteStream &out) override {
         int ret = common::E_OK;
         if (RET_FAIL(device_id_->serialize(out))) {
@@ -908,10 +912,10 @@
         }
     }
 
-    int binary_search_children(std::shared_ptr<IComparable> key,
-                               bool exact_search,
-                               std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
-                               int64_t &ret_end_offset);
+    int binary_search_children(
+        std::shared_ptr<IComparable> key, bool exact_search,
+        std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
+        int64_t &ret_end_offset);
 
     int serialize_to(common::ByteStream &out) {
         int ret = common::E_OK;
@@ -1000,9 +1004,9 @@
             if (IS_NULL(entry_buf)) {
                 return common::E_OOM;
             }
-            // auto entry = new (entry_buf) DeviceMetaIndexEntry;
-            auto entry = std::make_shared<DeviceMetaIndexEntry>();
-
+            auto *entry_ptr = new (entry_buf) DeviceMetaIndexEntry();
+            auto entry = std::shared_ptr<DeviceMetaIndexEntry>(
+                entry_ptr, DeviceMetaIndexEntry::self_deleter);
             if (RET_FAIL(entry->deserialize_from(in, pa_))) {
             } else {
                 children_.push_back(entry);
@@ -1069,7 +1073,7 @@
         DeviceNodeMap;
     std::map<std::string, std::shared_ptr<MetaIndexNode>>
         table_metadata_index_node_map_;
-    std::unordered_map<std::string, std::string> tsfile_properties_;
+    std::unordered_map<std::string, std::string *> tsfile_properties_;
     typedef std::unordered_map<std::string, std::shared_ptr<TableSchema>>
         TableSchemasMap;
     TableSchemasMap table_schemas_;
@@ -1107,6 +1111,11 @@
         if (bloom_filter_ != nullptr) {
             bloom_filter_->destroy();
         }
+        for (auto properties : tsfile_properties_) {
+            if (properties.second != nullptr) {
+                delete properties.second;
+            }
+        }
         table_metadata_index_node_map_.clear();
         table_schemas_.clear();
     }
diff --git a/cpp/src/compress/gzip_compressor.cc b/cpp/src/compress/gzip_compressor.cc
index 4586975..9b4f9cd 100644
--- a/cpp/src/compress/gzip_compressor.cc
+++ b/cpp/src/compress/gzip_compressor.cc
@@ -19,271 +19,248 @@
 
 #include "gzip_compressor.h"
 
-using  namespace common;
+using namespace common;
 
+namespace storage {
 
+GzipCompressor::GzipCompressor() : compressed_buf() { zstream_valid_ = false; }
 
-namespace storage
-{
+GzipCompressor::~GzipCompressor() { end_zstream(); }
 
-GzipCompressor::GzipCompressor() : compressed_buf()
-{
-  zstream_valid_ = false;
+int GzipCompressor::reset() {
+    int ret = E_OK;
+    if (RET_FAIL(end_zstream())) {
+    } else if (RET_FAIL(init_zstream())) {
+    }
+    return ret;
 }
 
-GzipCompressor::~GzipCompressor()
-{
-  end_zstream();
-}
+int GzipCompressor::init_zstream() {
+    if (zstream_valid_) {
+        return E_OK;
+    }
+    compress_stream_.zalloc = (alloc_func)0;  // Z_NULL
+    compress_stream_.zfree = (free_func)0;
+    compress_stream_.opaque = (voidpf)0;
+    compress_stream_.next_in = 0;
+    compress_stream_.avail_in = 0;
+    compress_stream_.next_out = 0;
+    compress_stream_.avail_out = 0;
 
-int GzipCompressor::reset()
-{
-  int ret = E_OK;
-  if (RET_FAIL(end_zstream())) {
-  } else if (RET_FAIL(init_zstream())) {
-  }
-  return ret;
-}
+    memset(compressed_buf, 0, DEFLATE_BUFFER_SIZE);
 
-int GzipCompressor::init_zstream()
-{
-  if (zstream_valid_) {
+    if (deflateInit2(&compress_stream_, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31,
+                     8, Z_DEFAULT_STRATEGY) != Z_OK) {
+        // log_err("gzip deflateInit2 failed");
+        return E_COMPRESS_ERR;
+    }
+    zstream_valid_ = true;
     return E_OK;
-  }
-  compress_stream_.zalloc = (alloc_func)0;  // Z_NULL
-  compress_stream_.zfree = (free_func)0;
-  compress_stream_.opaque = (voidpf)0;
-  compress_stream_.next_in = 0;
-  compress_stream_.avail_in = 0;
-  compress_stream_.next_out = 0;
-  compress_stream_.avail_out = 0;
-
-  memset(compressed_buf, 0, DEFLATE_BUFFER_SIZE);
-
-  if (deflateInit2(&compress_stream_,
-                   Z_DEFAULT_COMPRESSION,
-                   Z_DEFLATED,
-                   31,
-                   8,
-                   Z_DEFAULT_STRATEGY) != Z_OK) {
-    //log_err("gzip deflateInit2 failed");
-    return E_COMPRESS_ERR;
-  }
-  zstream_valid_ = true;
-  return E_OK;
 }
 
-int GzipCompressor::end_zstream()
-{
-  if (!zstream_valid_) {
+int GzipCompressor::end_zstream() {
+    if (!zstream_valid_) {
+        return E_OK;
+    }
+    if (deflateEnd(&compress_stream_) != Z_OK) {
+        // log_err("deflateEnd failed");
+        return E_COMPRESS_ERR;
+    }
+    zstream_valid_ = false;
     return E_OK;
-  }
-  if(deflateEnd(&compress_stream_) != Z_OK) {
-    //log_err("deflateEnd failed");
-    return E_COMPRESS_ERR;
-  }
-  zstream_valid_ = false;
-  return E_OK;
 }
 
 int GzipCompressor::compress_into_bytestream(char *uncompressed_buf,
                                              uint32_t uncompressed_buf_len,
-                                             ByteStream &out)
-{
-  int ret = Z_OK;
+                                             ByteStream &out) {
+    int ret = Z_OK;
 
-  compress_stream_.next_in = (Bytef *)uncompressed_buf;
-  compress_stream_.avail_in = uncompressed_buf_len;
-  compress_stream_.next_out = (Bytef *)compressed_buf;
-  compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
+    compress_stream_.next_in = (Bytef *)uncompressed_buf;
+    compress_stream_.avail_in = uncompressed_buf_len;
+    compress_stream_.next_out = (Bytef *)compressed_buf;
+    compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
 
-  if (uncompressed_buf == nullptr || uncompressed_buf_len == 0) {  // no more
-    if (compress_stream_.next_out) {
-      while (ret != Z_STREAM_END) {
-        ret = deflate(&compress_stream_, Z_FINISH);
-        if(ret != Z_OK && ret != Z_STREAM_END) {
-          //log_err("deflate failed");
-          return E_COMPRESS_ERR;
+    if (uncompressed_buf == nullptr || uncompressed_buf_len == 0) {  // no more
+        if (compress_stream_.next_out) {
+            while (ret != Z_STREAM_END) {
+                ret = deflate(&compress_stream_, Z_FINISH);
+                if (ret != Z_OK && ret != Z_STREAM_END) {
+                    // log_err("deflate failed");
+                    return E_COMPRESS_ERR;
+                }
+                out.write_buf(compressed_buf,
+                              DEFLATE_BUFFER_SIZE - compress_stream_.avail_out);
+                compress_stream_.next_out = (Bytef *)compressed_buf;
+                compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
+            }
         }
-        out.write_buf(compressed_buf, DEFLATE_BUFFER_SIZE -
-        compress_stream_.avail_out); compress_stream_.next_out = (Bytef
-        *)compressed_buf; compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
-      }
+        return E_OK;
     }
+
+    for (;;) {
+        ret = deflate(&compress_stream_, Z_NO_FLUSH);
+        if (ret != Z_OK) {
+            // log_err("deflate failed");
+            return E_COMPRESS_ERR;
+        }
+
+        if (compress_stream_.avail_in == 0) {  // current input data are all
+            out.write_buf(compressed_buf,
+                          DEFLATE_BUFFER_SIZE - compress_stream_.avail_out);
+            compress_stream_.next_out = (Bytef *)compressed_buf;
+            compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
+            break;
+        } else if (compress_stream_.avail_out ==
+                   0) {  // no more space for output
+            out.write_buf(compressed_buf, DEFLATE_BUFFER_SIZE);
+            compress_stream_.next_out = (Bytef *)compressed_buf;
+            compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
+        }
+    }
+
     return E_OK;
-  }
-
-  for (;;) {
-    ret = deflate(&compress_stream_, Z_NO_FLUSH);
-    if (ret != Z_OK) {
-      //log_err("deflate failed");
-      return E_COMPRESS_ERR;
-    }
-
-    if (compress_stream_.avail_in == 0) {  // current input data are all
-      out.write_buf(compressed_buf, DEFLATE_BUFFER_SIZE -
-      compress_stream_.avail_out); compress_stream_.next_out = (Bytef
-      *)compressed_buf; compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
-      break;
-    }
-    else if (compress_stream_.avail_out == 0) {  // no more space for output
-      out.write_buf(compressed_buf, DEFLATE_BUFFER_SIZE);
-      compress_stream_.next_out = (Bytef *)compressed_buf;
-      compress_stream_.avail_out = DEFLATE_BUFFER_SIZE;
-    }
-  }
-
-  return E_OK;
 }
 
 int GzipCompressor::compress(char *uncompressed_buf,
                              uint32_t uncompressed_buf_len,
                              char *&compressed_buf,
-                             uint32_t &compressed_buf_len)
-{
-  int ret = E_OK;
-  ByteStream out(DEFLATE_BUFFER_SIZE, MOD_COMPRESSOR_OBJ);
-  if (RET_FAIL(compress_into_bytestream(uncompressed_buf,
-  uncompressed_buf_len, out))) {
+                             uint32_t &compressed_buf_len) {
+    int ret = E_OK;
+    ByteStream out(DEFLATE_BUFFER_SIZE, MOD_COMPRESSOR_OBJ);
+    if (RET_FAIL(compress_into_bytestream(uncompressed_buf,
+                                          uncompressed_buf_len, out))) {
+        return ret;
+    }
+    if (RET_FAIL(compress_into_bytestream(nullptr, 0, out))) {
+        return ret;
+    }
+    compressed_buf = get_bytes_from_bytestream(out);
+    compressed_buf_len = out.total_size();
+    out.destroy();
     return ret;
-  }
-  if (RET_FAIL(compress_into_bytestream(nullptr, 0, out))) {
+}
+
+GzipDeCompressor::GzipDeCompressor() : decompressed_buf() {
+    zstream_valid_ = false;
+}
+
+GzipDeCompressor::~GzipDeCompressor() { end_zstream(); }
+
+int GzipDeCompressor::init_zstream() {
+    if (zstream_valid_) {
+        return E_OK;
+    }
+    decompress_stream_.zalloc = (alloc_func)0;  // Z_NULL
+    decompress_stream_.zfree = (free_func)0;
+    decompress_stream_.opaque = (voidpf)0;
+    decompress_stream_.next_in = 0;
+    decompress_stream_.avail_in = 0;
+    decompress_stream_.next_out = 0;
+    decompress_stream_.avail_out = 0;
+
+    memset(decompressed_buf, 0, INFLATE_BUFFER_SIZE);
+
+    if (inflateInit2(&decompress_stream_, 31) != Z_OK) {
+        // log_err("inflateInit2 failed");
+        return E_COMPRESS_ERR;
+    }
+    zstream_valid_ = true;
+    return E_OK;
+}
+
+int GzipDeCompressor::end_zstream() {
+    if (!zstream_valid_) {
+        return E_OK;
+    }
+    if (inflateEnd(&decompress_stream_) != Z_OK) {
+        // log_err("inflateEnd failed");
+        return E_COMPRESS_ERR;
+    }
+    zstream_valid_ = false;
+    return E_OK;
+}
+
+int GzipDeCompressor::reset() {
+    int ret = E_OK;
+    if (RET_FAIL(end_zstream())) {
+    } else if (RET_FAIL(init_zstream())) {
+    }
     return ret;
-  }
-  compressed_buf = get_bytes_from_bytestream(out);
-  compressed_buf_len = out.total_size();
-  out.destroy();
-  return ret;
-}
-
-GzipDeCompressor::GzipDeCompressor() : decompressed_buf()
-{
-  zstream_valid_ = false;
-}
-
-GzipDeCompressor::~GzipDeCompressor()
-{
-  end_zstream();
-}
-
-int GzipDeCompressor::init_zstream()
-{
-  if (zstream_valid_) {
-    return E_OK;
-  }
-  decompress_stream_.zalloc = (alloc_func)0;  // Z_NULL
-  decompress_stream_.zfree = (free_func)0;
-  decompress_stream_.opaque = (voidpf)0;
-  decompress_stream_.next_in = 0;
-  decompress_stream_.avail_in = 0;
-  decompress_stream_.next_out = 0;
-  decompress_stream_.avail_out = 0;
-
-  memset(decompressed_buf, 0, INFLATE_BUFFER_SIZE);
-
-  if (inflateInit2(&decompress_stream_, 31) != Z_OK) {
-    //log_err("inflateInit2 failed");
-    return E_COMPRESS_ERR;
-  }
-  zstream_valid_ = true;
-  return E_OK;
-}
-
-int GzipDeCompressor::end_zstream()
-{
-  if (!zstream_valid_) {
-    return E_OK;
-  }
-  if(inflateEnd(&decompress_stream_) != Z_OK) {
-    //log_err("inflateEnd failed");
-    return E_COMPRESS_ERR;
-  }
-  zstream_valid_ = false;
-  return E_OK;
-}
-
-int GzipDeCompressor::reset()
-{
-  int ret = E_OK;
-  if (RET_FAIL(end_zstream())) {
-  } else if (RET_FAIL(init_zstream())) {
-  }
-  return ret;
 }
 
 int GzipDeCompressor::decompress_into_bytestream(char *compressed_buf,
                                                  uint32_t compressed_buf_len,
-                                                 ByteStream &out)
-{
-  int ret = Z_OK;
+                                                 ByteStream &out) {
+    int ret = Z_OK;
 
-  decompress_stream_.next_in = (Bytef *)compressed_buf;
-  decompress_stream_.avail_in = compressed_buf_len;
-  decompress_stream_.next_out = (Bytef *)decompressed_buf;
-  decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
+    decompress_stream_.next_in = (Bytef *)compressed_buf;
+    decompress_stream_.avail_in = compressed_buf_len;
+    decompress_stream_.next_out = (Bytef *)decompressed_buf;
+    decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
 
-  if (compressed_buf == nullptr || compressed_buf_len == 0) {
-    if (decompress_stream_.next_out) {
-      while (ret != Z_STREAM_END) {
-        ret = inflate(&decompress_stream_, Z_FINISH);
-        if(ret != Z_OK && ret != Z_STREAM_END) {
-          //log_err("inflate failed");
-          return E_COMPRESS_ERR;
+    if (compressed_buf == nullptr || compressed_buf_len == 0) {
+        if (decompress_stream_.next_out) {
+            while (ret != Z_STREAM_END) {
+                ret = inflate(&decompress_stream_, Z_FINISH);
+                if (ret != Z_OK && ret != Z_STREAM_END) {
+                    // log_err("inflate failed");
+                    return E_COMPRESS_ERR;
+                }
+                out.write_buf(
+                    decompressed_buf,
+                    INFLATE_BUFFER_SIZE - decompress_stream_.avail_out);
+                decompress_stream_.next_out = (Bytef *)decompressed_buf;
+                decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
+            }
         }
-        out.write_buf(decompressed_buf, INFLATE_BUFFER_SIZE -
-        decompress_stream_.avail_out); decompress_stream_.next_out = (Bytef
-        *)decompressed_buf; decompress_stream_.avail_out =
-        INFLATE_BUFFER_SIZE;
-      }
+        return E_OK;
     }
+
+    for (;;) {
+        ret = inflate(&decompress_stream_, Z_NO_FLUSH);
+        if (ret == Z_STREAM_END) {
+            out.write_buf(decompressed_buf,
+                          INFLATE_BUFFER_SIZE - decompress_stream_.avail_out);
+            break;
+        }
+        if (ret != Z_OK) {
+            // log_err("inflate failed");
+            return E_COMPRESS_ERR;
+        }
+        if (decompress_stream_.avail_in == 0) {
+            out.write_buf(decompressed_buf,
+                          INFLATE_BUFFER_SIZE - decompress_stream_.avail_out);
+            decompress_stream_.next_out = (Bytef *)decompressed_buf;
+            decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
+            break;
+        } else if (decompress_stream_.avail_out == 0) {
+            out.write_buf(decompressed_buf, INFLATE_BUFFER_SIZE);
+            decompress_stream_.next_out = (Bytef *)decompressed_buf;
+            decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
+        }
+    }
+
     return E_OK;
-  }
-
-  for (;;) {
-    ret = inflate(&decompress_stream_, Z_NO_FLUSH);
-    if (ret == Z_STREAM_END) {
-      out.write_buf(decompressed_buf, INFLATE_BUFFER_SIZE -
-      decompress_stream_.avail_out); break;
-    }
-    if (ret != Z_OK) {
-      //log_err("inflate failed");
-      return E_COMPRESS_ERR;
-    }
-    if (decompress_stream_.avail_in == 0) {
-      out.write_buf(decompressed_buf, INFLATE_BUFFER_SIZE -
-      decompress_stream_.avail_out); decompress_stream_.next_out = (Bytef
-      *)decompressed_buf; decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
-      break;
-    }
-    else if (decompress_stream_.avail_out == 0) {
-      out.write_buf(decompressed_buf, INFLATE_BUFFER_SIZE);
-      decompress_stream_.next_out = (Bytef *)decompressed_buf;
-      decompress_stream_.avail_out = INFLATE_BUFFER_SIZE;
-    }
-  }
-
-  return E_OK;
 }
 
 int GzipDeCompressor::uncompress(char *compressed_buf,
                                  uint32_t compressed_buf_len,
                                  char *&uncompressed_buf,
-                                 uint32_t &uncompressed_buf_len)
-{
-  int ret = E_OK;
-  ByteStream out(INFLATE_BUFFER_SIZE, MOD_COMPRESSOR_OBJ);
-  if(RET_FAIL(decompress_into_bytestream(compressed_buf, compressed_buf_len,
-  out))) {
+                                 uint32_t &uncompressed_buf_len) {
+    int ret = E_OK;
+    ByteStream out(INFLATE_BUFFER_SIZE, MOD_COMPRESSOR_OBJ);
+    if (RET_FAIL(decompress_into_bytestream(compressed_buf, compressed_buf_len,
+                                            out))) {
+        return ret;
+    }
+    if (RET_FAIL(decompress_into_bytestream(nullptr, 0, out))) {
+        return ret;
+    }
+    uncompressed_buf = get_bytes_from_bytestream(out);
+    uncompressed_buf_len = out.total_size();
+    //   uncompressed_buf[uncompressed_buf_len] = '\0';
+    out.destroy();
     return ret;
-  }
-  if (RET_FAIL(decompress_into_bytestream(nullptr, 0, out))) {
-    return ret;
-  }
-  uncompressed_buf = get_bytes_from_bytestream(out);
-  uncompressed_buf_len = out.total_size();
-//   uncompressed_buf[uncompressed_buf_len] = '\0';
-  out.destroy();
-  return ret;
 }
 
-} // end namespace storage
+}  // end namespace storage
diff --git a/cpp/src/compress/gzip_compressor.h b/cpp/src/compress/gzip_compressor.h
index f014ca1..6968042 100644
--- a/cpp/src/compress/gzip_compressor.h
+++ b/cpp/src/compress/gzip_compressor.h
@@ -20,135 +20,112 @@
 #ifndef COMPRESS_GZIP_COMPRESSOR_H
 #define COMPRESS_GZIP_COMPRESSOR_H
 
-#include "compressor.h"
-#include "utils/errno_define.h"
-#include "utils/util_define.h"
-#include "common/logger/elog.h"
-#include "common/allocator/byte_stream.h"
+#include <errno.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <errno.h>
 #include <zlib.h>
 
+#include "common/allocator/byte_stream.h"
+#include "common/logger/elog.h"
+#include "compressor.h"
+#include "utils/errno_define.h"
+#include "utils/util_define.h"
+
 #define DEFLATE_BUFFER_SIZE 512
 #define INFLATE_BUFFER_SIZE 512
 
+namespace storage {
 
-
-namespace storage
-{
-
-class GzipCompressor
-{
-public:
-  GzipCompressor();
-  ~GzipCompressor();
-  int reset();
-  void destroy() { end_zstream(); }
-  int compress(char *uncompressed_buf,
-               uint32_t uncompressed_buf_len,
-               char *&compressed_buf,
-               uint32_t &compressed_buf_len);
-  void after_compress(char *compressed_buf)
-  {
-    ::free(compressed_buf);
-  }
-  int compress_into_bytestream(char *uncompressed_buf,
-                               uint32_t uncompressed_buf_len,
-                               common::ByteStream &out);
-private:
-  int init_zstream();
-  int end_zstream();
-private:
-  z_stream compress_stream_;
-  char compressed_buf[DEFLATE_BUFFER_SIZE];
-  bool zstream_valid_;
-};
-
-class GzipDeCompressor
-{
-public:
-  GzipDeCompressor();
-  ~GzipDeCompressor();
-  int reset();
-  void destroy() { end_zstream(); }
-  int uncompress(char *compressed_buf,
-                 uint32_t compressed_buf_len,
-                 char *&uncompressed_buf,
-                 uint32_t &uncompressed_buf_len);
-  void after_uncompress(char *uncompressed_buf)
-  {
-    ::free(uncompressed_buf);
-  }
-  int decompress_into_bytestream(char *compressed_buf,
-                                 uint32_t compressed_buf_len,
+class GzipCompressor {
+   public:
+    GzipCompressor();
+    ~GzipCompressor();
+    int reset();
+    void destroy() { end_zstream(); }
+    int compress(char *uncompressed_buf, uint32_t uncompressed_buf_len,
+                 char *&compressed_buf, uint32_t &compressed_buf_len);
+    void after_compress(char *compressed_buf) { ::free(compressed_buf); }
+    int compress_into_bytestream(char *uncompressed_buf,
+                                 uint32_t uncompressed_buf_len,
                                  common::ByteStream &out);
-private:
-  int init_zstream();
-  int end_zstream();
-private:
-  z_stream decompress_stream_;
-  char decompressed_buf[INFLATE_BUFFER_SIZE];
-  bool zstream_valid_;
+
+   private:
+    int init_zstream();
+    int end_zstream();
+
+   private:
+    z_stream compress_stream_;
+    char compressed_buf[DEFLATE_BUFFER_SIZE];
+    bool zstream_valid_;
 };
 
-class GZIPCompressor : public Compressor
-{
-public:
-  GZIPCompressor() : gzip_compressor_(),
-                     gzip_decompressor_() {}
-  int reset(bool for_compress) OVERRIDE
-  {
-    if (for_compress) {
-      return gzip_compressor_.reset();
-    } else {
-      return gzip_decompressor_.reset();
+class GzipDeCompressor {
+   public:
+    GzipDeCompressor();
+    ~GzipDeCompressor();
+    int reset();
+    void destroy() { end_zstream(); }
+    int uncompress(char *compressed_buf, uint32_t compressed_buf_len,
+                   char *&uncompressed_buf, uint32_t &uncompressed_buf_len);
+    void after_uncompress(char *uncompressed_buf) { ::free(uncompressed_buf); }
+    int decompress_into_bytestream(char *compressed_buf,
+                                   uint32_t compressed_buf_len,
+                                   common::ByteStream &out);
+
+   private:
+    int init_zstream();
+    int end_zstream();
+
+   private:
+    z_stream decompress_stream_;
+    char decompressed_buf[INFLATE_BUFFER_SIZE];
+    bool zstream_valid_;
+};
+
+class GZIPCompressor : public Compressor {
+   public:
+    GZIPCompressor() : gzip_compressor_(), gzip_decompressor_() {}
+    int reset(bool for_compress) OVERRIDE {
+        if (for_compress) {
+            return gzip_compressor_.reset();
+        } else {
+            return gzip_decompressor_.reset();
+        }
     }
-  }
 
-  void destroy() OVERRIDE
-  {
-    gzip_compressor_.destroy();
-    gzip_decompressor_.destroy();
-  }
+    void destroy() OVERRIDE {
+        gzip_compressor_.destroy();
+        gzip_decompressor_.destroy();
+    }
 
-  int compress(char *uncompressed_buf,
-               uint32_t uncompressed_buf_len,
-               char *&compressed_buf,
-               uint32_t &compressed_buf_len) OVERRIDE
-  {
-    return gzip_compressor_.compress(uncompressed_buf,
-                                     uncompressed_buf_len,
-                                     compressed_buf,
-                                     compressed_buf_len);
-  }
-  void after_compress(char *compressed_buf) OVERRIDE
-  {
-    gzip_compressor_.after_compress(compressed_buf);
-  }
+    int compress(char *uncompressed_buf, uint32_t uncompressed_buf_len,
+                 char *&compressed_buf, uint32_t &compressed_buf_len) OVERRIDE {
+        return gzip_compressor_.compress(uncompressed_buf, uncompressed_buf_len,
+                                         compressed_buf, compressed_buf_len);
+    }
+    void after_compress(char *compressed_buf) OVERRIDE {
+        gzip_compressor_.after_compress(compressed_buf);
+    }
 
-  int uncompress(char *compressed_buf,
-                 uint32_t compressed_buf_len,
-                 char *&uncompressed_buf,
-                 uint32_t &uncompressed_buf_len) OVERRIDE
-  {
-    return gzip_decompressor_.uncompress(compressed_buf,
-                                         compressed_buf_len,
-                                         uncompressed_buf,
-                                         uncompressed_buf_len);
-  }
-  void after_uncompress(char *uncompressed_buf) OVERRIDE
-  {
-    gzip_decompressor_.after_uncompress(uncompressed_buf);
-  }
-private:
-  GzipCompressor gzip_compressor_;
-  GzipDeCompressor gzip_decompressor_;
+    int uncompress(char *compressed_buf, uint32_t compressed_buf_len,
+                   char *&uncompressed_buf,
+                   uint32_t &uncompressed_buf_len) OVERRIDE {
+        return gzip_decompressor_.uncompress(compressed_buf, compressed_buf_len,
+                                             uncompressed_buf,
+                                             uncompressed_buf_len);
+    }
+    void after_uncompress(char *uncompressed_buf) OVERRIDE {
+        gzip_decompressor_.after_uncompress(uncompressed_buf);
+    }
+
+   private:
+    GzipCompressor gzip_compressor_;
+    GzipDeCompressor gzip_decompressor_;
 };
 
-} // end namespace storage
- // end
+}  // end namespace storage
+   // end
 
-#endif // COMPRESS_GZIP_COMPRESSOR_H
+#endif  // COMPRESS_GZIP_COMPRESSOR_H
diff --git a/cpp/src/compress/lz4.c b/cpp/src/compress/lz4.c
new file mode 100644
index 0000000..4d449e6
--- /dev/null
+++ b/cpp/src/compress/lz4.c
@@ -0,0 +1,3162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*
+   LZ4 - Fast LZ compression algorithm
+   Copyright (C) 2011-2020, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+    - LZ4 homepage : http://www.lz4.org
+    - LZ4 source repository : https://github.com/lz4/lz4
+*/
+
+/*-************************************
+ *  Tuning parameters
+ **************************************/
+/*
+ * LZ4_HEAPMODE :
+ * Select how stateless compression functions like `LZ4_compress_default()`
+ * allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires
+ * malloc()).
+ */
+#ifndef LZ4_HEAPMODE
+#define LZ4_HEAPMODE 0
+#endif
+
+/*
+ * LZ4_ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
+
+/*-************************************
+ *  CPU Feature Detection
+ **************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable. Unfortunately, on some target/compiler combinations, the
+ * generated assembly is sub-optimal. The below switch allow to select different
+ * access method for improved performance. Method 0 (default) : use `memcpy()`.
+ * Safe and portable. Method 1 : `__packed` statement. It depends on compiler
+ * extension (ie, not portable). This method is safe if your compiler supports
+ * it, and *generally* as fast or faster than `memcpy`. Method 2 : direct
+ * access. This method is portable but violate C standard. It can generate buggy
+ * code on targets which assembly generation depends on alignment. But in some
+ * circumstances, it's the only known way to get the most performance (ie GCC +
+ * ARMv6) See
+ * https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html
+ * for details. Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+#if defined(__GNUC__) &&                                     \
+    (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||  \
+     defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+     defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
+#define LZ4_FORCE_MEMORY_ACCESS 2
+#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
+#define LZ4_FORCE_MEMORY_ACCESS 1
+#endif
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support
+ * hardware bit count
+ */
+#if defined(_MSC_VER) &&                                                    \
+    defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware \
+                           bit count */
+#undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
+#define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+/*-************************************
+ *  Dependency
+ **************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+#define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to \
+                                          LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
+#include "lz4.h"
+/* see also "memory routines" below */
+
+/*-************************************
+ *  Compiler Options
+ **************************************/
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+#include <intrin.h>                         /* only present in VS2005+ */
+#pragma warning( \
+    disable : 4127) /* disable: C4127: conditional expression is constant */
+#pragma warning( \
+    disable : 6237) /* disable: C6237: conditional expression is always 0 */
+#endif              /* _MSC_VER */
+
+#ifndef LZ4_FORCE_INLINE
+#ifdef _MSC_VER /* Visual Studio */
+#define LZ4_FORCE_INLINE static __forceinline
+#else
+#if defined(__cplusplus) || \
+    defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+#ifdef __GNUC__
+#define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+#else
+#define LZ4_FORCE_INLINE static inline
+#endif
+#else
+#define LZ4_FORCE_INLINE static
+#endif /* __STDC_VERSION__ */
+#endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && \
+    !defined(__clang__)
+#define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+#undef LZ4_FORCE_INLINE
+#define LZ4_FORCE_INLINE \
+    static __inline __attribute__((optimize("O2"), always_inline))
+#else
+#define LZ4_FORCE_O2
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) ||                   \
+    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
+    defined(__clang__)
+#define expect(expr, value) (__builtin_expect((expr), (value)))
+#else
+#define expect(expr, value) (expr)
+#endif
+
+#ifndef likely
+#define likely(expr) expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
+#define unlikely(expr) expect((expr) != 0, 0)
+#endif
+
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+#define LZ4_ALIGN_TEST 1
+#endif
+
+/*-************************************
+ *  Memory routines
+ **************************************/
+
+/*! LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION :
+ *  Disable relatively high-level LZ4/HC functions that use dynamic memory
+ *  allocation functions (malloc(), calloc(), free()).
+ *
+ *  Note that this is a compile-time switch. And since it disables
+ *  public/stable LZ4 v1 API functions, we don't recommend using this
+ *  symbol to generate a library for distribution.
+ *
+ *  The following public functions are removed when this symbol is defined.
+ *  - lz4   : LZ4_createStream, LZ4_freeStream,
+ *            LZ4_createStreamDecode, LZ4_freeStreamDecode, LZ4_create
+ * (deprecated)
+ *  - lz4hc : LZ4_createStreamHC, LZ4_freeStreamHC,
+ *            LZ4_createHC (deprecated), LZ4_freeHC  (deprecated)
+ *  - lz4frame, lz4file : All LZ4F_* functions
+ */
+#if defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+#define ALLOC(s) lz4_error_memory_allocation_is_disabled
+#define ALLOC_AND_ZERO(s) lz4_error_memory_allocation_is_disabled
+#define FREEMEM(p) lz4_error_memory_allocation_is_disabled
+#elif defined(LZ4_USER_MEMORY_FUNCTIONS)
+/* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void LZ4_free(void* p);
+#define ALLOC(s) LZ4_malloc(s)
+#define ALLOC_AND_ZERO(s) LZ4_calloc(1, s)
+#define FREEMEM(p) LZ4_free(p)
+#else
+#include <stdlib.h> /* malloc, calloc, free */
+#define ALLOC(s) malloc(s)
+#define ALLOC_AND_ZERO(s) calloc(1, s)
+#define FREEMEM(p) free(p)
+#endif
+
+#if !LZ4_FREESTANDING
+#include <string.h> /* memset, memcpy */
+#endif
+#if !defined(LZ4_memset)
+#define LZ4_memset(p, v, s) memset((p), (v), (s))
+#endif
+#define MEM_INIT(p, v, s) LZ4_memset((p), (v), (s))
+
+/*-************************************
+ *  Common Constants
+ **************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS                                                      \
+    5              /* see ../doc/lz4_Block_format.md#parsing-restrictions \
+                    */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE                                            \
+    ((2 * WILDCOPYLENGTH) -                                                 \
+     MINMATCH) /* ensure it's possible to writer 2 x wildcopyLength without \
+                  overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT + 1);
+
+#define KB *(1 << 10)
+#define MB *(1 << 20)
+#define GB *(1U << 30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > \
+     LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+#error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
+
+/*-************************************
+ *  Error detection
+ **************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 1)
+#include <assert.h>
+#else
+#ifndef assert
+#define assert(condition) ((void)0)
+#endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c)                           \
+    {                                                  \
+        enum { LZ4_static_assert = 1 / (int)(!!(c)) }; \
+    } /* use after variable declarations */
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 2)
+#include <stdio.h>
+static int g_debuglog_enable = 1;
+#define DEBUGLOG(l, ...)                                 \
+    {                                                    \
+        if ((g_debuglog_enable) && (l <= LZ4_DEBUG)) {   \
+            fprintf(stderr, __FILE__ " %i: ", __LINE__); \
+            fprintf(stderr, __VA_ARGS__);                \
+            fprintf(stderr, " \n");                      \
+        }                                                \
+    }
+#else
+#define DEBUGLOG(l, ...) \
+    {} /* disabled */
+#endif
+
+static int LZ4_isAligned(const void* ptr, size_t alignment) {
+    return ((size_t)ptr & (alignment - 1)) == 0;
+}
+
+/*-************************************
+ *  Types
+ **************************************/
+#include <limits.h>
+#if defined(__cplusplus) || \
+    (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#include <stdint.h>
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+#else
+#if UINT_MAX != 4294967295UL
+#error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+#endif
+typedef unsigned char BYTE;
+typedef unsigned short U16;
+typedef unsigned int U32;
+typedef signed int S32;
+typedef unsigned long long U64;
+typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+#endif
+
+#if defined(__x86_64__)
+typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+typedef size_t reg_t;   /* 32-bits in x32 mode */
+#endif
+
+typedef enum {
+    notLimited = 0,
+    limitedOutput = 1,
+    fillOutput = 2
+} limitedOutput_directive;
+
+/*-************************************
+ *  Reading and writing into memory
+ **************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in
+ * freestanding environments. This is needed when decompressing the Linux
+ * Kernel, for example.
+ */
+#if !defined(LZ4_memcpy)
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#else
+#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+#endif
+#endif
+
+#if !defined(LZ4_memmove)
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memmove __builtin_memmove
+#else
+#define LZ4_memmove memmove
+#endif
+#endif
+
+static unsigned LZ4_isLittleEndian(void) {
+    const union {
+        U32 u;
+        BYTE c[4];
+    } one = {1}; /* don't use static : performance detrimental */
+    return one.c[0];
+}
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void* memPtr) { return *(const U16*)memPtr; }
+static U32 LZ4_read32(const void* memPtr) { return *(const U32*)memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*)memPtr; }
+
+static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS == 1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially
+ * problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef struct {
+    U16 u16;
+} __attribute__((packed)) LZ4_unalign16;
+typedef struct {
+    U32 u32;
+} __attribute__((packed)) LZ4_unalign32;
+typedef struct {
+    reg_t uArch;
+} __attribute__((packed)) LZ4_unalignST;
+
+static U16 LZ4_read16(const void* ptr) {
+    return ((const LZ4_unalign16*)ptr)->u16;
+}
+static U32 LZ4_read32(const void* ptr) {
+    return ((const LZ4_unalign32*)ptr)->u32;
+}
+static reg_t LZ4_read_ARCH(const void* ptr) {
+    return ((const LZ4_unalignST*)ptr)->uArch;
+}
+
+static void LZ4_write16(void* memPtr, U16 value) {
+    ((LZ4_unalign16*)memPtr)->u16 = value;
+}
+static void LZ4_write32(void* memPtr, U32 value) {
+    ((LZ4_unalign32*)memPtr)->u32 = value;
+}
+
+#else /* safe and portable access using memcpy() */
+
+static U16 LZ4_read16(const void* memPtr) {
+    U16 val;
+    LZ4_memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+static U32 LZ4_read32(const void* memPtr) {
+    U32 val;
+    LZ4_memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+static reg_t LZ4_read_ARCH(const void* memPtr) {
+    reg_t val;
+    LZ4_memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+static void LZ4_write16(void* memPtr, U16 value) {
+    LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void* memPtr, U32 value) {
+    LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+static U16 LZ4_readLE16(const void* memPtr) {
+    if (LZ4_isLittleEndian()) {
+        return LZ4_read16(memPtr);
+    } else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)((U16)p[0] + (p[1] << 8));
+    }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value) {
+    if (LZ4_isLittleEndian()) {
+        LZ4_write16(memPtr, value);
+    } else {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)value;
+        p[1] = (BYTE)(value >> 8);
+    }
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd
+ */
+LZ4_FORCE_INLINE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd) {
+    BYTE* d = (BYTE*)dstPtr;
+    const BYTE* s = (const BYTE*)srcPtr;
+    BYTE* const e = (BYTE*)dstEnd;
+
+    do {
+        LZ4_memcpy(d, s, 8);
+        d += 8;
+        s += 8;
+    } while (d < e);
+}
+
+static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
+
+#ifndef LZ4_FAST_DEC_LOOP
+#if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+#define LZ4_FAST_DEC_LOOP 1
+#elif defined(__aarch64__) && defined(__APPLE__)
+#define LZ4_FAST_DEC_LOOP 1
+#elif defined(__aarch64__) && !defined(__clang__)
+/* On non-Apple aarch64, we disable this optimization for clang because
+ * on certain mobile chipsets, performance is reduced with clang. For
+ * more information refer to https://github.com/lz4/lz4/pull/707 */
+#define LZ4_FAST_DEC_LOOP 1
+#else
+#define LZ4_FAST_DEC_LOOP 0
+#endif
+#endif
+
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_INLINE void LZ4_memcpy_using_offset_base(BYTE* dstPtr,
+                                                   const BYTE* srcPtr,
+                                                   BYTE* dstEnd,
+                                                   const size_t offset) {
+    assert(srcPtr + offset == dstPtr);
+    if (offset < 8) {
+        LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+        dstPtr[0] = srcPtr[0];
+        dstPtr[1] = srcPtr[1];
+        dstPtr[2] = srcPtr[2];
+        dstPtr[3] = srcPtr[3];
+        srcPtr += inc32table[offset];
+        LZ4_memcpy(dstPtr + 4, srcPtr, 4);
+        srcPtr -= dec64table[offset];
+        dstPtr += 8;
+    } else {
+        LZ4_memcpy(dstPtr, srcPtr, 8);
+        dstPtr += 8;
+        srcPtr += 8;
+    }
+
+    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond
+ * dstEnd this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_INLINE void LZ4_wildCopy32(void* dstPtr, const void* srcPtr,
+                                     void* dstEnd) {
+    BYTE* d = (BYTE*)dstPtr;
+    const BYTE* s = (const BYTE*)srcPtr;
+    BYTE* const e = (BYTE*)dstEnd;
+
+    do {
+        LZ4_memcpy(d, s, 16);
+        LZ4_memcpy(d + 16, s + 16, 16);
+        d += 32;
+        s += 32;
+    } while (d < e);
+}
+
+/* LZ4_memcpy_using_offset()  presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to writer after dstEnd */
+LZ4_FORCE_INLINE void LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr,
+                                              BYTE* dstEnd,
+                                              const size_t offset) {
+    BYTE v[8];
+
+    assert(dstEnd >= dstPtr + MINMATCH);
+
+    switch (offset) {
+        case 1:
+            MEM_INIT(v, *srcPtr, 8);
+            break;
+        case 2:
+            LZ4_memcpy(v, srcPtr, 2);
+            LZ4_memcpy(&v[2], srcPtr, 2);
+#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier \
+                                             */
+#pragma warning(push)
+#pragma warning( \
+    disable : 6385) /* warning C6385: Reading invalid data from 'v'. */
+#endif
+            LZ4_memcpy(&v[4], v, 4);
+#if defined(_MSC_VER) && (_MSC_VER <= 1933) /* MSVC 2022 ver 17.3 or earlier \
+                                             */
+#pragma warning(pop)
+#endif
+            break;
+        case 4:
+            LZ4_memcpy(v, srcPtr, 4);
+            LZ4_memcpy(&v[4], srcPtr, 4);
+            break;
+        default:
+            LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+            return;
+    }
+
+    LZ4_memcpy(dstPtr, v, 8);
+    dstPtr += 8;
+    while (dstPtr < dstEnd) {
+        LZ4_memcpy(dstPtr, v, 8);
+        dstPtr += 8;
+    }
+}
+#endif
+
+/*-************************************
+ *  Common functions
+ **************************************/
+static unsigned LZ4_NbCommonBytes(reg_t val) {
+    assert(val != 0);
+    if (LZ4_isLittleEndian()) {
+        if (sizeof(val) == 8) {
+#if defined(_MSC_VER) && (_MSC_VER >= 1800) &&     \
+    (defined(_M_AMD64) && !defined(_M_ARM64EC)) && \
+    !defined(LZ4_FORCE_SW_BITCOUNT)
+/*-*************************************************************************************************
+ * ARM64EC is a Microsoft-designed ARM64 ABI compatible with AMD64 applications
+ *on ARM64 Windows 11. The ARM64EC ABI does not support AVX/AVX2/AVX512
+ *instructions, nor their relevant intrinsics including _tzcnt_u64. Therefore,
+ *we need to neuter the _tzcnt_u64 code path for ARM64EC.
+ ****************************************************************************************************/
+#if defined(__clang__) && (__clang_major__ < 10)
+            /* Avoid undefined clang-cl intrinsics issue.
+             * See https://github.com/lz4/lz4/pull/1017 for details. */
+            return (unsigned)__builtin_ia32_tzcnt_u64(val) >> 3;
+#else
+            /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+            return (unsigned)_tzcnt_u64(val) >> 3;
+#endif
+#elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+            unsigned long r = 0;
+            _BitScanForward64(&r, (U64)val);
+            return (unsigned)r >> 3;
+#elif (defined(__clang__) ||                                                \
+       (defined(__GNUC__) &&                                                \
+        ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+    !defined(LZ4_FORCE_SW_BITCOUNT)
+            return (unsigned)__builtin_ctzll((U64)val) >> 3;
+#else
+            const U64 m = 0x0101010101010101ULL;
+            val ^= val - 1;
+            return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
+#endif
+        } else /* 32 bits */ {
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+            unsigned long r;
+            _BitScanForward(&r, (U32)val);
+            return (unsigned)r >> 3;
+#elif (defined(__clang__) ||                                                \
+       (defined(__GNUC__) &&                                                \
+        ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+    !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+            return (unsigned)__builtin_ctz((U32)val) >> 3;
+#else
+            const U32 m = 0x01010101;
+            return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+#endif
+        }
+    } else /* Big Endian CPU */ {
+        if (sizeof(val) == 8) {
+#if (defined(__clang__) ||                                                \
+     (defined(__GNUC__) &&                                                \
+      ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+    !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+            return (unsigned)__builtin_clzll((U64)val) >> 3;
+#else
+#if 1
+            /* this method is probably faster,
+             * but adds a 128 bytes lookup table */
+            static const unsigned char ctz7_tab[128] = {
+                7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1,
+                0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0,
+                1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3,
+                0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
+                2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1,
+                0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0,
+                1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+            };
+            U64 const mask = 0x0101010101010101ULL;
+            U64 const t = (((val >> 8) - mask) | val) & mask;
+            return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+            /* this method doesn't consume memory space like the previous one,
+             * but it contains several branches,
+             * that may end up slowing execution */
+            static const U32 by32 =
+                sizeof(val) *
+                4; /* 32 on 64 bits (goal), 16 on 32 bits.
+Just to avoid some static analyzer complaining about shift by 32 on 32-bits
+target. Note that this code path is never triggered in 32-bits mode. */
+            unsigned r;
+            if (!(val >> by32)) {
+                r = 4;
+            } else {
+                r = 0;
+                val >>= by32;
+            }
+            if (!(val >> 16)) {
+                r += 2;
+                val >>= 8;
+            } else {
+                val >>= 24;
+            }
+            r += (!val);
+            return r;
+#endif
+#endif
+        } else /* 32 bits */ {
+#if (defined(__clang__) ||                                                \
+     (defined(__GNUC__) &&                                                \
+      ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+    !defined(LZ4_FORCE_SW_BITCOUNT)
+            return (unsigned)__builtin_clz((U32)val) >> 3;
+#else
+            val >>= 8;
+            val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+                   (val + 0x00FF0000)) >>
+                  24;
+            return (unsigned)val ^ 3;
+#endif
+        }
+    }
+}
+
+#define STEPSIZE sizeof(reg_t)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) {
+    const BYTE* const pStart = pIn;
+
+    if (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+        if (!diff) {
+            pIn += STEPSIZE;
+            pMatch += STEPSIZE;
+        } else {
+            return LZ4_NbCommonBytes(diff);
+        }
+    }
+
+    while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+        if (!diff) {
+            pIn += STEPSIZE;
+            pMatch += STEPSIZE;
+            continue;
+        }
+        pIn += LZ4_NbCommonBytes(diff);
+        return (unsigned)(pIn - pStart);
+    }
+
+    if ((STEPSIZE == 8) && (pIn < (pInLimit - 3)) &&
+        (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+        pIn += 4;
+        pMatch += 4;
+    }
+    if ((pIn < (pInLimit - 1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+        pIn += 2;
+        pMatch += 2;
+    }
+    if ((pIn < pInLimit) && (*pMatch == *pIn)) pIn++;
+    return (unsigned)(pIn - pStart);
+}
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+ *  Local Constants
+ **************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT - 1));
+static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run
+                                         slower on incompressible data */
+
+/*-************************************
+ *  Local Structures and types
+ **************************************/
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict        : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ *                   blob being compressed are valid and refer to the preceding
+ *                   content (of length ctx->dictSize), which is available
+ *                   contiguously preceding in memory the content currently
+ *                   being compressed.
+ * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
+ *                   else in memory, starting at ctx->dictionary with length
+ *                   ctx->dictSize.
+ * - usingDictCtx  : Everything concerning the preceding content is
+ *                   in a separate context, pointed to by ctx->dictCtx.
+ *                   ctx->dictionary, ctx->dictSize, and table entries
+ *                   in the current context that refer to positions
+ *                   preceding the beginning of the current compression are
+ *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ *                   ->dictSize describe the location and size of the preceding
+ *                   content, and matches are found by looking in the ctx
+ *                   ->dictCtx->hashTable.
+ */
+typedef enum {
+    noDict = 0,
+    withPrefix64k,
+    usingExtDict,
+    usingDictCtx
+} dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+/*-************************************
+ *  Local Utils
+ **************************************/
+int LZ4_versionNumber(void) { return LZ4_VERSION_NUMBER; }
+const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+int LZ4_sizeofState(void) { return sizeof(LZ4_stream_t); }
+
+/*-****************************************
+ *  Internal Definitions, used only in Tests
+ *******************************************/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+int LZ4_compress_forceExtDict(LZ4_stream_t* LZ4_dict, const char* source,
+                              char* dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+                                     int compressedSize, int maxOutputSize,
+                                     const void* dictStart, size_t dictSize);
+int LZ4_decompress_safe_partial_forceExtDict(
+    const char* source, char* dest, int compressedSize, int targetOutputSize,
+    int dstCapacity, const void* dictStart, size_t dictSize);
+#if defined(__cplusplus)
+}
+#endif
+
+/*-******************************
+ *  Compression functions
+ ********************************/
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType) {
+    if (tableType == byU16)
+        return ((sequence * 2654435761U) >>
+                ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+    else
+        return ((sequence * 2654435761U) >> ((MINMATCH * 8) - LZ4_HASHLOG));
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType) {
+    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG + 1 : LZ4_HASHLOG;
+    if (LZ4_isLittleEndian()) {
+        const U64 prime5bytes = 889523592379ULL;
+        return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+    } else {
+        const U64 prime8bytes = 11400714785074694791ULL;
+        return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+    }
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p,
+                                      tableType_t const tableType) {
+    if ((sizeof(reg_t) == 8) && (tableType != byU16))
+        return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+    return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase,
+                                    tableType_t const tableType) {
+    switch (tableType) {
+        default:             /* fallthrough */
+        case clearedTable: { /* illegal! */
+            assert(0);
+            return;
+        }
+        case byPtr: {
+            const BYTE** hashTable = (const BYTE**)tableBase;
+            hashTable[h] = NULL;
+            return;
+        }
+        case byU32: {
+            U32* hashTable = (U32*)tableBase;
+            hashTable[h] = 0;
+            return;
+        }
+        case byU16: {
+            U16* hashTable = (U16*)tableBase;
+            hashTable[h] = 0;
+            return;
+        }
+    }
+}
+
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase,
+                                         tableType_t const tableType) {
+    switch (tableType) {
+        default:           /* fallthrough */
+        case clearedTable: /* fallthrough */
+        case byPtr: {      /* illegal! */
+            assert(0);
+            return;
+        }
+        case byU32: {
+            U32* hashTable = (U32*)tableBase;
+            hashTable[h] = idx;
+            return;
+        }
+        case byU16: {
+            U16* hashTable = (U16*)tableBase;
+            assert(idx < 65536);
+            hashTable[h] = (U16)idx;
+            return;
+        }
+    }
+}
+
+/* LZ4_putPosition*() : only used in byPtr mode */
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+                                            void* tableBase,
+                                            tableType_t const tableType) {
+    const BYTE** const hashTable = (const BYTE**)tableBase;
+    assert(tableType == byPtr);
+    (void)tableType;
+    hashTable[h] = p;
+}
+
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase,
+                                      tableType_t tableType) {
+    U32 const h = LZ4_hashPosition(p, tableType);
+    LZ4_putPositionOnHash(p, h, tableBase, tableType);
+}
+
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase,
+                                        tableType_t tableType) {
+    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+    if (tableType == byU32) {
+        const U32* const hashTable = (const U32*)tableBase;
+        assert(h < (1U << (LZ4_MEMORY_USAGE - 2)));
+        return hashTable[h];
+    }
+    if (tableType == byU16) {
+        const U16* const hashTable = (const U16*)tableBase;
+        assert(h < (1U << (LZ4_MEMORY_USAGE - 1)));
+        return hashTable[h];
+    }
+    assert(0);
+    return 0; /* forbidden case */
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase,
+                                         tableType_t tableType) {
+    assert(tableType == byPtr);
+    (void)tableType;
+    {
+        const BYTE* const* hashTable = (const BYTE* const*)tableBase;
+        return hashTable[h];
+    }
+}
+
+LZ4_FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p,
+                                             const void* tableBase,
+                                             tableType_t tableType) {
+    U32 const h = LZ4_hashPosition(p, tableType);
+    return LZ4_getPositionOnHash(h, tableBase, tableType);
+}
+
+LZ4_FORCE_INLINE void LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+                                       const int inputSize,
+                                       const tableType_t tableType) {
+    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+     * therefore safe to use no matter what mode we're in. Otherwise, we figure
+     * out if it's safe to leave as is or whether it needs to be reset.
+     */
+    if ((tableType_t)cctx->tableType != clearedTable) {
+        assert(inputSize >= 0);
+        if ((tableType_t)cctx->tableType != tableType ||
+            ((tableType == byU16) &&
+             cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) ||
+            ((tableType == byU32) && cctx->currentOffset > 1 GB) ||
+            tableType == byPtr || inputSize >= 4 KB) {
+            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+            cctx->currentOffset = 0;
+            cctx->tableType = (U32)clearedTable;
+        } else {
+            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+        }
+    }
+
+    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back,
+     * is faster than compressing without a gap.
+     * However, compressing with currentOffset == 0 is faster still,
+     * so we preserve that case.
+     */
+    if (cctx->currentOffset != 0 && tableType == byU32) {
+        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+        cctx->currentOffset += 64 KB;
+    }
+
+    /* Finally, clear history */
+    cctx->dictCtx = NULL;
+    cctx->dictionary = NULL;
+    cctx->dictSize = 0;
+}
+
+/** LZ4_compress_generic() :
+ *  inlined, to ensure branches are decided at compilation time.
+ *  The following conditions are presumed already validated:
+ *  - source != NULL
+ *  - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+    LZ4_stream_t_internal* const cctx, const char* const source,
+    char* const dest, const int inputSize,
+    int* inputConsumed, /* only written when outputDirective == fillOutput */
+    const int maxOutputSize, const limitedOutput_directive outputDirective,
+    const tableType_t tableType, const dict_directive dictDirective,
+    const dictIssue_directive dictIssue, const int acceleration) {
+    int result;
+    const BYTE* ip = (const BYTE*)source;
+
+    U32 const startIndex = cctx->currentOffset;
+    const BYTE* base = (const BYTE*)source - startIndex;
+    const BYTE* lowLimit;
+
+    const LZ4_stream_t_internal* dictCtx =
+        (const LZ4_stream_t_internal*)cctx->dictCtx;
+    const BYTE* const dictionary =
+        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+    const U32 dictSize =
+        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+    const U32 dictDelta = (dictDirective == usingDictCtx)
+                              ? startIndex - dictCtx->currentOffset
+                              : 0; /* make indexes in dictCtx comparable with
+                                      indexes in current context */
+
+    int const maybe_extMem =
+        (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+    U32 const prefixIdxLimit =
+        startIndex - dictSize; /* used when dictDirective == dictSmall */
+    const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
+    const BYTE* anchor = (const BYTE*)source;
+    const BYTE* const iend = ip + inputSize;
+    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
+    const BYTE* const matchlimit = iend - LASTLITERALS;
+
+    /* the dictCtx currentOffset is indexed on the start of the dictionary,
+     * while a dictionary in the current context precedes the currentOffset */
+    const BYTE* dictBase = (dictionary == NULL) ? NULL
+                           : (dictDirective == usingDictCtx)
+                               ? dictionary + dictSize - dictCtx->currentOffset
+                               : dictionary + dictSize - startIndex;
+
+    BYTE* op = (BYTE*)dest;
+    BYTE* const olimit = op + maxOutputSize;
+
+    U32 offset = 0;
+    U32 forwardH;
+
+    DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u",
+             inputSize, tableType);
+    assert(ip != NULL);
+    if (tableType == byU16)
+        assert(inputSize <
+               LZ4_64Klimit); /* Size too large (not within 64K limit) */
+    if (tableType == byPtr)
+        assert(dictDirective ==
+               noDict); /* only supported use case with byPtr */
+    /* If init conditions are not met, we don't have to mark stream
+     * as having dirty context, since no action was taken yet */
+    if (outputDirective == fillOutput && maxOutputSize < 1) {
+        return 0;
+    } /* Impossible to store anything */
+    assert(acceleration >= 1);
+
+    lowLimit =
+        (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+    /* Update context state */
+    if (dictDirective == usingDictCtx) {
+        /* Subsequent linked blocks can't use the dictionary. */
+        /* Instead, they use the block we just compressed. */
+        cctx->dictCtx = NULL;
+        cctx->dictSize = (U32)inputSize;
+    } else {
+        cctx->dictSize += (U32)inputSize;
+    }
+    cctx->currentOffset += (U32)inputSize;
+    cctx->tableType = (U32)tableType;
+
+    if (inputSize < LZ4_minLength)
+        goto _last_literals; /* Input too small, no compression (all literals)
+                              */
+
+    /* First Byte */
+    {
+        U32 const h = LZ4_hashPosition(ip, tableType);
+        if (tableType == byPtr) {
+            LZ4_putPositionOnHash(ip, h, cctx->hashTable, byPtr);
+        } else {
+            LZ4_putIndexOnHash(startIndex, h, cctx->hashTable, tableType);
+        }
+    }
+    ip++;
+    forwardH = LZ4_hashPosition(ip, tableType);
+
+    /* Main Loop */
+    for (;;) {
+        const BYTE* match;
+        BYTE* token;
+        const BYTE* filledIp;
+
+        /* Find a match */
+        if (tableType == byPtr) {
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType);
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType);
+
+            } while ((match + LZ4_DISTANCE_MAX < ip) ||
+                     (LZ4_read32(match) != LZ4_read32(ip)));
+
+        } else { /* byU32, byU16 */
+
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                U32 const current = (U32)(forwardIp - base);
+                U32 matchIndex =
+                    LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+                assert(matchIndex <= current);
+                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                if (dictDirective == usingDictCtx) {
+                    if (matchIndex < startIndex) {
+                        /* there was no match, try the dictionary */
+                        assert(tableType == byU32);
+                        matchIndex =
+                            LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                        match = dictBase + matchIndex;
+                        matchIndex +=
+                            dictDelta; /* make dictCtx index comparable with
+                                          current context */
+                        lowLimit = dictionary;
+                    } else {
+                        match = base + matchIndex;
+                        lowLimit = (const BYTE*)source;
+                    }
+                } else if (dictDirective == usingExtDict) {
+                    if (matchIndex < startIndex) {
+                        DEBUGLOG(7,
+                                 "extDict candidate: matchIndex=%5u  <  "
+                                 "startIndex=%5u",
+                                 matchIndex, startIndex);
+                        assert(startIndex - matchIndex >= MINMATCH);
+                        assert(dictBase);
+                        match = dictBase + matchIndex;
+                        lowLimit = dictionary;
+                    } else {
+                        match = base + matchIndex;
+                        lowLimit = (const BYTE*)source;
+                    }
+                } else { /* single continuous memory segment */
+                    match = base + matchIndex;
+                }
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+                DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex,
+                         current - matchIndex);
+                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) {
+                    continue;
+                } /* match outside of valid area */
+                assert(matchIndex < current);
+                if (((tableType != byU16) ||
+                     (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) &&
+                    (matchIndex + LZ4_DISTANCE_MAX < current)) {
+                    continue;
+                } /* too far */
+                assert(
+                    (current - matchIndex) <=
+                    LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+                if (LZ4_read32(match) == LZ4_read32(ip)) {
+                    if (maybe_extMem) offset = current - matchIndex;
+                    break; /* match found */
+                }
+
+            } while (1);
+        }
+
+        /* Catch up */
+        filledIp = ip;
+        while (((ip > anchor) & (match > lowLimit)) &&
+               (unlikely(ip[-1] == match[-1]))) {
+            ip--;
+            match--;
+        }
+
+        /* Encode Literals */
+        {
+            unsigned const litLength = (unsigned)(ip - anchor);
+            token = op++;
+            if ((outputDirective ==
+                 limitedOutput) && /* Check output buffer overflow */
+                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) +
+                              (litLength / 255) >
+                          olimit))) {
+                return 0; /* cannot compress within `dst` budget. Stored indexes
+                             in hash table are nonetheless fine */
+            }
+            if ((outputDirective == fillOutput) &&
+                (unlikely(op + (litLength + 240) / 255 /* litlen */ +
+                              litLength /* literals */ + 2 /* offset */ +
+                              1 /* token */ + MFLIMIT -
+                              MINMATCH /* min last literals so last match is <=
+                                          end - MFLIMIT */
+                          > olimit))) {
+                op--;
+                goto _last_literals;
+            }
+            if (litLength >= RUN_MASK) {
+                int len = (int)(litLength - RUN_MASK);
+                *token = (RUN_MASK << ML_BITS);
+                for (; len >= 255; len -= 255) *op++ = 255;
+                *op++ = (BYTE)len;
+            } else
+                *token = (BYTE)(litLength << ML_BITS);
+
+            /* Copy Literals */
+            LZ4_wildCopy8(op, anchor, op + litLength);
+            op += litLength;
+            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                     (int)(anchor - (const BYTE*)source), litLength,
+                     (int)(ip - (const BYTE*)source));
+        }
+
+    _next_match:
+        /* at this stage, the following variables must be correctly set :
+         * - ip : at start of LZ operation
+         * - match : at start of previous pattern occurrence; can be within
+         * current prefix, or within extDict
+         * - offset : if maybe_ext_memSegment==1 (constant)
+         * - lowLimit : must be == dictionary to mean "match is within extDict";
+         * must be == source otherwise
+         * - token and *token : position to writer 4-bits for match length;
+         * higher 4-bits for literal length supposed already written
+         */
+
+        if ((outputDirective == fillOutput) &&
+            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT -
+                 MINMATCH /* min last literals so last match is <= end - MFLIMIT
+                           */
+             > olimit)) {
+            /* the match was too close to the end, rewind and go to last
+             * literals */
+            op = token;
+            goto _last_literals;
+        }
+
+        /* Encode Offset */
+        if (maybe_extMem) { /* static test */
+            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset,
+                     (int)(ip - (const BYTE*)source));
+            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+            LZ4_writeLE16(op, (U16)offset);
+            op += 2;
+        } else {
+            DEBUGLOG(6, "             with offset=%u  (same segment)",
+                     (U32)(ip - match));
+            assert(ip - match <= LZ4_DISTANCE_MAX);
+            LZ4_writeLE16(op, (U16)(ip - match));
+            op += 2;
+        }
+
+        /* Encode MatchLength */
+        {
+            unsigned matchCode;
+
+            if ((dictDirective == usingExtDict ||
+                 dictDirective == usingDictCtx) &&
+                (lowLimit == dictionary) /* match within extDict */) {
+                const BYTE* limit = ip + (dictEnd - match);
+                assert(dictEnd > match);
+                if (limit > matchlimit) limit = matchlimit;
+                matchCode = LZ4_count(ip + MINMATCH, match + MINMATCH, limit);
+                ip += (size_t)matchCode + MINMATCH;
+                if (ip == limit) {
+                    unsigned const more =
+                        LZ4_count(limit, (const BYTE*)source, matchlimit);
+                    matchCode += more;
+                    ip += more;
+                }
+                DEBUGLOG(6,
+                         "             with matchLength=%u starting in extDict",
+                         matchCode + MINMATCH);
+            } else {
+                matchCode =
+                    LZ4_count(ip + MINMATCH, match + MINMATCH, matchlimit);
+                ip += (size_t)matchCode + MINMATCH;
+                DEBUGLOG(6, "             with matchLength=%u",
+                         matchCode + MINMATCH);
+            }
+
+            if ((outputDirective) && /* Check output buffer overflow */
+                (unlikely(op + (1 + LASTLITERALS) + (matchCode + 240) / 255 >
+                          olimit))) {
+                if (outputDirective == fillOutput) {
+                    /* Match description too long : reduce it */
+                    U32 newMatchCode =
+                        15 /* in token */ -
+                        1 /* to avoid needing a zero byte */ +
+                        ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+                    ip -= matchCode - newMatchCode;
+                    assert(newMatchCode < matchCode);
+                    matchCode = newMatchCode;
+                    if (unlikely(ip <= filledIp)) {
+                        /* We have already filled up to filledIp so if ip ends
+                         * up less than filledIp we have positions in the hash
+                         * table beyond the current position. This is a problem
+                         * if we reuse the hash table. So we have to remove
+                         * these positions from the hash table.
+                         */
+                        const BYTE* ptr;
+                        DEBUGLOG(5, "Clearing %u positions",
+                                 (U32)(filledIp - ip));
+                        for (ptr = ip; ptr <= filledIp; ++ptr) {
+                            U32 const h = LZ4_hashPosition(ptr, tableType);
+                            LZ4_clearHash(h, cctx->hashTable, tableType);
+                        }
+                    }
+                } else {
+                    assert(outputDirective == limitedOutput);
+                    return 0; /* cannot compress within `dst` budget. Stored
+                                 indexes in hash table are nonetheless fine */
+                }
+            }
+            if (matchCode >= ML_MASK) {
+                *token += ML_MASK;
+                matchCode -= ML_MASK;
+                LZ4_write32(op, 0xFFFFFFFF);
+                while (matchCode >= 4 * 255) {
+                    op += 4;
+                    LZ4_write32(op, 0xFFFFFFFF);
+                    matchCode -= 4 * 255;
+                }
+                op += matchCode / 255;
+                *op++ = (BYTE)(matchCode % 255);
+            } else
+                *token += (BYTE)(matchCode);
+        }
+        /* Ensure we have enough space for the last literals. */
+        assert(
+            !(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
+
+        anchor = ip;
+
+        /* Test end of chunk */
+        if (ip >= mflimitPlusOne) break;
+
+        /* Fill table */
+        {
+            U32 const h = LZ4_hashPosition(ip - 2, tableType);
+            if (tableType == byPtr) {
+                LZ4_putPositionOnHash(ip - 2, h, cctx->hashTable, byPtr);
+            } else {
+                U32 const idx = (U32)((ip - 2) - base);
+                LZ4_putIndexOnHash(idx, h, cctx->hashTable, tableType);
+            }
+        }
+
+        /* Test next position */
+        if (tableType == byPtr) {
+            match = LZ4_getPosition(ip, cctx->hashTable, tableType);
+            LZ4_putPosition(ip, cctx->hashTable, tableType);
+            if ((match + LZ4_DISTANCE_MAX >= ip) &&
+                (LZ4_read32(match) == LZ4_read32(ip))) {
+                token = op++;
+                *token = 0;
+                goto _next_match;
+            }
+
+        } else { /* byU32, byU16 */
+
+            U32 const h = LZ4_hashPosition(ip, tableType);
+            U32 const current = (U32)(ip - base);
+            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+            assert(matchIndex < current);
+            if (dictDirective == usingDictCtx) {
+                if (matchIndex < startIndex) {
+                    /* there was no match, try the dictionary */
+                    assert(tableType == byU32);
+                    matchIndex =
+                        LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                    match = dictBase + matchIndex;
+                    lowLimit =
+                        dictionary; /* required for match length counter */
+                    matchIndex += dictDelta;
+                } else {
+                    match = base + matchIndex;
+                    lowLimit = (const BYTE*)
+                        source; /* required for match length counter */
+                }
+            } else if (dictDirective == usingExtDict) {
+                if (matchIndex < startIndex) {
+                    assert(dictBase);
+                    match = dictBase + matchIndex;
+                    lowLimit =
+                        dictionary; /* required for match length counter */
+                } else {
+                    match = base + matchIndex;
+                    lowLimit = (const BYTE*)
+                        source; /* required for match length counter */
+                }
+            } else { /* single memory segment */
+                match = base + matchIndex;
+            }
+            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+            assert(matchIndex < current);
+            if (((dictIssue == dictSmall) ? (matchIndex >= prefixIdxLimit)
+                                          : 1) &&
+                (((tableType == byU16) &&
+                  (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX))
+                     ? 1
+                     : (matchIndex + LZ4_DISTANCE_MAX >= current)) &&
+                (LZ4_read32(match) == LZ4_read32(ip))) {
+                token = op++;
+                *token = 0;
+                if (maybe_extMem) offset = current - matchIndex;
+                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                         (int)(anchor - (const BYTE*)source), 0,
+                         (int)(ip - (const BYTE*)source));
+                goto _next_match;
+            }
+        }
+
+        /* Prepare next loop */
+        forwardH = LZ4_hashPosition(++ip, tableType);
+    }
+
+_last_literals:
+    /* Encode Last Literals */
+    {
+        size_t lastRun = (size_t)(iend - anchor);
+        if ((outputDirective) && /* Check output buffer overflow */
+            (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) > olimit)) {
+            if (outputDirective == fillOutput) {
+                /* adapt lastRun to fill 'dst' */
+                assert(olimit >= op);
+                lastRun = (size_t)(olimit - op) - 1 /*token*/;
+                lastRun -= (lastRun + 256 - RUN_MASK) /
+                           256; /*additional length tokens*/
+            } else {
+                assert(outputDirective == limitedOutput);
+                return 0; /* cannot compress within `dst` budget. Stored indexes
+                             in hash table are nonetheless fine */
+            }
+        }
+        DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+        if (lastRun >= RUN_MASK) {
+            size_t accumulator = lastRun - RUN_MASK;
+            *op++ = RUN_MASK << ML_BITS;
+            for (; accumulator >= 255; accumulator -= 255) *op++ = 255;
+            *op++ = (BYTE)accumulator;
+        } else {
+            *op++ = (BYTE)(lastRun << ML_BITS);
+        }
+        LZ4_memcpy(op, anchor, lastRun);
+        ip = anchor + lastRun;
+        op += lastRun;
+    }
+
+    if (outputDirective == fillOutput) {
+        *inputConsumed = (int)(((const char*)ip) - source);
+    }
+    result = (int)(((char*)op) - dest);
+    assert(result > 0);
+    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes",
+             inputSize, result);
+    return result;
+}
+
+/** LZ4_compress_generic() :
+ *  inlined, to ensure branches are decided at compilation time;
+ *  takes care of src == (NULL, 0)
+ *  and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+    LZ4_stream_t_internal* const cctx, const char* const src, char* const dst,
+    const int srcSize,
+    int* inputConsumed, /* only written when outputDirective == fillOutput */
+    const int dstCapacity, const limitedOutput_directive outputDirective,
+    const tableType_t tableType, const dict_directive dictDirective,
+    const dictIssue_directive dictIssue, const int acceleration) {
+    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i", srcSize,
+             dstCapacity);
+
+    if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) {
+        return 0;
+    }                   /* Unsupported srcSize, too large (or negative) */
+    if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+        if (outputDirective != notLimited && dstCapacity <= 0)
+            return 0; /* no output, can't writer anything */
+        DEBUGLOG(5, "Generating an empty block");
+        assert(outputDirective == notLimited || dstCapacity >= 1);
+        assert(dst != NULL);
+        dst[0] = 0;
+        if (outputDirective == fillOutput) {
+            assert(inputConsumed != NULL);
+            *inputConsumed = 0;
+        }
+        return 1;
+    }
+    assert(src != NULL);
+
+    return LZ4_compress_generic_validated(
+        cctx, src, dst, srcSize,
+        inputConsumed, /* only written into if outputDirective == fillOutput */
+        dstCapacity, outputDirective, tableType, dictDirective, dictIssue,
+        acceleration);
+}
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest,
+                               int inputSize, int maxOutputSize,
+                               int acceleration) {
+    LZ4_stream_t_internal* const ctx =
+        &LZ4_initStream(state, sizeof(LZ4_stream_t))->internal_donotuse;
+    assert(ctx != NULL);
+    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+    if (acceleration > LZ4_ACCELERATION_MAX)
+        acceleration = LZ4_ACCELERATION_MAX;
+    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+        if (inputSize < LZ4_64Klimit) {
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0,
+                                        notLimited, byU16, noDict, noDictIssue,
+                                        acceleration);
+        } else {
+            const tableType_t tableType =
+                ((sizeof(void*) == 4) && ((uptrval)source > LZ4_DISTANCE_MAX))
+                    ? byPtr
+                    : byU32;
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0,
+                                        notLimited, tableType, noDict,
+                                        noDictIssue, acceleration);
+        }
+    } else {
+        if (inputSize < LZ4_64Klimit) {
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL,
+                                        maxOutputSize, limitedOutput, byU16,
+                                        noDict, noDictIssue, acceleration);
+        } else {
+            const tableType_t tableType =
+                ((sizeof(void*) == 4) && ((uptrval)source > LZ4_DISTANCE_MAX))
+                    ? byPtr
+                    : byU32;
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL,
+                                        maxOutputSize, limitedOutput, tableType,
+                                        noDict, noDictIssue, acceleration);
+        }
+    }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src,
+                                         char* dst, int srcSize,
+                                         int dstCapacity, int acceleration) {
+    LZ4_stream_t_internal* const ctx =
+        &((LZ4_stream_t*)state)->internal_donotuse;
+    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+    if (acceleration > LZ4_ACCELERATION_MAX)
+        acceleration = LZ4_ACCELERATION_MAX;
+    assert(ctx != NULL);
+
+    if (dstCapacity >= LZ4_compressBound(srcSize)) {
+        if (srcSize < LZ4_64Klimit) {
+            const tableType_t tableType = byU16;
+            LZ4_prepareTable(ctx, srcSize, tableType);
+            if (ctx->currentOffset) {
+                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
+                                            notLimited, tableType, noDict,
+                                            dictSmall, acceleration);
+            } else {
+                return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
+                                            notLimited, tableType, noDict,
+                                            noDictIssue, acceleration);
+            }
+        } else {
+            const tableType_t tableType =
+                ((sizeof(void*) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX))
+                    ? byPtr
+                    : byU32;
+            LZ4_prepareTable(ctx, srcSize, tableType);
+            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0,
+                                        notLimited, tableType, noDict,
+                                        noDictIssue, acceleration);
+        }
+    } else {
+        if (srcSize < LZ4_64Klimit) {
+            const tableType_t tableType = byU16;
+            LZ4_prepareTable(ctx, srcSize, tableType);
+            if (ctx->currentOffset) {
+                return LZ4_compress_generic(
+                    ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput,
+                    tableType, noDict, dictSmall, acceleration);
+            } else {
+                return LZ4_compress_generic(
+                    ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput,
+                    tableType, noDict, noDictIssue, acceleration);
+            }
+        } else {
+            const tableType_t tableType =
+                ((sizeof(void*) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX))
+                    ? byPtr
+                    : byU32;
+            LZ4_prepareTable(ctx, srcSize, tableType);
+            return LZ4_compress_generic(ctx, src, dst, srcSize, NULL,
+                                        dstCapacity, limitedOutput, tableType,
+                                        noDict, noDictIssue, acceleration);
+        }
+    }
+}
+
+int LZ4_compress_fast(const char* src, char* dest, int srcSize, int dstCapacity,
+                      int acceleration) {
+    int result;
+#if (LZ4_HEAPMODE)
+    LZ4_stream_t* const ctxPtr = (LZ4_stream_t*)ALLOC(
+        sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+    if (ctxPtr == NULL) return 0;
+#else
+    LZ4_stream_t ctx;
+    LZ4_stream_t* const ctxPtr = &ctx;
+#endif
+    result = LZ4_compress_fast_extState(ctxPtr, src, dest, srcSize, dstCapacity,
+                                        acceleration);
+
+#if (LZ4_HEAPMODE)
+    FREEMEM(ctxPtr);
+#endif
+    return result;
+}
+
+int LZ4_compress_default(const char* src, char* dst, int srcSize,
+                         int dstCapacity) {
+    return LZ4_compress_fast(src, dst, srcSize, dstCapacity, 1);
+}
+
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
+static int LZ4_compress_destSize_extState(LZ4_stream_t* state, const char* src,
+                                          char* dst, int* srcSizePtr,
+                                          int targetDstSize) {
+    void* const s = LZ4_initStream(state, sizeof(*state));
+    assert(s != NULL);
+    (void)s;
+
+    if (targetDstSize >=
+        LZ4_compressBound(
+            *srcSizePtr)) { /* compression success is guaranteed */
+        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr,
+                                          targetDstSize, 1);
+    } else {
+        if (*srcSizePtr < LZ4_64Klimit) {
+            return LZ4_compress_generic(
+                &state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr,
+                targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+        } else {
+            tableType_t const addrMode =
+                ((sizeof(void*) == 4) && ((uptrval)src > LZ4_DISTANCE_MAX))
+                    ? byPtr
+                    : byU32;
+            return LZ4_compress_generic(
+                &state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr,
+                targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+        }
+    }
+}
+
+int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr,
+                          int targetDstSize) {
+#if (LZ4_HEAPMODE)
+    LZ4_stream_t* const ctx = (LZ4_stream_t*)ALLOC(
+        sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+    if (ctx == NULL) return 0;
+#else
+    LZ4_stream_t ctxBody;
+    LZ4_stream_t* const ctx = &ctxBody;
+#endif
+
+    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr,
+                                                targetDstSize);
+
+#if (LZ4_HEAPMODE)
+    FREEMEM(ctx);
+#endif
+    return result;
+}
+
+/*-******************************
+ *  Streaming functions
+ ********************************/
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_stream_t* LZ4_createStream(void) {
+    LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
+    LZ4_STATIC_ASSERT(sizeof(LZ4_stream_t) >= sizeof(LZ4_stream_t_internal));
+    DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+    if (lz4s == NULL) return NULL;
+    LZ4_initStream(lz4s, sizeof(*lz4s));
+    return lz4s;
+}
+#endif
+
+static size_t LZ4_stream_t_alignment(void) {
+#if LZ4_ALIGN_TEST
+    typedef struct {
+        char c;
+        LZ4_stream_t t;
+    } t_a;
+    return sizeof(t_a) - sizeof(LZ4_stream_t);
+#else
+    return 1; /* effectively disabled */
+#endif
+}
+
+LZ4_stream_t* LZ4_initStream(void* buffer, size_t size) {
+    DEBUGLOG(5, "LZ4_initStream");
+    if (buffer == NULL) {
+        return NULL;
+    }
+    if (size < sizeof(LZ4_stream_t)) {
+        return NULL;
+    }
+    if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+    MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+    return (LZ4_stream_t*)buffer;
+}
+
+/* resetStream is now deprecated,
+ * prefer initStream() which is more general */
+void LZ4_resetStream(LZ4_stream_t* LZ4_stream) {
+    DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
+    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
+}
+
+void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
+    LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+int LZ4_freeStream(LZ4_stream_t* LZ4_stream) {
+    if (!LZ4_stream) return 0; /* support free on NULL */
+    DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
+    FREEMEM(LZ4_stream);
+    return (0);
+}
+#endif
+
+#define HASH_UNIT sizeof(reg_t)
+int LZ4_loadDict(LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) {
+    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
+    const tableType_t tableType = byU32;
+    const BYTE* p = (const BYTE*)dictionary;
+    const BYTE* const dictEnd = p + dictSize;
+    U32 idx32;
+
+    DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary,
+             LZ4_dict);
+
+    /* It's necessary to reset the context,
+     * and not just continue it with prepareTable()
+     * to avoid any risk of generating overflowing matchIndex
+     * when compressing using this dictionary */
+    LZ4_resetStream(LZ4_dict);
+
+    /* We always increment the offset by 64 KB, since, if the dict is longer,
+     * we truncate it to the last 64k, and if it's shorter, we still want to
+     * advance by a whole window length so we can provide the guarantee that
+     * there are only valid offsets in the window, which allows an optimization
+     * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+     * dictionary isn't a full 64k. */
+    dict->currentOffset += 64 KB;
+
+    if (dictSize < (int)HASH_UNIT) {
+        return 0;
+    }
+
+    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
+    dict->dictionary = p;
+    dict->dictSize = (U32)(dictEnd - p);
+    dict->tableType = (U32)tableType;
+    idx32 = dict->currentOffset - dict->dictSize;
+
+    while (p <= dictEnd - HASH_UNIT) {
+        U32 const h = LZ4_hashPosition(p, tableType);
+        LZ4_putIndexOnHash(idx32, h, dict->hashTable, tableType);
+        p += 3;
+        idx32 += 3;
+    }
+
+    return (int)dict->dictSize;
+}
+
+void LZ4_attach_dictionary(LZ4_stream_t* workingStream,
+                           const LZ4_stream_t* dictionaryStream) {
+    const LZ4_stream_t_internal* dictCtx =
+        (dictionaryStream == NULL) ? NULL
+                                   : &(dictionaryStream->internal_donotuse);
+
+    DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", workingStream,
+             dictionaryStream, dictCtx != NULL ? dictCtx->dictSize : 0);
+
+    if (dictCtx != NULL) {
+        /* If the current offset is zero, we will never look in the
+         * external dictionary context, since there is no value a table
+         * entry can take that indicate a miss. In that case, we need
+         * to bump the offset to something non-zero.
+         */
+        if (workingStream->internal_donotuse.currentOffset == 0) {
+            workingStream->internal_donotuse.currentOffset = 64 KB;
+        }
+
+        /* Don't actually attach an empty dictionary.
+         */
+        if (dictCtx->dictSize == 0) {
+            dictCtx = NULL;
+        }
+    }
+    workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize) {
+    assert(nextSize >= 0);
+    if (LZ4_dict->currentOffset + (unsigned)nextSize >
+        0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
+        /* rescale hash table */
+        U32 const delta = LZ4_dict->currentOffset - 64 KB;
+        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+        int i;
+        DEBUGLOG(4, "LZ4_renormDictT");
+        for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
+            if (LZ4_dict->hashTable[i] < delta)
+                LZ4_dict->hashTable[i] = 0;
+            else
+                LZ4_dict->hashTable[i] -= delta;
+        }
+        LZ4_dict->currentOffset = 64 KB;
+        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+    }
+}
+
+int LZ4_compress_fast_continue(LZ4_stream_t* LZ4_stream, const char* source,
+                               char* dest, int inputSize, int maxOutputSize,
+                               int acceleration) {
+    const tableType_t tableType = byU32;
+    LZ4_stream_t_internal* const streamPtr = &LZ4_stream->internal_donotuse;
+    const char* dictEnd =
+        streamPtr->dictSize
+            ? (const char*)streamPtr->dictionary + streamPtr->dictSize
+            : NULL;
+
+    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i, dictSize=%u)",
+             inputSize, streamPtr->dictSize);
+
+    LZ4_renormDictT(streamPtr, inputSize); /* fix index overflow */
+    if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+    if (acceleration > LZ4_ACCELERATION_MAX)
+        acceleration = LZ4_ACCELERATION_MAX;
+
+    /* invalidate tiny dictionaries */
+    if ((streamPtr->dictSize < 4) /* tiny dictionary : not enough for a hash */
+        && (dictEnd != source)    /* prefix mode */
+        && (inputSize > 0) /* tolerance : don't lose history, in case next
+                              invocation would use prefix mode */
+        && (streamPtr->dictCtx == NULL) /* usingDictCtx */
+    ) {
+        DEBUGLOG(
+            5,
+            "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small",
+            streamPtr->dictSize, streamPtr->dictionary);
+        /* remove dictionary existence from history, to employ faster prefix
+         * mode */
+        streamPtr->dictSize = 0;
+        streamPtr->dictionary = (const BYTE*)source;
+        dictEnd = source;
+    }
+
+    /* Check overlapping input/dictionary space */
+    {
+        const char* const sourceEnd = source + inputSize;
+        if ((sourceEnd > (const char*)streamPtr->dictionary) &&
+            (sourceEnd < dictEnd)) {
+            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+            streamPtr->dictionary = (const BYTE*)dictEnd - streamPtr->dictSize;
+        }
+    }
+
+    /* prefix mode : source data follows dictionary */
+    if (dictEnd == source) {
+        if ((streamPtr->dictSize < 64 KB) &&
+            (streamPtr->dictSize < streamPtr->currentOffset))
+            return LZ4_compress_generic(streamPtr, source, dest, inputSize,
+                                        NULL, maxOutputSize, limitedOutput,
+                                        tableType, withPrefix64k, dictSmall,
+                                        acceleration);
+        else
+            return LZ4_compress_generic(streamPtr, source, dest, inputSize,
+                                        NULL, maxOutputSize, limitedOutput,
+                                        tableType, withPrefix64k, noDictIssue,
+                                        acceleration);
+    }
+
+    /* external dictionary mode */
+    {
+        int result;
+        if (streamPtr->dictCtx) {
+            /* We depend here on the fact that dictCtx'es (produced by
+             * LZ4_loadDict) guarantee that their tables contain no references
+             * to offsets between dictCtx->currentOffset - 64 KB and
+             * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+             * to use noDictIssue even when the dict isn't a full 64 KB.
+             */
+            if (inputSize > 4 KB) {
+                /* For compressing large blobs, it is faster to pay the setup
+                 * cost to copy the dictionary's tables into the active context,
+                 * so that the compression loop is only looking into one table.
+                 */
+                LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
+                result = LZ4_compress_generic(
+                    streamPtr, source, dest, inputSize, NULL, maxOutputSize,
+                    limitedOutput, tableType, usingExtDict, noDictIssue,
+                    acceleration);
+            } else {
+                result = LZ4_compress_generic(
+                    streamPtr, source, dest, inputSize, NULL, maxOutputSize,
+                    limitedOutput, tableType, usingDictCtx, noDictIssue,
+                    acceleration);
+            }
+        } else { /* small data <= 4 KB */
+            if ((streamPtr->dictSize < 64 KB) &&
+                (streamPtr->dictSize < streamPtr->currentOffset)) {
+                result = LZ4_compress_generic(
+                    streamPtr, source, dest, inputSize, NULL, maxOutputSize,
+                    limitedOutput, tableType, usingExtDict, dictSmall,
+                    acceleration);
+            } else {
+                result = LZ4_compress_generic(
+                    streamPtr, source, dest, inputSize, NULL, maxOutputSize,
+                    limitedOutput, tableType, usingExtDict, noDictIssue,
+                    acceleration);
+            }
+        }
+        streamPtr->dictionary = (const BYTE*)source;
+        streamPtr->dictSize = (U32)inputSize;
+        return result;
+    }
+}
+
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict(LZ4_stream_t* LZ4_dict, const char* source,
+                              char* dest, int srcSize) {
+    LZ4_stream_t_internal* const streamPtr = &LZ4_dict->internal_donotuse;
+    int result;
+
+    LZ4_renormDictT(streamPtr, srcSize);
+
+    if ((streamPtr->dictSize < 64 KB) &&
+        (streamPtr->dictSize < streamPtr->currentOffset)) {
+        result =
+            LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0,
+                                 notLimited, byU32, usingExtDict, dictSmall, 1);
+    } else {
+        result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0,
+                                      notLimited, byU32, usingExtDict,
+                                      noDictIssue, 1);
+    }
+
+    streamPtr->dictionary = (const BYTE*)source;
+    streamPtr->dictSize = (U32)srcSize;
+
+    return result;
+}
+
+/*! LZ4_saveDict() :
+ *  If previously compressed data block is not guaranteed to remain available at
+ * its memory location, save it into a safer place (char* safeBuffer). Note : no
+ * need to call LZ4_loadDict() afterwards, dictionary is immediately usable, one
+ * can therefore call LZ4_compress_fast_continue() right after.
+ * @return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if
+ * error.
+ */
+int LZ4_saveDict(LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) {
+    LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
+
+    DEBUGLOG(5, "LZ4_saveDict : dictSize=%i, safeBuffer=%p", dictSize,
+             safeBuffer);
+
+    if ((U32)dictSize > 64 KB) {
+        dictSize = 64 KB;
+    } /* useless to define a dictionary > 64 KB */
+    if ((U32)dictSize > dict->dictSize) {
+        dictSize = (int)dict->dictSize;
+    }
+
+    if (safeBuffer == NULL) assert(dictSize == 0);
+    if (dictSize > 0) {
+        const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
+        assert(dict->dictionary);
+        LZ4_memmove(safeBuffer, previousDictEnd - dictSize, (size_t)dictSize);
+    }
+
+    dict->dictionary = (const BYTE*)safeBuffer;
+    dict->dictSize = (U32)dictSize;
+
+    return dictSize;
+}
+
+/*-*******************************
+ *  Decompression functions
+ ********************************/
+
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#undef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+/* variant for decompress_unsafe()
+ * does not know end of input
+ * presumes input is well formed
+ * note : will consume at least one byte */
+static size_t read_long_length_no_check(const BYTE** pp) {
+    size_t b, l = 0;
+    do {
+        b = **pp;
+        (*pp)++;
+        l += b;
+    } while (b == 255);
+    DEBUGLOG(6, "read_long_length_no_check: +length=%zu using %zu input bytes",
+             l, l / 255 + 1)
+    return l;
+}
+
+/* core decoder variant for LZ4_decompress_fast*()
+ * for legacy support only : these entry points are deprecated.
+ * - Presumes input is correctly formed (no defense vs malformed inputs)
+ * - Does not know input size (presume input buffer is "large enough")
+ * - Decompress a full block (only)
+ * @return : nb of bytes reader from input.
+ * Note : this variant is not optimized for speed, just for maintenance.
+ *        the goal is to remove support of decompress_fast*() variants by v2.0
+ **/
+LZ4_FORCE_INLINE int LZ4_decompress_unsafe_generic(
+    const BYTE* const istart, BYTE* const ostart, int decompressedSize,
+
+    size_t prefixSize,
+    const BYTE* const dictStart, /* only if dict==usingExtDict */
+    const size_t dictSize        /* note: =0 if dictStart==NULL */
+) {
+    const BYTE* ip = istart;
+    BYTE* op = (BYTE*)ostart;
+    BYTE* const oend = ostart + decompressedSize;
+    const BYTE* const prefixStart = ostart - prefixSize;
+
+    DEBUGLOG(5, "LZ4_decompress_unsafe_generic");
+    if (dictStart == NULL) assert(dictSize == 0);
+
+    while (1) {
+        /* start new sequence */
+        unsigned token = *ip++;
+
+        /* literals */
+        {
+            size_t ll = token >> ML_BITS;
+            if (ll == 15) {
+                /* long literal length */
+                ll += read_long_length_no_check(&ip);
+            }
+            if ((size_t)(oend - op) < ll)
+                return -1;           /* output buffer overflow */
+            LZ4_memmove(op, ip, ll); /* support in-place decompression */
+            op += ll;
+            ip += ll;
+            if ((size_t)(oend - op) < MFLIMIT) {
+                if (op == oend) break; /* end of block */
+                DEBUGLOG(
+                    5,
+                    "invalid: literals end at distance %zi from end of block",
+                    oend - op);
+                /* incorrect end of block :
+                 * last match must start at least MFLIMIT==12 bytes before end
+                 * of output block */
+                return -1;
+            }
+        }
+
+        /* match */
+        {
+            size_t ml = token & 15;
+            size_t const offset = LZ4_readLE16(ip);
+            ip += 2;
+
+            if (ml == 15) {
+                /* long literal length */
+                ml += read_long_length_no_check(&ip);
+            }
+            ml += MINMATCH;
+
+            if ((size_t)(oend - op) < ml)
+                return -1; /* output buffer overflow */
+
+            {
+                const BYTE* match = op - offset;
+
+                /* out of range */
+                if (offset > (size_t)(op - prefixStart) + dictSize) {
+                    DEBUGLOG(6, "offset out of range");
+                    return -1;
+                }
+
+                /* check special case : extDict */
+                if (offset > (size_t)(op - prefixStart)) {
+                    /* extDict scenario */
+                    const BYTE* const dictEnd = dictStart + dictSize;
+                    const BYTE* extMatch =
+                        dictEnd - (offset - (size_t)(op - prefixStart));
+                    size_t const extml = (size_t)(dictEnd - extMatch);
+                    if (extml > ml) {
+                        /* match entirely within extDict */
+                        LZ4_memmove(op, extMatch, ml);
+                        op += ml;
+                        ml = 0;
+                    } else {
+                        /* match split between extDict & prefix */
+                        LZ4_memmove(op, extMatch, extml);
+                        op += extml;
+                        ml -= extml;
+                    }
+                    match = prefixStart;
+                }
+
+                /* match copy - slow variant, supporting overlap copy */
+                {
+                    size_t u;
+                    for (u = 0; u < ml; u++) {
+                        op[u] = match[u];
+                    }
+                }
+            }
+            op += ml;
+            if ((size_t)(oend - op) < LASTLITERALS) {
+                DEBUGLOG(
+                    5, "invalid: match ends at distance %zi from end of block",
+                    oend - op);
+                /* incorrect end of block :
+                 * last match must stop at least LASTLITERALS==5 bytes before
+                 * end of output block */
+                return -1;
+            }
+        } /* match */
+    }     /* main loop */
+    return (int)(ip - istart);
+}
+
+/* Read the variable-length literal or match length.
+ *
+ * @ip : input pointer
+ * @ilimit : position after which if length is not decoded, the input is
+ *necessarily corrupted.
+ * @initial_check - check ip >= ipmax before start of loop.  Returns
+ *initial_error if so.
+ * @error (output) - error code.  Must be set to 0 before call.
+ **/
+typedef size_t Rvl_t;
+static const Rvl_t rvl_error = (Rvl_t)(-1);
+LZ4_FORCE_INLINE Rvl_t read_variable_length(const BYTE** ip, const BYTE* ilimit,
+                                            int initial_check) {
+    Rvl_t s, length = 0;
+    assert(ip != NULL);
+    assert(*ip != NULL);
+    assert(ilimit != NULL);
+    if (initial_check && unlikely((*ip) >= ilimit)) { /* reader limit reached */
+        return rvl_error;
+    }
+    do {
+        s = **ip;
+        (*ip)++;
+        length += s;
+        if (unlikely((*ip) > ilimit)) { /* reader limit reached */
+            return rvl_error;
+        }
+        /* accumulator overflow detection (32-bit mode only) */
+        if ((sizeof(length) < 8) && unlikely(length > ((Rvl_t)(-1) / 2))) {
+            return rvl_error;
+        }
+    } while (s == 255);
+
+    return length;
+}
+
+/*! LZ4_decompress_generic() :
+ *  This generic decompression function covers all use cases.
+ *  It shall be instantiated several times, using different sets of directives.
+ *  Note that it is important for performance that this function really get
+ * inlined, in order to remove useless branches during compilation optimization.
+ */
+LZ4_FORCE_INLINE int LZ4_decompress_generic(
+    const char* const src, char* const dst, int srcSize,
+    int outputSize, /* If endOnInput==endOnInputSize, this value is
+                       `dstCapacity` */
+
+    earlyEnd_directive partialDecoding, /* full, partial */
+    dict_directive dict,         /* noDict, withPrefix64k, usingExtDict */
+    const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
+    const BYTE* const dictStart, /* only if dict==usingExtDict */
+    const size_t dictSize        /* note : = 0 if noDict */
+) {
+    if ((src == NULL) || (outputSize < 0)) {
+        return -1;
+    }
+
+    {
+        const BYTE* ip = (const BYTE*)src;
+        const BYTE* const iend = ip + srcSize;
+
+        BYTE* op = (BYTE*)dst;
+        BYTE* const oend = op + outputSize;
+        BYTE* cpy;
+
+        const BYTE* const dictEnd =
+            (dictStart == NULL) ? NULL : dictStart + dictSize;
+
+        const int checkOffset = (dictSize < (int)(64 KB));
+
+        /* Set up the "end" pointers for the shortcut. */
+        const BYTE* const shortiend = iend - 14 /*maxLL*/ - 2 /*offset*/;
+        const BYTE* const shortoend = oend - 14 /*maxLL*/ - 18 /*maxML*/;
+
+        const BYTE* match;
+        size_t offset;
+        unsigned token;
+        size_t length;
+
+        DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize,
+                 outputSize);
+
+        /* Special cases */
+        assert(lowPrefix <= op);
+        if (unlikely(outputSize == 0)) {
+            /* Empty output buffer */
+            if (partialDecoding) return 0;
+            return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
+        }
+        if (unlikely(srcSize == 0)) {
+            return -1;
+        }
+
+        /* LZ4_FAST_DEC_LOOP:
+         * designed for modern OoO performance cpus,
+         * where copying reliably 32-bytes is preferable to an unpredictable
+         * branch. note : fast loop may show a regression for some client arm
+         * chips. */
+#if LZ4_FAST_DEC_LOOP
+        if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+            DEBUGLOG(6, "skip fast decode loop");
+            goto safe_decode;
+        }
+
+        /* Fast loop : decode sequences as long as output <
+         * oend-FASTLOOP_SAFE_DISTANCE */
+        DEBUGLOG(6, "using fast decode loop");
+        while (1) {
+            /* Main fastloop assertion: We can always wildcopy
+             * FASTLOOP_SAFE_DISTANCE
+             */
+            assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+            assert(ip < iend);
+            token = *ip++;
+            length = token >> ML_BITS; /* literal length */
+
+            /* decode literal length */
+            if (length == RUN_MASK) {
+                size_t const addl =
+                    read_variable_length(&ip, iend - RUN_MASK, 1);
+                if (addl == rvl_error) {
+                    DEBUGLOG(6, "error reading long literal length");
+                    goto _output_error;
+                }
+                length += addl;
+                if (unlikely((uptrval)(op) + length < (uptrval)(op))) {
+                    goto _output_error;
+                } /* overflow detection */
+                if (unlikely((uptrval)(ip) + length < (uptrval)(ip))) {
+                    goto _output_error;
+                } /* overflow detection */
+
+                /* copy literals */
+                cpy = op + length;
+                LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+                if ((cpy > oend - 32) || (ip + length > iend - 32)) {
+                    goto safe_literal_copy;
+                }
+                LZ4_wildCopy32(op, ip, cpy);
+                ip += length;
+                op = cpy;
+            } else {
+                cpy = op + length;
+                DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe",
+                         (unsigned)length);
+                /* We don't need to check oend, since we check it once for each
+                 * loop below */
+                if (ip > iend - (16 + 1 /*max lit + offset + nextToken*/)) {
+                    goto safe_literal_copy;
+                }
+                /* Literals can only be <= 14, but hope compilers optimize
+                 * better when copy by a register size */
+                LZ4_memcpy(op, ip, 16);
+                ip += length;
+                op = cpy;
+            }
+
+            /* get offset */
+            offset = LZ4_readLE16(ip);
+            ip += 2;
+            DEBUGLOG(6, " offset = %zu", offset);
+            match = op - offset;
+            assert(match <= op); /* overflow check */
+
+            /* get matchlength */
+            length = token & ML_MASK;
+
+            if (length == ML_MASK) {
+                size_t const addl =
+                    read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
+                if (addl == rvl_error) {
+                    DEBUGLOG(6, "error reading long match length");
+                    goto _output_error;
+                }
+                length += addl;
+                length += MINMATCH;
+                if (unlikely((uptrval)(op) + length < (uptrval)op)) {
+                    goto _output_error;
+                } /* overflow detection */
+                if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
+                    DEBUGLOG(6, "Error : offset outside buffers");
+                    goto _output_error;
+                }
+                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+                    goto safe_match_copy;
+                }
+            } else {
+                length += MINMATCH;
+                if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+                    goto safe_match_copy;
+                }
+
+                /* Fastpath check: skip LZ4_wildCopy32 when true */
+                if ((dict == withPrefix64k) || (match >= lowPrefix)) {
+                    if (offset >= 8) {
+                        assert(match >= lowPrefix);
+                        assert(match <= op);
+                        assert(op + 18 <= oend);
+
+                        LZ4_memcpy(op, match, 8);
+                        LZ4_memcpy(op + 8, match + 8, 8);
+                        LZ4_memcpy(op + 16, match + 16, 2);
+                        op += length;
+                        continue;
+                    }
+                }
+            }
+
+            if (checkOffset && (unlikely(match + dictSize < lowPrefix))) {
+                DEBUGLOG(6, "Error : pos=%zi, offset=%zi => outside buffers",
+                         op - lowPrefix, op - match);
+                goto _output_error;
+            }
+            /* match starting within external dictionary */
+            if ((dict == usingExtDict) && (match < lowPrefix)) {
+                assert(dictEnd != NULL);
+                if (unlikely(op + length > oend - LASTLITERALS)) {
+                    if (partialDecoding) {
+                        DEBUGLOG(7,
+                                 "partialDecoding: dictionary match, close to "
+                                 "dstEnd");
+                        length = MIN(length, (size_t)(oend - op));
+                    } else {
+                        DEBUGLOG(6, "end-of-block condition violated")
+                        goto _output_error;
+                    }
+                }
+
+                if (length <= (size_t)(lowPrefix - match)) {
+                    /* match fits entirely within external dictionary : just
+                     * copy */
+                    LZ4_memmove(op, dictEnd - (lowPrefix - match), length);
+                    op += length;
+                } else {
+                    /* match stretches into both external dictionary and current
+                     * block */
+                    size_t const copySize = (size_t)(lowPrefix - match);
+                    size_t const restSize = length - copySize;
+                    LZ4_memcpy(op, dictEnd - copySize, copySize);
+                    op += copySize;
+                    if (restSize >
+                        (size_t)(op - lowPrefix)) { /* overlap copy */
+                        BYTE* const endOfMatch = op + restSize;
+                        const BYTE* copyFrom = lowPrefix;
+                        while (op < endOfMatch) {
+                            *op++ = *copyFrom++;
+                        }
+                    } else {
+                        LZ4_memcpy(op, lowPrefix, restSize);
+                        op += restSize;
+                    }
+                }
+                continue;
+            }
+
+            /* copy match within block */
+            cpy = op + length;
+
+            assert((op <= oend) && (oend - op >= 32));
+            if (unlikely(offset < 16)) {
+                LZ4_memcpy_using_offset(op, match, cpy, offset);
+            } else {
+                LZ4_wildCopy32(op, match, cpy);
+            }
+
+            op = cpy; /* wildcopy correction */
+        }
+    safe_decode:
+#endif
+
+        /* Main Loop : decode remaining sequences where output <
+         * FASTLOOP_SAFE_DISTANCE */
+        DEBUGLOG(6, "using safe decode loop");
+        while (1) {
+            assert(ip < iend);
+            token = *ip++;
+            length = token >> ML_BITS; /* literal length */
+
+            /* A two-stage shortcut for the most common case:
+             * 1) If the literal length is 0..14, and there is enough space,
+             * enter the shortcut and copy 16 bytes on behalf of the literals
+             * (in the fast mode, only 8 bytes can be safely copied this way).
+             * 2) Further if the match length is 4..18, copy 18 bytes in a
+             * similar manner; but we ensure that there's enough space in the
+             * output for those 18 bytes earlier, upon entering the shortcut (in
+             * other words, there is a combined check for both stages).
+             */
+            if ((length != RUN_MASK)
+                /* strictly "less than" on input, to re-enter the loop with at
+                   least one byte */
+                && likely((ip < shortiend) & (op <= shortoend))) {
+                /* Copy the literals */
+                LZ4_memcpy(op, ip, 16);
+                op += length;
+                ip += length;
+
+                /* The second stage: prepare for match copying, decode full
+                 * info. If it doesn't work out, the info won't be wasted. */
+                length = token & ML_MASK; /* match length */
+                offset = LZ4_readLE16(ip);
+                ip += 2;
+                match = op - offset;
+                assert(match <= op); /* check overflow */
+
+                /* Do not deal with overlapping matches. */
+                if ((length != ML_MASK) && (offset >= 8) &&
+                    (dict == withPrefix64k || match >= lowPrefix)) {
+                    /* Copy the match. */
+                    LZ4_memcpy(op + 0, match + 0, 8);
+                    LZ4_memcpy(op + 8, match + 8, 8);
+                    LZ4_memcpy(op + 16, match + 16, 2);
+                    op += length + MINMATCH;
+                    /* Both stages worked, load the next token. */
+                    continue;
+                }
+
+                /* The second stage didn't work out, but the info is ready.
+                 * Propel it right to the point of match copying. */
+                goto _copy_match;
+            }
+
+            /* decode literal length */
+            if (length == RUN_MASK) {
+                size_t const addl =
+                    read_variable_length(&ip, iend - RUN_MASK, 1);
+                if (addl == rvl_error) {
+                    goto _output_error;
+                }
+                length += addl;
+                if (unlikely((uptrval)(op) + length < (uptrval)(op))) {
+                    goto _output_error;
+                } /* overflow detection */
+                if (unlikely((uptrval)(ip) + length < (uptrval)(ip))) {
+                    goto _output_error;
+                } /* overflow detection */
+            }
+
+            /* copy literals */
+            cpy = op + length;
+#if LZ4_FAST_DEC_LOOP
+        safe_literal_copy:
+#endif
+            LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+            if ((cpy > oend - MFLIMIT) ||
+                (ip + length > iend - (2 + 1 + LASTLITERALS))) {
+                /* We've either hit the input parsing restriction or the output
+                 * parsing restriction. In the normal scenario, decoding a full
+                 * block, it must be the last sequence, otherwise it's an error
+                 * (invalid input or dimensions). In partialDecoding scenario,
+                 * it's necessary to ensure there is no buffer overflow.
+                 */
+                if (partialDecoding) {
+                    /* Since we are partial decoding we may be in this block
+                     * because of the output parsing restriction, which is not
+                     * valid since the output buffer is allowed to be
+                     * undersized.
+                     */
+                    DEBUGLOG(7,
+                             "partialDecoding: copying literals, close to "
+                             "input or output end")
+                    DEBUGLOG(7, "partialDecoding: literal length = %u",
+                             (unsigned)length);
+                    DEBUGLOG(
+                        7, "partialDecoding: remaining space in dstBuffer : %i",
+                        (int)(oend - op));
+                    DEBUGLOG(
+                        7, "partialDecoding: remaining space in srcBuffer : %i",
+                        (int)(iend - ip));
+                    /* Finishing in the middle of a literals segment,
+                     * due to lack of input.
+                     */
+                    if (ip + length > iend) {
+                        length = (size_t)(iend - ip);
+                        cpy = op + length;
+                    }
+                    /* Finishing in the middle of a literals segment,
+                     * due to lack of output space.
+                     */
+                    if (cpy > oend) {
+                        cpy = oend;
+                        assert(op <= oend);
+                        length = (size_t)(oend - op);
+                    }
+                } else {
+                    /* We must be on the last sequence (or invalid) because of
+                     * the parsing limitations so check that we exactly consume
+                     * the input and don't overrun the output buffer.
+                     */
+                    if ((ip + length != iend) || (cpy > oend)) {
+                        DEBUGLOG(6, "should have been last run of literals")
+                        DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip,
+                                 (int)length, ip + length, iend);
+                        DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+                        goto _output_error;
+                    }
+                }
+                LZ4_memmove(op, ip,
+                            length); /* supports overlapping memory regions, for
+                                        in-place decompression scenarios */
+                ip += length;
+                op += length;
+                /* Necessarily EOF when !partialDecoding.
+                 * When partialDecoding, it is EOF if we've either
+                 * filled the output buffer or
+                 * can't proceed with reading an offset for following match.
+                 */
+                if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2))) {
+                    break;
+                }
+            } else {
+                LZ4_wildCopy8(op, ip,
+                              cpy); /* can overwrite up to 8 bytes beyond cpy */
+                ip += length;
+                op = cpy;
+            }
+
+            /* get offset */
+            offset = LZ4_readLE16(ip);
+            ip += 2;
+            match = op - offset;
+
+            /* get matchlength */
+            length = token & ML_MASK;
+
+        _copy_match:
+            if (length == ML_MASK) {
+                size_t const addl =
+                    read_variable_length(&ip, iend - LASTLITERALS + 1, 0);
+                if (addl == rvl_error) {
+                    goto _output_error;
+                }
+                length += addl;
+                if (unlikely((uptrval)(op) + length < (uptrval)op))
+                    goto _output_error; /* overflow detection */
+            }
+            length += MINMATCH;
+
+#if LZ4_FAST_DEC_LOOP
+        safe_match_copy:
+#endif
+            if ((checkOffset) && (unlikely(match + dictSize < lowPrefix)))
+                goto _output_error; /* Error : offset outside buffers */
+            /* match starting within external dictionary */
+            if ((dict == usingExtDict) && (match < lowPrefix)) {
+                assert(dictEnd != NULL);
+                if (unlikely(op + length > oend - LASTLITERALS)) {
+                    if (partialDecoding)
+                        length = MIN(length, (size_t)(oend - op));
+                    else
+                        goto _output_error; /* doesn't respect parsing
+                                               restriction */
+                }
+
+                if (length <= (size_t)(lowPrefix - match)) {
+                    /* match fits entirely within external dictionary : just
+                     * copy */
+                    LZ4_memmove(op, dictEnd - (lowPrefix - match), length);
+                    op += length;
+                } else {
+                    /* match stretches into both external dictionary and current
+                     * block */
+                    size_t const copySize = (size_t)(lowPrefix - match);
+                    size_t const restSize = length - copySize;
+                    LZ4_memcpy(op, dictEnd - copySize, copySize);
+                    op += copySize;
+                    if (restSize >
+                        (size_t)(op - lowPrefix)) { /* overlap copy */
+                        BYTE* const endOfMatch = op + restSize;
+                        const BYTE* copyFrom = lowPrefix;
+                        while (op < endOfMatch) *op++ = *copyFrom++;
+                    } else {
+                        LZ4_memcpy(op, lowPrefix, restSize);
+                        op += restSize;
+                    }
+                }
+                continue;
+            }
+            assert(match >= lowPrefix);
+
+            /* copy match within block */
+            cpy = op + length;
+
+            /* partialDecoding : may end anywhere within the block */
+            assert(op <= oend);
+            if (partialDecoding && (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+                size_t const mlen = MIN(length, (size_t)(oend - op));
+                const BYTE* const matchEnd = match + mlen;
+                BYTE* const copyEnd = op + mlen;
+                if (matchEnd > op) { /* overlap copy */
+                    while (op < copyEnd) {
+                        *op++ = *match++;
+                    }
+                } else {
+                    LZ4_memcpy(op, match, mlen);
+                }
+                op = copyEnd;
+                if (op == oend) {
+                    break;
+                }
+                continue;
+            }
+
+            if (unlikely(offset < 8)) {
+                LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+                op[0] = match[0];
+                op[1] = match[1];
+                op[2] = match[2];
+                op[3] = match[3];
+                match += inc32table[offset];
+                LZ4_memcpy(op + 4, match, 4);
+                match -= dec64table[offset];
+            } else {
+                LZ4_memcpy(op, match, 8);
+                match += 8;
+            }
+            op += 8;
+
+            if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
+                BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
+                if (cpy > oend - LASTLITERALS) {
+                    goto _output_error;
+                } /* Error : last LASTLITERALS bytes must be literals
+                     (uncompressed) */
+                if (op < oCopyLimit) {
+                    LZ4_wildCopy8(op, match, oCopyLimit);
+                    match += oCopyLimit - op;
+                    op = oCopyLimit;
+                }
+                while (op < cpy) {
+                    *op++ = *match++;
+                }
+            } else {
+                LZ4_memcpy(op, match, 8);
+                if (length > 16) {
+                    LZ4_wildCopy8(op + 8, match + 8, cpy);
+                }
+            }
+            op = cpy; /* wildcopy correction */
+        }
+
+        /* end of decoding */
+        DEBUGLOG(5, "decoded %i bytes", (int)(((char*)op) - dst));
+        return (int)(((char*)op) - dst); /* Nb of output bytes decoded */
+
+        /* Overflow error detected */
+    _output_error:
+        return (int)(-(((const char*)ip) - src)) - 1;
+    }
+}
+
+/*===== Instantiate the API decoding functions. =====*/
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize,
+                        int maxDecompressedSize) {
+    return LZ4_decompress_generic(source, dest, compressedSize,
+                                  maxDecompressedSize, decode_full_block,
+                                  noDict, (BYTE*)dest, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize,
+                                int targetOutputSize, int dstCapacity) {
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+                                  partial_decode, noDict, (BYTE*)dst, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize) {
+    DEBUGLOG(5, "LZ4_decompress_fast");
+    return LZ4_decompress_unsafe_generic((const BYTE*)source, (BYTE*)dest,
+                                         originalSize, 0, NULL, 0);
+}
+
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+    int
+    LZ4_decompress_safe_withPrefix64k(const char* source, char* dest,
+                                      int compressedSize, int maxOutputSize) {
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, withPrefix64k,
+                                  (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withPrefix64k(const char* source,
+                                                     char* dest,
+                                                     int compressedSize,
+                                                     int targetOutputSize,
+                                                     int dstCapacity) {
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+                                  partial_decode, withPrefix64k,
+                                  (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest,
+                                      int originalSize) {
+    return LZ4_decompress_unsafe_generic((const BYTE*)source, (BYTE*)dest,
+                                         originalSize, 64 KB, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest,
+                                               int compressedSize,
+                                               int maxOutputSize,
+                                               size_t prefixSize) {
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, noDict,
+                                  (BYTE*)dest - prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_partial_withSmallPrefix(
+    const char* source, char* dest, int compressedSize, int targetOutputSize,
+    int dstCapacity, size_t prefixSize) {
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+                                  partial_decode, noDict,
+                                  (BYTE*)dest - prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+                                     int compressedSize, int maxOutputSize,
+                                     const void* dictStart, size_t dictSize) {
+    DEBUGLOG(5, "LZ4_decompress_safe_forceExtDict");
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, usingExtDict, (BYTE*)dest,
+                                  (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial_forceExtDict(
+    const char* source, char* dest, int compressedSize, int targetOutputSize,
+    int dstCapacity, const void* dictStart, size_t dictSize) {
+    dstCapacity = MIN(targetOutputSize, dstCapacity);
+    return LZ4_decompress_generic(source, dest, compressedSize, dstCapacity,
+                                  partial_decode, usingExtDict, (BYTE*)dest,
+                                  (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_fast_extDict(const char* source, char* dest,
+                                       int originalSize, const void* dictStart,
+                                       size_t dictSize) {
+    return LZ4_decompress_unsafe_generic((const BYTE*)source, (BYTE*)dest,
+                                         originalSize, 0,
+                                         (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart +
+ * dictSize. These routines are used only once, in LZ4_decompress_*_continue().
+ */
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest,
+                                   int compressedSize, int maxOutputSize,
+                                   size_t prefixSize, const void* dictStart,
+                                   size_t dictSize) {
+    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+                                  decode_full_block, usingExtDict,
+                                  (BYTE*)dest - prefixSize,
+                                  (const BYTE*)dictStart, dictSize);
+}
+
+/*===== streaming decompression functions =====*/
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4_streamDecode_t* LZ4_createStreamDecode(void) {
+    LZ4_STATIC_ASSERT(sizeof(LZ4_streamDecode_t) >=
+                      sizeof(LZ4_streamDecode_t_internal));
+    return (LZ4_streamDecode_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+}
+
+int LZ4_freeStreamDecode(LZ4_streamDecode_t* LZ4_stream) {
+    if (LZ4_stream == NULL) {
+        return 0;
+    } /* support free on NULL */
+    FREEMEM(LZ4_stream);
+    return 0;
+}
+#endif
+
+/*! LZ4_setStreamDecode() :
+ *  Use this function to instruct where to find the dictionary.
+ *  This function is not necessary if previous data is still available where it
+ * was decoded. Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t* LZ4_streamDecode,
+                        const char* dictionary, int dictSize) {
+    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+    lz4sd->prefixSize = (size_t)dictSize;
+    if (dictSize) {
+        assert(dictionary != NULL);
+        lz4sd->prefixEnd = (const BYTE*)dictionary + dictSize;
+    } else {
+        lz4sd->prefixEnd = (const BYTE*)dictionary;
+    }
+    lz4sd->externalDict = NULL;
+    lz4sd->extDictSize = 0;
+    return 1;
+}
+
+/*! LZ4_decoderRingBufferSize() :
+ *  when setting a ring buffer for streaming decompression (optional scenario),
+ *  provides the minimum size of this ring buffer
+ *  to be compatible with any source respecting maxBlockSize condition.
+ *  Note : in a ring buffer scenario,
+ *  blocks are presumed decompressed next to each other.
+ *  When not enough space remains for next block (remainingSize < maxBlockSize),
+ *  decoding resumes from beginning of ring buffer.
+ * @return : minimum ring buffer size,
+ *           or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize) {
+    if (maxBlockSize < 0) return 0;
+    if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+    if (maxBlockSize < 16) maxBlockSize = 16;
+    return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
+
+/*
+*_continue() :
+    These decoding functions allow decompression of multiple blocks in
+"streaming" mode. Previously decoded blocks must still be available at the
+memory position where they were decoded. If it's not possible, save the relevant
+part of decoded data into a safe buffer, and indicate where it stands using
+LZ4_setStreamDecode()
+*/
+LZ4_FORCE_O2
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t* LZ4_streamDecode,
+                                 const char* source, char* dest,
+                                 int compressedSize, int maxOutputSize) {
+    LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+    int result;
+
+    if (lz4sd->prefixSize == 0) {
+        /* The first call, no dictionary yet. */
+        assert(lz4sd->extDictSize == 0);
+        result =
+            LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize = (size_t)result;
+        lz4sd->prefixEnd = (BYTE*)dest + result;
+    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+        /* They're rolling the current segment. */
+        if (lz4sd->prefixSize >= 64 KB - 1)
+            result = LZ4_decompress_safe_withPrefix64k(
+                source, dest, compressedSize, maxOutputSize);
+        else if (lz4sd->extDictSize == 0)
+            result = LZ4_decompress_safe_withSmallPrefix(
+                source, dest, compressedSize, maxOutputSize, lz4sd->prefixSize);
+        else
+            result = LZ4_decompress_safe_doubleDict(
+                source, dest, compressedSize, maxOutputSize, lz4sd->prefixSize,
+                lz4sd->externalDict, lz4sd->extDictSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize += (size_t)result;
+        lz4sd->prefixEnd += result;
+    } else {
+        /* The buffer wraps around, or they're switching to another buffer. */
+        lz4sd->extDictSize = lz4sd->prefixSize;
+        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+        result = LZ4_decompress_safe_forceExtDict(
+            source, dest, compressedSize, maxOutputSize, lz4sd->externalDict,
+            lz4sd->extDictSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize = (size_t)result;
+        lz4sd->prefixEnd = (BYTE*)dest + result;
+    }
+
+    return result;
+}
+
+LZ4_FORCE_O2 int LZ4_decompress_fast_continue(
+    LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest,
+    int originalSize) {
+    LZ4_streamDecode_t_internal* const lz4sd =
+        (assert(LZ4_streamDecode != NULL),
+         &LZ4_streamDecode->internal_donotuse);
+    int result;
+
+    DEBUGLOG(5, "LZ4_decompress_fast_continue (toDecodeSize=%i)", originalSize);
+    assert(originalSize >= 0);
+
+    if (lz4sd->prefixSize == 0) {
+        DEBUGLOG(5, "first invocation : no prefix nor extDict");
+        assert(lz4sd->extDictSize == 0);
+        result = LZ4_decompress_fast(source, dest, originalSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize = (size_t)originalSize;
+        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+    } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+        DEBUGLOG(5, "continue using existing prefix");
+        result = LZ4_decompress_unsafe_generic(
+            (const BYTE*)source, (BYTE*)dest, originalSize, lz4sd->prefixSize,
+            lz4sd->externalDict, lz4sd->extDictSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize += (size_t)originalSize;
+        lz4sd->prefixEnd += originalSize;
+    } else {
+        DEBUGLOG(5, "prefix becomes extDict");
+        lz4sd->extDictSize = lz4sd->prefixSize;
+        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+        result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+                                             lz4sd->externalDict,
+                                             lz4sd->extDictSize);
+        if (result <= 0) return result;
+        lz4sd->prefixSize = (size_t)originalSize;
+        lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+    }
+
+    return result;
+}
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+    These decoding functions work the same as "_continue" ones,
+    the dictionary must be explicitly provided within parameters
+*/
+
+int LZ4_decompress_safe_usingDict(const char* source, char* dest,
+                                  int compressedSize, int maxOutputSize,
+                                  const char* dictStart, int dictSize) {
+    if (dictSize == 0)
+        return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+    if (dictStart + dictSize == dest) {
+        if (dictSize >= 64 KB - 1) {
+            return LZ4_decompress_safe_withPrefix64k(
+                source, dest, compressedSize, maxOutputSize);
+        }
+        assert(dictSize >= 0);
+        return LZ4_decompress_safe_withSmallPrefix(
+            source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
+    }
+    assert(dictSize >= 0);
+    return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize,
+                                            maxOutputSize, dictStart,
+                                            (size_t)dictSize);
+}
+
+int LZ4_decompress_safe_partial_usingDict(const char* source, char* dest,
+                                          int compressedSize,
+                                          int targetOutputSize, int dstCapacity,
+                                          const char* dictStart, int dictSize) {
+    if (dictSize == 0)
+        return LZ4_decompress_safe_partial(source, dest, compressedSize,
+                                           targetOutputSize, dstCapacity);
+    if (dictStart + dictSize == dest) {
+        if (dictSize >= 64 KB - 1) {
+            return LZ4_decompress_safe_partial_withPrefix64k(
+                source, dest, compressedSize, targetOutputSize, dstCapacity);
+        }
+        assert(dictSize >= 0);
+        return LZ4_decompress_safe_partial_withSmallPrefix(
+            source, dest, compressedSize, targetOutputSize, dstCapacity,
+            (size_t)dictSize);
+    }
+    assert(dictSize >= 0);
+    return LZ4_decompress_safe_partial_forceExtDict(
+        source, dest, compressedSize, targetOutputSize, dstCapacity, dictStart,
+        (size_t)dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char* source, char* dest,
+                                  int originalSize, const char* dictStart,
+                                  int dictSize) {
+    if (dictSize == 0 || dictStart + dictSize == dest)
+        return LZ4_decompress_unsafe_generic((const BYTE*)source, (BYTE*)dest,
+                                             originalSize, (size_t)dictSize,
+                                             NULL, 0);
+    assert(dictSize >= 0);
+    return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart,
+                                       (size_t)dictSize);
+}
+
+/*=*************************************************
+ *  Obsolete Functions
+ ***************************************************/
+/* obsolete compression functions */
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize,
+                               int maxOutputSize) {
+    return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+}
+int LZ4_compress(const char* src, char* dest, int srcSize) {
+    return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+}
+int LZ4_compress_limitedOutput_withState(void* state, const char* src,
+                                         char* dst, int srcSize, int dstSize) {
+    return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+}
+int LZ4_compress_withState(void* state, const char* src, char* dst,
+                           int srcSize) {
+    return LZ4_compress_fast_extState(state, src, dst, srcSize,
+                                      LZ4_compressBound(srcSize), 1);
+}
+int LZ4_compress_limitedOutput_continue(LZ4_stream_t* LZ4_stream,
+                                        const char* src, char* dst, int srcSize,
+                                        int dstCapacity) {
+    return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize,
+                                      dstCapacity, 1);
+}
+int LZ4_compress_continue(LZ4_stream_t* LZ4_stream, const char* source,
+                          char* dest, int inputSize) {
+    return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize,
+                                      LZ4_compressBound(inputSize), 1);
+}
+
+/*
+These decompression functions are deprecated and should no longer be used.
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
+int LZ4_uncompress(const char* source, char* dest, int outputSize) {
+    return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize,
+                                     int maxOutputSize) {
+    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState(void) { return sizeof(LZ4_stream_t); }
+
+int LZ4_resetStreamState(void* state, char* inputBuffer) {
+    (void)inputBuffer;
+    LZ4_resetStream((LZ4_stream_t*)state);
+    return 0;
+}
+
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+void* LZ4_create(char* inputBuffer) {
+    (void)inputBuffer;
+    return LZ4_createStream();
+}
+#endif
+
+char* LZ4_slideInputBuffer(void* state) {
+    /* avoid const char * -> char * conversion warning */
+    return (char*)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
+}
+
+#endif /* LZ4_COMMONDEFS_ONLY */
diff --git a/cpp/src/compress/lz4.h b/cpp/src/compress/lz4.h
new file mode 100644
index 0000000..699f52a
--- /dev/null
+++ b/cpp/src/compress/lz4.h
@@ -0,0 +1,1000 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+/*
+ *  LZ4 - Fast LZ compression algorithm
+ *  Header File
+ *  Copyright (C) 2011-2020, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+    - LZ4 homepage : http://www.lz4.org
+    - LZ4 source repository : https://github.com/lz4/lz4
+*/
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+/**
+  Introduction
+
+  LZ4 is lossless compression algorithm, providing compression speed >500 MB/s
+  per core, scalable with multi-cores CPU. It features an extremely fast
+  decoder, with speed in multiple GB/s per core, typically reaching RAM speed
+  limits on multi-core systems.
+
+  The LZ4 compression library provides in-memory compression and decompression
+  functions. It gives full buffer control to user. Compression can be done in:
+    - a single step (described as Simple Functions)
+    - a single step, reusing a context (described in Advanced Functions)
+    - unbounded multiple steps (described as Streaming compression)
+
+  lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+  Decompressing such a compressed block requires additional metadata.
+  Exact metadata depends on exact decompression function.
+  For the typical case of LZ4_decompress_safe(),
+  metadata includes block's compressed size, and maximum bound of decompressed
+  size. Each application is free to encode and pass such metadata in whichever
+  way it wants.
+
+  lz4.h only handle blocks, it can not generate Frames.
+
+  Blocks are different from Frames (doc/lz4_Frame_format.md).
+  Frames bundle both blocks and metadata in a specified manner.
+  Embedding metadata is required for compressed data to be self-contained and
+  portable. Frame format is delivered through a companion API, declared in
+  lz4frame.h. The `lz4` CLI can only manage frames.
+*/
+
+/*^***************************************************************
+ *  Export parameters
+ *****************************************************************/
+/*
+ *  LZ4_DLL_EXPORT :
+ *  Enable exporting of functions when building a Windows DLL
+ *  LZ4LIB_VISIBILITY :
+ *  Control library symbols visibility.
+ */
+#ifndef LZ4LIB_VISIBILITY
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4LIB_VISIBILITY __attribute__((visibility("default")))
+#else
+#define LZ4LIB_VISIBILITY
+#endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT == 1)
+#define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT == 1)
+#define LZ4LIB_API                                                             \
+    __declspec(dllimport)                                                      \
+        LZ4LIB_VISIBILITY /* It isn't required but allows to generate better   \
+                             code, saving a function pointer load from the IAT \
+                             and an indirect jump.*/
+#else
+#define LZ4LIB_API LZ4LIB_VISIBILITY
+#endif
+
+/*! LZ4_FREESTANDING :
+ *  When this macro is set to 1, it enables "freestanding mode" that is
+ *  suitable for typical freestanding environment which doesn't support
+ *  standard C library.
+ *
+ *  - LZ4_FREESTANDING is a compile-time switch.
+ *  - It requires the following macros to be defined:
+ *    LZ4_memcpy, LZ4_memmove, LZ4_memset.
+ *  - It only enables LZ4/HC functions which don't use heap.
+ *    All LZ4F_* functions are not supported.
+ *  - See tests/freestanding.c to check its basic setup.
+ */
+#if defined(LZ4_FREESTANDING) && (LZ4_FREESTANDING == 1)
+#define LZ4_HEAPMODE 0
+#define LZ4HC_HEAPMODE 0
+#define LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION 1
+#if !defined(LZ4_memcpy)
+#error "LZ4_FREESTANDING requires macro 'LZ4_memcpy'."
+#endif
+#if !defined(LZ4_memset)
+#error "LZ4_FREESTANDING requires macro 'LZ4_memset'."
+#endif
+#if !defined(LZ4_memmove)
+#error "LZ4_FREESTANDING requires macro 'LZ4_memmove'."
+#endif
+#elif !defined(LZ4_FREESTANDING)
+#define LZ4_FREESTANDING 0
+#endif
+
+/*------   Version   ------*/
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes  */
+#define LZ4_VERSION_MINOR                                                      \
+    9                         /* for new (non-breaking) interface capabilities \
+                               */
+#define LZ4_VERSION_RELEASE 4 /* for tweaks, bug-fixes, or development */
+
+#define LZ4_VERSION_NUMBER                                     \
+    (LZ4_VERSION_MAJOR * 100 * 100 + LZ4_VERSION_MINOR * 100 + \
+     LZ4_VERSION_RELEASE)
+
+#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
+#define LZ4_QUOTE(str) #str
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
+#define LZ4_VERSION_STRING \
+    LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) /* requires v1.7.3+ */
+
+LZ4LIB_API int LZ4_versionNumber(
+    void); /**< library version number; useful to check dll version; requires
+              v1.3.0+ */
+LZ4LIB_API const char* LZ4_versionString(
+    void); /**< library version string; useful to check dll version; requires
+              v1.7.5+ */
+
+/*-************************************
+ *  Tuning parameter
+ **************************************/
+#define LZ4_MEMORY_USAGE_MIN 10
+#define LZ4_MEMORY_USAGE_DEFAULT 14
+#define LZ4_MEMORY_USAGE_MAX 20
+
+/*!
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 ->
+ * 64KB; 20 -> 1MB; ) Increasing memory usage improves compression ratio, at the
+ * cost of speed. Reduced memory usage may improve speed at the cost of ratio,
+ * thanks to better cache locality. Default value is 14, for 16KB, which nicely
+ * fits into Intel x86 L1 cache
+ */
+#ifndef LZ4_MEMORY_USAGE
+#define LZ4_MEMORY_USAGE LZ4_MEMORY_USAGE_DEFAULT
+#endif
+
+#if (LZ4_MEMORY_USAGE < LZ4_MEMORY_USAGE_MIN)
+#error "LZ4_MEMORY_USAGE is too small !"
+#endif
+
+#if (LZ4_MEMORY_USAGE > LZ4_MEMORY_USAGE_MAX)
+#error "LZ4_MEMORY_USAGE is too large !"
+#endif
+
+/*-************************************
+ *  Simple Functions
+ **************************************/
+/*! LZ4_compress_default() :
+ *  Compresses 'srcSize' bytes from buffer 'src'
+ *  into already allocated 'dst' buffer of size 'dstCapacity'.
+ *  Compression is guaranteed to succeed if 'dstCapacity' >=
+ * LZ4_compressBound(srcSize). It also runs faster, so it's a recommended
+ * setting. If the function cannot compress 'src' into a more limited 'dst'
+ * budget, compression stops *immediately*, and the function result is zero. In
+ * which case, 'dst' content is undefined (invalid). srcSize : max supported
+ * value is LZ4_MAX_INPUT_SIZE. dstCapacity : size of buffer 'dst' (which must
+ * be already allocated)
+ *     @return  : the number of bytes written into buffer 'dst' (necessarily <=
+ * dstCapacity) or 0 if compression fails Note : This function is protected
+ * against buffer overflow scenarios (never writes outside 'dst' buffer, nor
+ * reader outside 'source' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize,
+                                    int dstCapacity);
+
+/*! LZ4_decompress_safe() :
+ * @compressedSize : is the exact complete size of the compressed block.
+ * @dstCapacity : is the size of destination buffer (which must be already
+ * allocated), is an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer
+ * (necessarily <= dstCapacity) If destination buffer is not large enough,
+ * decoding will stop and output an error code (negative value). If the source
+ * stream is detected malformed, the function will stop decoding and return a
+ * negative result. Note 1 : This function is protected against malicious data
+ * packets : it will never writes outside 'dst' buffer, nor reader outside
+ * 'source' buffer, even if the compressed block is maliciously modified to
+ * order the decoder to do these actions. In such case, the decoder stops
+ * immediately, and considers the compressed block malformed. Note 2 :
+ * compressedSize and dstCapacity must be provided to the function, the
+ * compressed block does not contain them. The implementation is free to send /
+ * store / derive this information in whichever way is most beneficial. If there
+ * is a need for a different format which bundles together both compressed data
+ * and its metadata, consider looking at lz4frame.h instead.
+ */
+LZ4LIB_API int LZ4_decompress_safe(const char* src, char* dst,
+                                   int compressedSize, int dstCapacity);
+
+/*-************************************
+ *  Advanced Functions
+ **************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize)                      \
+    ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE \
+         ? 0                                          \
+         : (isize) + ((isize) / 255) + 16)
+
+/*! LZ4_compressBound() :
+    Provides the maximum size that LZ4 compression may output in a "worst case"
+   scenario (input data not compressible) This function is primarily useful for
+   memory allocation purposes (destination buffer size). Macro
+   LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack
+   memory allocation for example). Note that LZ4_compress_default() compresses
+   faster when dstCapacity is >= LZ4_compressBound(srcSize) inputSize  : max
+   supported value is LZ4_MAX_INPUT_SIZE return : maximum output size in a
+   "worst case" scenario or 0, if input size is incorrect (too large or
+   negative)
+*/
+LZ4LIB_API int LZ4_compressBound(int inputSize);
+
+/*! LZ4_compress_fast() :
+    Same as LZ4_compress_default(), but allows selection of "acceleration"
+   factor. The larger the acceleration value, the faster the algorithm, but also
+   the lesser the compression. It's a trade-off. It can be fine tuned, with each
+   successive value providing roughly +~3% to speed. An acceleration value of
+   "1" is the same as regular LZ4_compress_default() Values <= 0 will be
+   replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c). Values >
+   LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently ==
+   65537, see lz4.c).
+*/
+LZ4LIB_API int LZ4_compress_fast(const char* src, char* dst, int srcSize,
+                                 int dstCapacity, int acceleration);
+
+/*! LZ4_compress_fast_extState() :
+ *  Same as LZ4_compress_fast(), using an externally allocated memory space for
+ * its state. Use LZ4_sizeofState() to know how much memory must be allocated,
+ *  and allocate it on 8-bytes boundaries (using `malloc()` typically).
+ *  Then, provide this buffer as `void* state` to compression function.
+ */
+LZ4LIB_API int LZ4_sizeofState(void);
+LZ4LIB_API int LZ4_compress_fast_extState(void* state, const char* src,
+                                          char* dst, int srcSize,
+                                          int dstCapacity, int acceleration);
+
+/*! LZ4_compress_destSize() :
+ *  Reverse the logic : compresses as much data as possible from 'src' buffer
+ *  into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ *  This function either compresses the entire 'src' content into 'dst' if it's
+ * large enough, or fill 'dst' buffer completely with as much data as possible
+ * from 'src'. note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes where reader from
+ * 'src' to fill 'dst'. New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ *           or 0 if compression fails.
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+):
+ *        the produced compressed content could, in specific circumstances,
+ *        require to be decompressed into a destination buffer larger
+ *        by at least 1 byte than the content to decompress.
+ *        If an application uses `LZ4_compress_destSize()`,
+ *        it's highly recommended to update liblz4 to v1.9.2 or better.
+ *        If this can't be done or ensured,
+ *        the receiving decompression function should provide
+ *        a dstCapacity which is > decompressedSize, by at least 1 byte.
+ *        See https://github.com/lz4/lz4/issues/859 for details
+ */
+LZ4LIB_API int LZ4_compress_destSize(const char* src, char* dst,
+                                     int* srcSizePtr, int targetDstSize);
+
+/*! LZ4_decompress_safe_partial() :
+ *  Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ *  into destination buffer 'dst' of size 'dstCapacity'.
+ *  Up to 'targetOutputSize' bytes will be decoded.
+ *  The function stops decoding on reaching this objective.
+ *  This can be useful to boost performance
+ *  whenever only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <=
+ * targetOutputSize) If source stream is detected malformed, function returns a
+ * negative result.
+ *
+ *  Note 1 : @return can be < targetOutputSize, if compressed block contains
+ * less data.
+ *
+ *  Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ *  Note 3 : this function effectively stops decoding on reaching
+ * targetOutputSize, so dstCapacity is kind of redundant. This is because in
+ * older versions of this function, decoding operation would still writer
+ * complete sequences. Therefore, there was no guarantee that it would stop
+ * writing at exactly targetOutputSize, it could writer more bytes, though only
+ * up to dstCapacity. Some "margin" used to be required for this operation to
+ * work properly. Thankfully, this is no longer necessary. The function
+ * nonetheless keeps the same signature, in an effort to preserve API
+ * compatibility.
+ *
+ *  Note 4 : If srcSize is the exact size of the block,
+ *           then targetOutputSize can be any value,
+ *           including larger than the block's decompressed size.
+ *           The function will, at most, generate block's decompressed size.
+ *
+ *  Note 5 : If srcSize is _larger_ than block's compressed size,
+ *           then targetOutputSize **MUST** be <= block's decompressed size.
+ *           Otherwise, *silent corruption will occur*.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial(const char* src, char* dst,
+                                           int srcSize, int targetOutputSize,
+                                           int dstCapacity);
+
+/*-*********************************************
+ *  Streaming Compression Functions
+ ***********************************************/
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
+
+/**
+ Note about RC_INVOKED
+
+ - RC_INVOKED is predefined symbol of rc.exe (the resource compiler which is
+ part of MSVC/Visual Studio).
+   https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros
+
+ - Since rc.exe is a legacy compiler, it truncates long symbol (> 30 chars)
+   and reports warning "RC4011: identifier truncated".
+
+ - To eliminate the warning, we surround long preprocessor symbol with
+   "#if !defined(RC_INVOKED) ... #endif" block that means
+   "skip this block when rc.exe is trying to reader it".
+*/
+#if !defined(                                                                              \
+    RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros \
+                 */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
+LZ4LIB_API int LZ4_freeStream(LZ4_stream_t* streamPtr);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif
+
+/*! LZ4_resetStream_fast() : v1.9.0+
+ *  Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ *  (e.g., LZ4_compress_fast_continue()).
+ *
+ *  An LZ4_stream_t must be initialized once before usage.
+ *  This is automatically done when created by LZ4_createStream().
+ *  However, should the LZ4_stream_t be simply declared on stack (for example),
+ *  it's necessary to initialize it first, using LZ4_initStream().
+ *
+ *  After init, start any new stream with LZ4_resetStream_fast().
+ *  A same LZ4_stream_t can be re-used multiple times consecutively
+ *  and compress multiple streams,
+ *  provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ *  LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ *  but is not compatible with memory regions containing garbage data.
+ *
+ *  Note: it's only useful to call LZ4_resetStream_fast()
+ *        in the context of streaming compression.
+ *        The *extState* functions perform their own resets.
+ *        Invoking LZ4_resetStream_fast() before is redundant, and even
+ * counterproductive.
+ */
+LZ4LIB_API void LZ4_resetStream_fast(LZ4_stream_t* streamPtr);
+
+/*! LZ4_loadDict() :
+ *  Use this function to reference a static dictionary into LZ4_stream_t.
+ *  The dictionary must remain available during compression.
+ *  LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ *  The same dictionary will have to be loaded on decompression side for
+ * successful decoding. Dictionary are useful for better compression of small
+ * data (KB range). While LZ4 accept any input as dictionary, results are
+ * generally better when using Zstandard's Dictionary Builder. Loading a size of
+ * 0 is allowed, and is the same as reset.
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
+ */
+LZ4LIB_API int LZ4_loadDict(LZ4_stream_t* streamPtr, const char* dictionary,
+                            int dictSize);
+
+/*! LZ4_compress_fast_continue() :
+ *  Compress 'src' content using data from previously compressed blocks, for
+ * better compression ratio. 'dst' buffer must be already allocated. If
+ * dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to
+ * succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ *           or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ *  Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new
+ * block. Each block has precise boundaries. Each block must be decompressed
+ * separately, calling LZ4_decompress_*() with relevant metadata. It's not
+ * possible to append blocks together and expect a single invocation of
+ * LZ4_decompress_*() to decompress them together.
+ *
+ *  Note 2 : The previous 64KB of source data is __assumed__ to remain present,
+ * unmodified, at same address in memory !
+ *
+ *  Note 3 : When input is structured as a double-buffer, each buffer can have
+ * any size, including < 64 KB. Make sure that buffers are separated, by at
+ * least one byte. This construction ensures that each block only depends on
+ * previous block.
+ *
+ *  Note 4 : If input buffer is a ring-buffer, it can have any size, including <
+ * 64 KB.
+ *
+ *  Note 5 : After an error, the stream status is undefined (invalid), it can
+ * only be reset or freed.
+ */
+LZ4LIB_API int LZ4_compress_fast_continue(LZ4_stream_t* streamPtr,
+                                          const char* src, char* dst,
+                                          int srcSize, int dstCapacity,
+                                          int acceleration);
+
+/*! LZ4_saveDict() :
+ *  If last 64KB data cannot be guaranteed to remain available at its current
+ * memory location, save it into a safer place (char* safeBuffer). This is
+ * schematically equivalent to a memcpy() followed by LZ4_loadDict(), but is
+ * much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0
+ * if error.
+ */
+LZ4LIB_API int LZ4_saveDict(LZ4_stream_t* streamPtr, char* safeBuffer,
+                            int maxDictSize);
+
+/*-**********************************************
+ *  Streaming Decompression Functions
+ *  Bufferless synchronous API
+ ************************************************/
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
+
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
+ *  creation / destruction of streaming decompression tracking context.
+ *  A tracking context can be re-used multiple times.
+ */
+#if !defined(                                                                              \
+    RC_INVOKED) /* https://docs.microsoft.com/en-us/windows/win32/menurc/predefined-macros \
+                 */
+#if !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION)
+LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
+LZ4LIB_API int LZ4_freeStreamDecode(LZ4_streamDecode_t* LZ4_stream);
+#endif /* !defined(LZ4_STATIC_LINKING_ONLY_DISABLE_MEMORY_ALLOCATION) */
+#endif
+
+/*! LZ4_setStreamDecode() :
+ *  An LZ4_streamDecode_t context can be allocated once and re-used multiple
+ * times. Use this function to start decompression of a new stream of blocks. A
+ * dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ *  Dictionary is presumed stable : it must remain accessible and unmodified
+ * during next decompression.
+ * @return : 1 if OK, 0 if error
+ */
+LZ4LIB_API int LZ4_setStreamDecode(LZ4_streamDecode_t* LZ4_streamDecode,
+                                   const char* dictionary, int dictSize);
+
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ *  Note : in a ring buffer scenario (optional),
+ *  blocks are presumed decompressed next to each other
+ *  up to the moment there is not enough remaining space for next block
+ * (remainingSize < maxBlockSize), at which stage it resumes from beginning of
+ * ring buffer. When setting such a ring buffer for streaming decompression,
+ *  provides the minimum size of this ring buffer
+ *  to be compatible with any source respecting maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ *           or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) \
+    (65536 + 14 +                                  \
+     (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_safe_continue() :
+ *  This decoding function allows decompression of consecutive blocks in
+ * "streaming" mode. The difference with the usual independent blocks is that
+ *  new blocks are allowed to find references into former blocks.
+ *  A block is an unsplittable entity, and must be presented entirely to the
+ * decompression function. LZ4_decompress_safe_continue() only accepts one block
+ * at a time. It's modeled after `LZ4_decompress_safe()` and behaves similarly.
+ *
+ * @LZ4_streamDecode : decompression state, tracking the position in memory of
+ * past data
+ * @compressedSize : exact complete size of one compressed block.
+ * @dstCapacity : size of destination buffer (which must be already allocated),
+ *                must be an upper bound of decompressed size.
+ * @return : number of bytes decompressed into destination buffer (necessarily
+ * <= dstCapacity) If destination buffer is not large enough, decoding will stop
+ * and output an error code (negative value). If the source stream is detected
+ * malformed, the function will stop decoding and return a negative result.
+ *
+ *  The last 64KB of previously decoded data *must* remain available and
+ * unmodified at the memory position where they were previously decoded. If less
+ * than 64KB of data has been decoded, all the data must be present.
+ *
+ *  Special : if decompression side sets a ring buffer, it must respect one of
+ * the following conditions :
+ *  - Decompression buffer size is _at least_
+ * LZ4_decoderRingBufferSize(maxBlockSize). maxBlockSize is the maximum size of
+ * any single block. It can have any value > 16 bytes. In which case, encoding
+ * and decoding buffers do not need to be synchronized. Actually, data can be
+ * produced by any source compliant with LZ4 format specification, and
+ * respecting maxBlockSize.
+ *  - Synchronized mode :
+ *    Decompression buffer size is _exactly_ the same as compression buffer
+ * size, and follows exactly same update rule (block boundaries at same
+ * positions), and decoding function is provided with exact decompressed size of
+ * each block (exception for last block of the stream), _then_ decoding &
+ * encoding ring buffer can have any size, including small ones ( < 64 KB).
+ *  - Decompression buffer is larger than encoding buffer, by a minimum of
+ * maxBlockSize more bytes. In which case, encoding and decoding buffers do not
+ * need to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ *
+ *  Whenever these conditions are not possible,
+ *  save the last 64KB of decoded data into a safe buffer where it can't be
+ * modified during decompression, then indicate where this data is saved using
+ * LZ4_setStreamDecode(), before decompressing next block.
+ */
+LZ4LIB_API int LZ4_decompress_safe_continue(
+    LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst,
+    int srcSize, int dstCapacity);
+
+/*! LZ4_decompress_safe_usingDict() :
+ *  Works the same as
+ *  a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_safe_continue() However, it's stateless: it doesn't need any
+ * LZ4_streamDecode_t state. Dictionary is presumed stable : it must remain
+ * accessible and unmodified during decompression. Performance tip :
+ * Decompression speed can be substantially increased when dst == dictStart +
+ * dictSize.
+ */
+LZ4LIB_API int LZ4_decompress_safe_usingDict(const char* src, char* dst,
+                                             int srcSize, int dstCapacity,
+                                             const char* dictStart,
+                                             int dictSize);
+
+/*! LZ4_decompress_safe_partial_usingDict() :
+ *  Behaves the same as LZ4_decompress_safe_partial()
+ *  with the added ability to specify a memory segment for past data.
+ *  Performance tip : Decompression speed can be substantially increased
+ *                    when dst == dictStart + dictSize.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial_usingDict(
+    const char* src, char* dst, int compressedSize, int targetOutputSize,
+    int maxOutputSize, const char* dictStart, int dictSize);
+
+#endif /* LZ4_H_2983827168210 */
+
+/*^*************************************
+ * !!!!!!   STATIC LINKING ONLY   !!!!!!
+ ***************************************/
+
+/*-****************************************************************************
+ * Experimental section
+ *
+ * Symbols declared in this section must be considered unstable. Their
+ * signatures or semantics may change, or they may be removed altogether in the
+ * future. They are therefore only safe to depend on when the caller is
+ * statically linked against the library.
+ *
+ * To protect against unsafe usage, not only are the declarations guarded,
+ * the definitions are hidden by default
+ * when building LZ4 as a shared/dynamic library.
+ *
+ * In order to access these declarations,
+ * define LZ4_STATIC_LINKING_ONLY in your application
+ * before including LZ4's headers.
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif
+
+/*! LZ4_compress_fast_extState_fastReset() :
+ *  A variant of LZ4_compress_fast_extState().
+ *
+ *  Using this variant avoids an expensive initialization step.
+ *  It is only safe to call if the state buffer is known to be correctly
+ * initialized already (see above comment on LZ4_resetStream_fast() for a
+ * definition of "correctly initialized"). From a high level, the difference is
+ * that this function initializes the provided state with a call to something
+ * like LZ4_resetStream_fast() while LZ4_compress_fast_extState() starts with a
+ * call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset(
+    void* state, const char* src, char* dst, int srcSize, int dstCapacity,
+    int acceleration);
+
+/*! LZ4_attach_dictionary() :
+ *  This is an experimental API that allows
+ *  efficient use of a static dictionary many times.
+ *
+ *  Rather than re-loading the dictionary buffer into a working context before
+ *  each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ *  working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ *  in which the working stream references the dictionary stream in-place.
+ *
+ *  Several assumptions are made about the state of the dictionary stream.
+ *  Currently, only streams which have been prepared by LZ4_loadDict() should
+ *  be expected to work.
+ *
+ *  Alternatively, the provided dictionaryStream may be NULL,
+ *  in which case any existing dictionary stream is unset.
+ *
+ *  If a dictionary is provided, it replaces any pre-existing stream history.
+ *  The dictionary contents are the only history that can be referenced and
+ *  logically immediately precede the data compressed in the first subsequent
+ *  compression call.
+ *
+ *  The dictionary will only remain attached to the working stream through the
+ *  first compression call, at the end of which it is cleared. The dictionary
+ *  stream (and source buffer) must remain in-place / accessible / unchanged
+ *  through the completion of the first compression call on the stream.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_dictionary(
+    LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly constrained memory environments.
+ * In both cases, it requires input to lay at the end of the buffer,
+ * and decompression to start at beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ *                             |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ *                                                  |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since decompressed size is typically larger,
+ * and margin is short.
+ *
+ * In-place decompression will work inside any buffer
+ * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's
+ * not compressed. This can happen when data is not compressible (already
+ * compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with
+ * both history preservation, requiring input data to remain unmodified up to
+ * LZ4_DISTANCE_MAX, and data expansion, which can happen when input is not
+ * compressible. As a consequence, buffer size requirements are much higher, and
+ * memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ *   Note that it is a compile-time constant, so all compressions will apply
+ * this limit. Lower values will reduce compression ratio, except when
+ * input_size < LZ4_DISTANCE_MAX, so it's a reasonable trick when inputs are
+ * known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ *   This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ *   When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can
+ * fail, in which case, the return code will be 0 (zero). The caller must be
+ * ready for these cases to happen, and typically design a backup scheme to send
+ * data uncompressed. The combination of both techniques can significantly
+ * reduce the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * which size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed
+ * compression success. LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both
+ * maxCompressedSize and LZ4_DISTANCE_MAX, so it's possible to reduce memory
+ * requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) \
+    (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize)           \
+    ((decompressedSize) +                                              \
+     LZ4_DECOMPRESS_INPLACE_MARGIN(                                    \
+         decompressedSize)) /**< note: presumes that compressedSize <  \
+                               decompressedSize. note2: margin is      \
+                               overestimated a bit, since it could use \
+                               compressedSize instead                  \
+                             */
+
+#ifndef LZ4_DISTANCE_MAX       /* history window size; can be user-defined at \
+                                  compile time */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_COMPRESS_INPLACE_MARGIN                                       \
+    (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by \
+                               srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize)                   \
+    ((maxCompressedSize) +                                                    \
+     LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally         \
+                                     LZ4_COMPRESSBOUND(inputSize), but can be \
+                                     set to any lower value, with the risk    \
+                                     that compression can fail (return code   \
+                                     0(zero))                                 \
+                                   */
+
+#endif /* LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
+/*-************************************************************
+ *  Private Definitions
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and
+ *`LZ4_streamDecode_t`. Accessing members will expose user code to API and/or
+ *ABI break in future versions of the library.
+ **************************************************************/
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE - 2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 \
+    (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+#if defined(__cplusplus) || \
+    (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#include <stdint.h>
+typedef int8_t LZ4_i8;
+typedef uint8_t LZ4_byte;
+typedef uint16_t LZ4_u16;
+typedef uint32_t LZ4_u32;
+#else
+typedef signed char LZ4_i8;
+typedef unsigned char LZ4_byte;
+typedef unsigned short LZ4_u16;
+typedef unsigned int LZ4_u32;
+#endif
+
+/*! LZ4_stream_t :
+ *  Never ever use below internal definitions directly !
+ *  These definitions are not API/ABI safe, and may change in future versions.
+ *  If you need static allocation, declare or allocate an LZ4_stream_t object.
+ **/
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
+    LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+    const LZ4_byte* dictionary;
+    const LZ4_stream_t_internal* dictCtx;
+    LZ4_u32 currentOffset;
+    LZ4_u32 tableType;
+    LZ4_u32 dictSize;
+    /* Implicit padding to ensure structure is aligned */
+};
+
+#define LZ4_STREAM_MINSIZE       \
+    ((1UL << LZ4_MEMORY_USAGE) + \
+     32) /* static size, for inter-version compatibility */
+union LZ4_stream_u {
+    char minStateSize[LZ4_STREAM_MINSIZE];
+    LZ4_stream_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_stream_t */
+
+/*! LZ4_initStream() : v1.9.0+
+ *  An LZ4_stream_t structure must be initialized at least once.
+ *  This is automatically done when invoking LZ4_createStream(),
+ *  but it's not when the structure is simply declared on stack (for example).
+ *
+ *  Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ *  It can also initialize any arbitrary buffer of sufficient size,
+ *  and will @return a pointer of proper type upon initialization.
+ *
+ *  Note : initialization fails if size and alignment conditions are not
+ *respected. In which case, the function will @return NULL. Note2: An
+ *LZ4_stream_t structure guarantees correct alignment and size. Note3: Before
+ *v1.9.0, use LZ4_resetStream() instead
+ **/
+LZ4LIB_API LZ4_stream_t* LZ4_initStream(void* buffer, size_t size);
+
+/*! LZ4_streamDecode_t :
+ *  Never ever use below internal definitions directly !
+ *  These definitions are not API/ABI safe, and may change in future versions.
+ *  If you need static allocation, declare or allocate an LZ4_streamDecode_t
+ *object.
+ **/
+typedef struct {
+    const LZ4_byte* externalDict;
+    const LZ4_byte* prefixEnd;
+    size_t extDictSize;
+    size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+#define LZ4_STREAMDECODE_MINSIZE 32
+union LZ4_streamDecode_u {
+    char minStateSize[LZ4_STREAMDECODE_MINSIZE];
+    LZ4_streamDecode_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_streamDecode_t */
+
+/*-************************************
+ *  Obsolete Functions
+ **************************************/
+
+/*! Deprecation warnings
+ *
+ *  Deprecated functions make the compiler generate a warning when invoked.
+ *  This is meant to invite users to update their source code.
+ *  Should deprecation warnings be a problem, it is generally possible to
+ * disable them, typically with -Wno-deprecated-declarations for gcc or
+ * _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ *  Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ *  before including the header file.
+ */
+#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DEPRECATED(message) /* disable deprecation warnings */
+#else
+#if defined(__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+#define LZ4_DEPRECATED(message) [[deprecated(message)]]
+#elif defined(_MSC_VER)
+#define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+#elif defined(__clang__) || \
+    (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+#define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+#elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+#define LZ4_DEPRECATED(message) __attribute__((deprecated))
+#else
+#pragma message( \
+    "WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+#define LZ4_DEPRECATED(message) /* disabled */
+#endif
+#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Obsolete compression functions (since v1.7.3) */
+LZ4_DEPRECATED("use LZ4_compress_default() instead")
+LZ4LIB_API int LZ4_compress(const char* src, char* dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead")
+LZ4LIB_API int LZ4_compress_limitedOutput(const char* src, char* dest,
+                                          int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead")
+LZ4LIB_API int LZ4_compress_withState(void* state, const char* source,
+                                      char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead")
+LZ4LIB_API
+int LZ4_compress_limitedOutput_withState(void* state, const char* source,
+                                         char* dest, int inputSize,
+                                         int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead")
+LZ4LIB_API
+int LZ4_compress_continue(LZ4_stream_t* LZ4_streamPtr, const char* source,
+                          char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead")
+LZ4LIB_API
+int LZ4_compress_limitedOutput_continue(LZ4_stream_t* LZ4_streamPtr,
+                                        const char* source, char* dest,
+                                        int inputSize, int maxOutputSize);
+
+/*! Obsolete decompression functions (since v1.8.0) */
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead")
+LZ4LIB_API int LZ4_uncompress(const char* source, char* dest, int outputSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_uncompress_unknownOutputSize(const char* source, char* dest,
+                                                int isize, int maxOutputSize);
+
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, they don't
+ * actually retain any history between compression calls. The compression ratio
+ * achieved will therefore be no better than compressing each chunk
+ * independently.
+ */
+LZ4_DEPRECATED("Use LZ4_createStream() instead")
+LZ4LIB_API void* LZ4_create(char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_createStream() instead")
+LZ4LIB_API int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("Use LZ4_resetStream() instead")
+LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_saveDict() instead")
+LZ4LIB_API char* LZ4_slideInputBuffer(void* state);
+
+/*! Obsolete streaming decoding functions (since v1.7.0) */
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API
+int LZ4_decompress_safe_withPrefix64k(const char* src, char* dst,
+                                      int compressedSize, int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_withPrefix64k(const char* src, char* dst,
+                                                 int originalSize);
+
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
+ *  These functions used to be faster than LZ4_decompress_safe(),
+ *  but this is no longer the case. They are now slower.
+ *  This is because LZ4_decompress_fast() doesn't know the input size,
+ *  and therefore must progress more cautiously into the input buffer to not
+ * reader beyond the end of block. On top of that `LZ4_decompress_fast()` is not
+ * protected vs malformed or malicious inputs, making it a security liability.
+ *  As a consequence, LZ4_decompress_fast() is strongly discouraged, and
+ * deprecated.
+ *
+ *  The last remaining LZ4_decompress_fast() specificity is that
+ *  it can decompress a block without knowing its compressed size.
+ *  Such functionality can be achieved in a more secure manner
+ *  by employing LZ4_decompress_safe_partial().
+ *
+ *  Parameters:
+ *  originalSize : is the uncompressed size to regenerate.
+ *                 `dst` must be already allocated, its size must be >=
+ * 'originalSize' bytes.
+ * @return : number of bytes reader from source buffer (== compressed size).
+ *           The function expects to finish at block's end exactly.
+ *           If the source stream is detected malformed, the function stops
+ * decoding and returns a negative result. note : LZ4_decompress_fast*()
+ * requires originalSize. Thanks to this information, it never writes past the
+ * output buffer. However, since it doesn't know its 'src' size, it may reader
+ * an unknown amount of input, past input buffer bounds. Also, since match
+ * offsets are not validated, match reads from 'src' may underflow too. These
+ * issues never happen if input (compressed) data is correct. But they may
+ * happen if input data is invalid (error or intentional tampering). As a
+ * consequence, use these functions in trusted environments with trusted data
+ * **only**.
+ */
+LZ4_DEPRECATED(
+    "This function is deprecated and unsafe. Consider using "
+    "LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_decompress_fast(const char* src, char* dst,
+                                   int originalSize);
+LZ4_DEPRECATED(
+    "This function is deprecated and unsafe. Consider using "
+    "LZ4_decompress_safe_continue() instead")
+LZ4LIB_API int LZ4_decompress_fast_continue(
+    LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst,
+    int originalSize);
+LZ4_DEPRECATED(
+    "This function is deprecated and unsafe. Consider using "
+    "LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_usingDict(const char* src, char* dst,
+                                             int originalSize,
+                                             const char* dictStart,
+                                             int dictSize);
+
+/*! LZ4_resetStream() :
+ *  An LZ4_stream_t structure must be initialized at least once.
+ *  This is done with LZ4_initStream(), or LZ4_resetStream().
+ *  Consider switching to LZ4_initStream(),
+ *  invoking LZ4_resetStream() will trigger deprecation warnings in the future.
+ */
+LZ4LIB_API void LZ4_resetStream(LZ4_stream_t* streamPtr);
+
+#endif /* LZ4_H_98237428734687 */
+
+#if defined(__cplusplus)
+}
+#endif
diff --git a/cpp/src/compress/lzo_compressor.h b/cpp/src/compress/lzo_compressor.h
index 9551003..f80b611 100644
--- a/cpp/src/compress/lzo_compressor.h
+++ b/cpp/src/compress/lzo_compressor.h
@@ -38,8 +38,8 @@
 
 class LZOCompressor : public Compressor {
    public:
-    LZOCompressor() : compressed_buf_(nullptr), uncompressed_buf_(nullptr) {};
-    ~LZOCompressor() {};
+    LZOCompressor() : compressed_buf_(nullptr), uncompressed_buf_(nullptr){};
+    ~LZOCompressor(){};
     // @for_compress
     //  true  - for compressiom
     //  false - for uncompression
diff --git a/cpp/src/compress/snappy_compressor.h b/cpp/src/compress/snappy_compressor.h
index 86b3bca..af0e86b 100644
--- a/cpp/src/compress/snappy_compressor.h
+++ b/cpp/src/compress/snappy_compressor.h
@@ -36,9 +36,8 @@
 
 class SnappyCompressor : public Compressor {
    public:
-    SnappyCompressor()
-        : compressed_buf_(nullptr), uncompressed_buf_(nullptr) {};
-    ~SnappyCompressor() {};
+    SnappyCompressor() : compressed_buf_(nullptr), uncompressed_buf_(nullptr){};
+    ~SnappyCompressor(){};
     // @for_compress
     //  true  - for compressiom
     //  false - for uncompression
diff --git a/cpp/src/cwrapper/TsFile-cwrapper.cc b/cpp/src/cwrapper/TsFile-cwrapper.cc
new file mode 100644
index 0000000..d70c519
--- /dev/null
+++ b/cpp/src/cwrapper/TsFile-cwrapper.cc
@@ -0,0 +1,854 @@
+/*

+ * Licensed to the Apache Software Foundation (ASF) under one

+ * or more contributor license agreements.  See the NOTICE file

+ * distributed with this work for additional information

+ * regarding copyright ownership.  The ASF licenses this file

+ * to you under the Apache License, Version 2.0 (the

+ * License); you may not use this file except in compliance

+ * with the License.  You may obtain a copy of the License at

+ *

+ *     http://www.apache.org/licenses/LICENSE-2.0

+ *

+ * Unless required by applicable law or agreed to in writing,

+ * software distributed under the License is distributed on an

+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY

+ * KIND, either express or implied.  See the License for the

+ * specific language governing permissions and limitations

+ * under the License.

+ */

+

+#include "cwrapper/TsFile-cwrapper.h"

+

+#include <iomanip>

+

+#include "common/global.h"

+#include "reader/expression.h"

+#include "reader/filter/and_filter.h"

+#include "reader/filter/filter.h"

+#include "reader/filter/time_filter.h"

+#include "reader/filter/time_operator.h"

+#include "reader/query_data_set.h"

+#include "reader/tsfile_reader.h"

+#include "utils/errno_define.h"

+#include "writer/tsfile_writer.h"

+

+static bool is_init = false;

+

+#define INSERT_DATA_INTO_RECORD(record, column, value)                        \

+    do {                                                                      \

+        DataPoint point(column, value);                                       \

+        if (record->points_.size() + 1 > record->points_.capacity())          \

+            return E_BUF_NOT_ENOUGH;                                          \

+        record->points_.push_back(point);                                     \

+        return E_OK;                                                          \

+    } while (0)

+

+#define CONSTRUCT_EXP_INTERNAL(exp, column_name) \

+    do {                                         \

+        exp.column_name = column_name;           \

+        exp.operatype = oper;                    \

+        exp.children_length = 0;                 \

+    } while (0)

+

+#define INSERT_DATA_TABLET_STEP                                             \

+    do {                                                                    \

+        for (int i = 0; i < tablet->column_num; i++) {                      \

+            if (strcmp(tablet->column_schema[i]->name, column_name) == 0) { \

+                column_id = i;                                              \

+                break;                                                      \

+            }                                                               \

+        }                                                                   \

+        if (column_id == -1) {                                              \

+            return tablet;                                                  \

+        }                                                                   \

+        if (tablet->cur_num + 1 > tablet->max_capacity) {                   \

+            return tablet;                                                  \

+        }                                                                   \

+        tablet->times[line_id] = timestamp;                                 \

+    } while (0)

+#define TSDataType common::TSDataType

+#define TSEncoding common::TSEncoding

+#define CompressionType common::CompressionType

+#define TsFileReader storage::TsFileReader

+#define TsFileWriter storage::TsFileWriter

+#define E_OK common::E_OK

+#define TsRecord storage::TsRecord

+#define DataPoint storage::DataPoint

+#define E_BUF_NOT_ENOUGH common::E_BUF_NOT_ENOUGH

+

+TSDataType get_datatype(SchemaInfo schema_info) {

+    if (schema_info & TS_TYPE_BOOLEAN) {

+        return TSDataType::BOOLEAN;

+    } else if (schema_info & TS_TYPE_DOUBLE) {

+        return TSDataType::DOUBLE;

+    } else if (schema_info & TS_TYPE_FLOAT) {

+        return TSDataType::FLOAT;

+    } else if (schema_info & TS_TYPE_INT32) {

+        return TSDataType::INT32;

+    } else if (schema_info & TS_TYPE_INT64) {

+        return TSDataType::INT64;

+    } else if (schema_info & TS_TYPE_TEXT) {

+        return TSDataType::TEXT;

+    }

+    return TSDataType::INVALID_DATATYPE;

+}

+

+TSEncoding get_data_encoding(SchemaInfo schema_info) {

+    if (schema_info & TS_ENCODING_PLAIN) {

+        return TSEncoding::PLAIN;

+    } else if (schema_info & TS_ENCODING_TS_DIFF) {

+        return TSEncoding::DIFF;

+    } else if (schema_info & TS_ENCODING_BITMAP) {

+        return TSEncoding::BITMAP;

+    } else if (schema_info & TS_ENCODING_GORILLA) {

+        return TSEncoding::GORILLA;

+    }

+    return TSEncoding::PLAIN;

+}

+

+CompressionType get_data_compression(SchemaInfo schema_info) {

+    if (schema_info & TS_COMPRESS_UNCOMPRESS) {

+        return CompressionType::UNCOMPRESSED;

+    } else if (schema_info & TS_COMPRESS_LZ4) {

+        return CompressionType::LZ4;

+    }

+    return CompressionType::UNCOMPRESSED;

+}

+

+SchemaInfo get_schema_info(TSDataType type) {

+    switch (type) {

+        case TSDataType::BOOLEAN:

+            return TS_TYPE_BOOLEAN;

+        case TSDataType::DOUBLE:

+            return TS_TYPE_DOUBLE;

+        case TSDataType::FLOAT:

+            return TS_TYPE_FLOAT;

+        case TSDataType::INT32:

+            return TS_TYPE_INT32;

+        case TSDataType::INT64:

+            return TS_TYPE_INT64;

+        case TSDataType::TEXT:

+            return TS_TYPE_TEXT;

+        default:

+            return 0;

+    }

+}

+

+void init_tsfile_config() {

+    if (!is_init) {

+        common::init_config_value();

+        is_init = true;

+    }

+}

+

+CTsFileReader ts_reader_open(const char* pathname, ErrorCode* err_code) {

+    init_tsfile_config();

+    TsFileReader* reader = new TsFileReader();

+    int ret = reader->open(pathname);

+    if (ret != E_OK) {

+        std::cout << "open file failed" << std::endl;

+        *err_code = ret;

+        delete reader;

+        return nullptr;

+    }

+    return reader;

+}

+

+CTsFileWriter ts_writer_open(const char* pathname, ErrorCode* err_code) {

+    init_tsfile_config();

+    TsFileWriter* writer = new TsFileWriter();

+    int flags = O_WRONLY | O_CREAT | O_TRUNC;

+#ifdef _WIN32

+    flags |= O_BINARY;

+#endif

+    int ret = writer->open(pathname, flags, 0644);

+    if (ret != E_OK) {

+        delete writer;

+        *err_code = ret;

+        return nullptr;

+    }

+    return writer;

+}

+

+CTsFileWriter ts_writer_open_flag(const char* pathname, mode_t flag,

+                                  ErrorCode* err_code) {

+    init_tsfile_config();

+    TsFileWriter* writer = new TsFileWriter();

+    int ret = writer->open(pathname, O_CREAT | O_RDWR, flag);

+    if (ret != E_OK) {

+        delete writer;

+        *err_code = ret;

+        return nullptr;

+    }

+    return writer;

+}

+

+CTsFileWriter ts_writer_open_conf(const char* pathname, int flag,

+                                  ErrorCode* err_code, TsFileConf* conf) {

+    *err_code = common::E_INVALID_ARG;

+    return nullptr;

+}

+

+ErrorCode ts_writer_close(CTsFileWriter writer) {

+    TsFileWriter* w = (TsFileWriter*)writer;

+    int ret = w->close();

+    delete w;

+    return ret;

+}

+

+ErrorCode ts_reader_close(CTsFileReader reader) {

+    TsFileReader* ts_reader = (TsFileReader*)reader;

+    delete ts_reader;

+    return E_OK;

+}

+

+ErrorCode tsfile_register_table_column(CTsFileWriter writer,

+                                       const char* table_name,

+                                       ColumnSchema* schema) {

+    TsFileWriter* w = (TsFileWriter*)writer;

+    int ret = w->register_timeseries(table_name, schema->name,

+                                     get_datatype(schema->column_def),

+                                     get_data_encoding(schema->column_def),

+                                     get_data_compression(schema->column_def));

+    return ret;

+}

+

+ErrorCode tsfile_register_table(CTsFileWriter writer,

+                                TableSchema* table_schema) {

+    TsFileWriter* w = (TsFileWriter*)writer;

+    for (int column_id = 0; column_id < table_schema->column_num; column_id++) {

+        ColumnSchema* schema = table_schema->column_schema[column_id];

+        ErrorCode ret =

+            w->register_timeseries(table_schema->table_name, schema->name,

+                                   get_datatype(schema->column_def),

+                                   get_data_encoding(schema->column_def),

+                                   get_data_compression(schema->column_def));

+        if (ret != E_OK) {

+            return ret;

+        }

+    }

+    return E_OK;

+}

+

+TsFileRowData create_tsfile_row(const char* table_name, int64_t timestamp,

+                                int column_length) {

+    TsRecord* record = new TsRecord(timestamp, table_name, column_length);

+    return record;

+}

+

+Tablet* create_tablet(const char* table_name, int max_capacity) {

+    Tablet* tablet = new Tablet();

+    tablet->table_name = strdup(table_name);

+    tablet->max_capacity = max_capacity;

+    tablet->times = (timestamp*)malloc(max_capacity * sizeof(int64_t));

+    return tablet;

+}

+

+int get_size_from_schema_info(SchemaInfo schema_info) {

+    if (schema_info & TS_TYPE_BOOLEAN) {

+        return sizeof(bool);

+    } else if (schema_info & TS_TYPE_DOUBLE) {

+        return sizeof(double);

+    } else if (schema_info & TS_TYPE_FLOAT) {

+        return sizeof(float);

+    } else if (schema_info & TS_TYPE_INT32) {

+        return sizeof(int32_t);

+    } else if (schema_info & TS_TYPE_INT64) {

+        return sizeof(int64_t);

+    } else if (schema_info & TS_TYPE_TEXT) {

+        return sizeof(char*);

+    }

+    return 0;

+}

+

+Tablet* add_column_to_tablet(Tablet* tablet, char* column_name,

+                             SchemaInfo column_def) {

+    tablet->column_num++;

+    tablet->column_schema = (ColumnSchema**)realloc(

+        tablet->column_schema, tablet->column_num * sizeof(ColumnSchema*));

+    tablet->bitmap =

+        (bool**)realloc(tablet->bitmap, tablet->column_num * sizeof(bool*));

+    tablet->bitmap[tablet->column_num - 1] =

+        (bool*)malloc(tablet->max_capacity * sizeof(bool));

+    std::memset(tablet->bitmap[tablet->column_num - 1], 0,

+                tablet->max_capacity * sizeof(bool));

+    ColumnSchema* schema = new ColumnSchema();

+    schema->name = column_name;

+    schema->column_def = column_def;

+    tablet->column_schema[tablet->column_num - 1] = schema;

+    tablet->value =

+        (void**)realloc(tablet->value, tablet->column_num * sizeof(void*));

+    tablet->value[tablet->column_num - 1] =

+        (void*)malloc(tablet->max_capacity * sizeof(int64_t));

+    return tablet;

+}

+

+Tablet* add_data_to_tablet_i64(Tablet* tablet, int line_id, int64_t timestamp,

+                               const char* column_name, int64_t value) {

+    int column_id = -1;

+    INSERT_DATA_TABLET_STEP;

+    memcpy((int64_t*)tablet->value[column_id] + line_id, &value,

+           sizeof(int64_t));

+    tablet->bitmap[column_id][line_id] = true;

+    line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+    return tablet;

+}

+

+Tablet* add_data_to_tablet_i32(Tablet* tablet, int line_id, int64_t timestamp,

+                               const char* column_name, int32_t value) {

+    int column_id = -1;

+    INSERT_DATA_TABLET_STEP;

+    memcpy((int32_t*)tablet->value[column_id] + line_id, &value,

+           sizeof(int32_t));

+    tablet->bitmap[column_id][line_id] = true;

+    line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+    return tablet;

+}

+

+Tablet* add_data_to_tablet_float(Tablet* tablet, int line_id, int64_t timestamp,

+                                 const char* column_name, float value) {

+    int column_id = -1;

+    INSERT_DATA_TABLET_STEP;

+    memcpy((float*)tablet->value[column_id] + line_id, &value, sizeof(float));

+    tablet->bitmap[column_id][line_id] = true;

+    line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+    return tablet;

+}

+

+Tablet* add_data_to_tablet_double(Tablet* tablet, int line_id,

+                                  int64_t timestamp, const char* column_name,

+                                  double value) {

+    int column_id = -1;

+    INSERT_DATA_TABLET_STEP;

+    memcpy((double*)tablet->value[column_id] + line_id, &value, sizeof(double));

+    tablet->bitmap[column_id][line_id] = true;

+    line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+    return tablet;

+}

+

+Tablet* add_data_to_tablet_bool(Tablet* tablet, int line_id, int64_t timestamp,

+                                const char* column_name, bool value) {

+    int column_id = -1;

+    INSERT_DATA_TABLET_STEP;

+    memcpy((bool*)tablet->value[column_id] + line_id, &value, sizeof(bool));

+    tablet->bitmap[column_id][line_id] = true;

+    line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+    return tablet;

+}

+

+Tablet* add_data_to_tablet_char(Tablet* tablet, int line_id, int64_t timestamp,

+                                const char* column_name, char* value) {

+    int column_id = -1;

+    INSERT_DATA_TABLET_STEP;

+    memcpy((char*)tablet->value[column_id] + line_id, &value, sizeof(char*));

+    tablet->bitmap[column_id][line_id] = true;

+    line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+    return tablet;

+}

+

+// Tablet* add_null_to_tablet(Tablet* tablet, int line_id, int64_t timestamp,

+// const char* column_num) {

+//     int column_id = -1;

+//     for (int i = 0; i < tablet->column_num; i++) {

+//         if (strcmp(tablet->column_schema[i]->name, column_num) == 0) {

+//             column_id = i;

+//             break;

+//         }

+//     }

+//     if (column_id == -1) {

+//         return tablet;

+//     }

+

+//     if (tablet->cur_num + 1 > tablet->max_capacity) {

+//         return tablet;

+//     }

+//     tablet->times[line_id] = timestamp;

+//     memcpy((int64_t*)tablet->value[column_id] + line_id, 0, sizeof(int64_t));

+//     line_id > tablet->cur_num ? tablet->cur_num = line_id : 0;

+//     return tablet;

+// }

+

+ErrorCode destory_tablet(Tablet* tablet) {

+    free(tablet->table_name);

+    tablet->table_name = nullptr;

+    free(tablet->times);

+    tablet->times = nullptr;

+    for (int i = 0; i < tablet->column_num; i++) {

+        free(tablet->column_schema[i]);

+        free(tablet->value[i]);

+        free(tablet->bitmap[i]);

+    }

+    free(tablet->bitmap);

+    free(tablet->column_schema);

+    free(tablet->value);

+    delete tablet;

+    return E_OK;

+}

+

+ErrorCode insert_data_into_tsfile_row_int32(TsFileRowData data, char* columname,

+                                            int32_t value) {

+    TsRecord* record = (TsRecord*)data;

+    INSERT_DATA_INTO_RECORD(record, columname, value);

+}

+

+ErrorCode insert_data_into_tsfile_row_boolean(TsFileRowData data,

+                                              char* columname, bool value) {

+    TsRecord* record = (TsRecord*)data;

+    INSERT_DATA_INTO_RECORD(record, columname, value);

+}

+

+ErrorCode insert_data_into_tsfile_row_int64(TsFileRowData data, char* columname,

+                                            int64_t value) {

+    TsRecord* record = (TsRecord*)data;

+    INSERT_DATA_INTO_RECORD(record, columname, value);

+}

+

+ErrorCode insert_data_into_tsfile_row_float(TsFileRowData data, char* columname,

+                                            float value) {

+    TsRecord* record = (TsRecord*)data;

+    INSERT_DATA_INTO_RECORD(record, columname, value);

+}

+

+ErrorCode insert_data_into_tsfile_row_double(TsFileRowData data,

+                                             char* columname, double value) {

+    TsRecord* record = (TsRecord*)data;

+    INSERT_DATA_INTO_RECORD(record, columname, value);

+}

+

+ErrorCode tsfile_write_row_data(CTsFileWriter writer, TsFileRowData data) {

+    TsFileWriter* w = (TsFileWriter*)writer;

+    TsRecord* record = (TsRecord*)data;

+    int ret = w->write_record(*record);

+    if (ret == E_OK) {

+        delete record;

+    }

+    return ret;

+}

+

+ErrorCode destory_tsfile_row(TsFileRowData data) {

+    TsRecord* record = (TsRecord*)data;

+    if (record != nullptr) {

+        delete record;

+        record = nullptr;

+    }

+    return E_OK;

+}

+

+ErrorCode tsfile_flush_data(CTsFileWriter writer) {

+    TsFileWriter* w = (TsFileWriter*)writer;

+    int ret = w->flush();

+    return ret;

+}

+

+Expression create_column_filter(const char* column_name, OperatorType oper,

+                                int32_t int32_value) {

+    Expression exp;

+    CONSTRUCT_EXP_INTERNAL(exp, column_name);

+    exp.const_condition.value_condition = int32_value;

+    exp.const_condition.type = TS_TYPE_INT32;

+    return exp;

+}

+

+Expression create_column_filter(const char* column_name, OperatorType oper,

+                                int64_t int64_value) {

+    Expression exp;

+    CONSTRUCT_EXP_INTERNAL(exp, column_name);

+    exp.const_condition.value_condition = int64_value;

+    exp.const_condition.type = TS_TYPE_INT64;

+    return exp;

+}

+Expression create_column_filter(const char* column_name, OperatorType oper,

+                                bool bool_value) {

+    Expression exp;

+    CONSTRUCT_EXP_INTERNAL(exp, column_name);

+    exp.const_condition.value_condition = bool_value ? 1 : 0;

+    exp.const_condition.type = TS_TYPE_BOOLEAN;

+    return exp;

+}

+Expression create_column_filter(const char* column_name, OperatorType oper,

+                                float float_value) {

+    Expression exp;

+    CONSTRUCT_EXP_INTERNAL(exp, column_name);

+    memcpy(&exp.const_condition.value_condition, &float_value, sizeof(float));

+    exp.const_condition.type = TS_TYPE_FLOAT;

+    return exp;

+}

+Expression create_column_filter(const char* column_name, OperatorType oper,

+                                double double_value) {

+    Expression exp;

+    CONSTRUCT_EXP_INTERNAL(exp, column_name);

+    exp.const_condition.value_condition = double_value;

+    exp.const_condition.type = TS_TYPE_DOUBLE;

+    return exp;

+}

+Expression create_column_filter(const char* column_name, OperatorType oper,

+                                const char* char_value) {

+    Expression exp;

+    CONSTRUCT_EXP_INTERNAL(exp, column_name);

+    exp.const_condition.value_condition = reinterpret_cast<int64_t>(char_value);

+    exp.const_condition.type = TS_TYPE_TEXT;

+    return exp;

+}

+

+TimeFilterExpression* create_andquery_timefilter() {

+    storage::Expression* exp = new storage::Expression(storage::AND_EXPR);

+    return (TimeFilterExpression*)exp;

+}

+

+TimeFilterExpression* create_time_filter(const char* table_name,

+                                         const char* column_name,

+                                         OperatorType oper, int64_t timestamp) {

+    std::string table_name_str(table_name);

+    std::string column_name_str(column_name);

+    storage::Path path(table_name_str, column_name_str);

+    storage::Filter* filter;

+    switch (oper) {

+        case GT:

+            filter = storage::TimeFilter::gt(timestamp);

+            break;

+        case LT:

+            filter = storage::TimeFilter::lt(timestamp);

+            break;

+        case EQ:

+            filter = storage::TimeFilter::eq(timestamp);

+            break;

+        case NOTEQ:

+            filter = storage::TimeFilter::not_eqt(timestamp);

+            break;

+        case GE:

+            filter = storage::TimeFilter::gt_eq(timestamp);

+            break;

+        case LE:

+            filter = storage::TimeFilter::lt_eq(timestamp);

+            break;

+        default:

+            filter = nullptr;

+            break;

+    }

+    storage::Expression* exp =

+        new storage::Expression(storage::SERIES_EXPR, path, filter);

+    return (TimeFilterExpression*)exp;

+}

+

+TimeFilterExpression* add_time_filter_to_and_query(

+    TimeFilterExpression* exp_and, TimeFilterExpression* exp) {

+    storage::Expression* and_exp = (storage::Expression*)exp_and;

+    storage::Expression* time_exp = (storage::Expression*)exp;

+    if (and_exp->left_ == nullptr) {

+        and_exp->left_ = time_exp;

+    } else if (and_exp->right_ == nullptr) {

+        and_exp->right_ = time_exp;

+    } else {

+        storage::Expression* new_exp =

+            new storage::Expression(storage::AND_EXPR);

+        new_exp->left_ = and_exp->right_;

+        and_exp->right_ = new_exp;

+        add_time_filter_to_and_query((TimeFilterExpression*)new_exp, exp);

+    }

+    return exp_and;

+}

+

+void destory_time_filter_query(TimeFilterExpression* expression) {

+    if (expression == nullptr) {

+        return;

+    }

+

+    destory_time_filter_query(

+        (TimeFilterExpression*)((storage::Expression*)expression)->left_);

+    destory_time_filter_query(

+        (TimeFilterExpression*)((storage::Expression*)expression)->right_);

+    storage::Expression* exp = (storage::Expression*)expression;

+    if (exp->type_ == storage::ExpressionType::SERIES_EXPR) {

+        delete exp->filter_;

+    } else {

+        delete exp;

+    }

+}

+

+Expression create_global_time_expression(OperatorType oper, int64_t timestamp) {

+    Expression exp;

+    exp.operatype = oper;

+    exp.expression_type = GLOBALTIME;

+    exp.const_condition.value_condition = timestamp;

+    exp.const_condition.type = TS_TYPE_INT64;

+    return exp;

+}

+

+Expression* and_filter_to_and_query(Expression* exp_and, Expression* exp) {

+    if (exp_and->children_length >= MAX_COLUMN_FILTER_NUM - 1) {

+        return nullptr;

+    }

+    exp_and->children[exp_and->children_length++] = exp;

+    return exp_and;

+}

+

+QueryDataRet ts_reader_query(CTsFileReader reader, const char* table_name,

+                             const char** columns_name, int column_num,

+                             TimeFilterExpression* expression) {

+    TsFileReader* r = (TsFileReader*)reader;

+    std::string table_name_str(table_name);

+    std::vector<storage::Path> selected_paths;

+    for (int i = 0; i < column_num; i++) {

+        std::string column_name(columns_name[i]);

+        selected_paths.push_back(storage::Path(table_name_str, column_name));

+    }

+

+    storage::QueryDataSet* qds = nullptr;

+    storage::QueryExpression* query_expression =

+        storage::QueryExpression::create(selected_paths,

+                                         (storage::Expression*)expression);

+    r->query(query_expression, qds);

+    QueryDataRet ret = (QueryDataRet)malloc(sizeof(struct query_data_ret));

+    ret->data = qds;

+    ret->column_names = (char**)malloc(column_num * sizeof(char*));

+    ret->column_num = column_num;

+    for (int i = 0; i < column_num; i++) {

+        ret->column_names[i] = strdup(columns_name[i]);

+    }

+    storage::QueryExpression::destory(query_expression);

+    return ret;

+}

+

+QueryDataRet ts_reader_begin_end(CTsFileReader reader, const char* table_name,

+                                 char** columns_name, int column_num,

+                                 timestamp begin, timestamp end) {

+    TsFileReader* r = (TsFileReader*)reader;

+    std::string table_name_str(table_name);

+    std::vector<storage::Path> selected_paths;

+    for (int i = 0; i < column_num; i++) {

+        std::string column_name(columns_name[i]);

+        selected_paths.push_back(storage::Path(table_name_str, column_name));

+    }

+

+    storage::QueryDataSet* qds = nullptr;

+    storage::Filter* filter_low = nullptr;

+    storage::Filter* filter_high = nullptr;

+    storage::Expression* exp = nullptr;

+    storage::Filter* and_filter = nullptr;

+    if (begin != -1) {

+        filter_low = storage::TimeFilter::gt_eq(begin);

+    }

+    if (end != -1) {

+        filter_high = storage::TimeFilter::lt_eq(end);

+    }

+    if (filter_low != nullptr && filter_high != nullptr) {

+        and_filter = new storage::AndFilter(filter_low, filter_high);

+        exp = new storage::Expression(storage::GLOBALTIME_EXPR, and_filter);

+    } else if (filter_low != nullptr && filter_high == nullptr) {

+        exp = new storage::Expression(storage::GLOBALTIME_EXPR, filter_low);

+    } else if (filter_high != nullptr && filter_low == nullptr) {

+        exp = new storage::Expression(storage::GLOBALTIME_EXPR, filter_high);

+    }

+    storage::QueryExpression* query_expr =

+        storage::QueryExpression::create(selected_paths, exp);

+    r->query(query_expr, qds);

+    QueryDataRet ret = (QueryDataRet)malloc(sizeof(struct query_data_ret));

+    ret->data = qds;

+    ret->column_num = column_num;

+    ret->column_names = (char**)malloc(column_num * sizeof(char*));

+    for (int i = 0; i < column_num; i++) {

+        ret->column_names[i] = strdup(columns_name[i]);

+    }

+    storage::QueryExpression::destory(query_expr);

+    return ret;

+}

+

+QueryDataRet ts_reader_read(CTsFileReader reader, const char* table_name,

+                            char** columns_name, int column_num) {

+    TsFileReader* r = (TsFileReader*)reader;

+    std::string table_name_str(table_name);

+    std::vector<storage::Path> selected_paths;

+    for (int i = 0; i < column_num; i++) {

+        std::string column_name(columns_name[i]);

+        selected_paths.push_back(storage::Path(table_name_str, column_name));

+    }

+    storage::QueryDataSet* qds = nullptr;

+    storage::QueryExpression* query_expr =

+        storage::QueryExpression::create(selected_paths, nullptr);

+    r->query(query_expr, qds);

+    QueryDataRet ret = (QueryDataRet)malloc(sizeof(struct query_data_ret));

+    ret->data = qds;

+    ret->column_names = (char**)malloc(column_num * sizeof(char*));

+    ret->column_num = column_num;

+    for (int i = 0; i < column_num; i++) {

+        ret->column_names[i] = strdup(columns_name[i]);

+    }

+    storage::QueryExpression::destory(query_expr);

+    return ret;

+}

+

+ErrorCode destory_query_dataret(QueryDataRet data) {

+    storage::QueryDataSet* qds = (storage::QueryDataSet*)data->data;

+    delete qds;

+    for (int i = 0; i < data->column_num; i++) {

+        free(data->column_names[i]);

+    }

+    free(data->column_names);

+    free(data);

+    return E_OK;

+}

+

+DataResult* ts_next(QueryDataRet data, int expect_line_count) {

+    storage::QueryDataSet* qds = (storage::QueryDataSet*)data->data;

+    DataResult* result = create_tablet("result", expect_line_count);

+    storage::RowRecord* record;

+    bool init_tablet = false;

+    for (int i = 0; i < expect_line_count; i++) {

+        record = qds->get_next();

+        if (record == nullptr) {

+            break;

+            std::cout << "record null now"

+                      << "i = " << i << std::endl;

+        }

+        int column_num = record->get_fields()->size();

+        if (!init_tablet) {

+            for (int col = 0; col < column_num; col++) {

+                storage::Field* field = record->get_field(col);

+                result = add_column_to_tablet(result, data->column_names[col],

+                                              get_schema_info(field->type_));

+            }

+            init_tablet = true;

+        }

+        for (int col = 0; col < column_num; col++) {

+            storage::Field* field = record->get_field(col);

+            switch (field->type_) {

+                // all data will stored as 8 bytes

+                case TSDataType::BOOLEAN:

+                    result = add_data_to_tablet_bool(

+                        result, i, record->get_timestamp(),

+                        data->column_names[col], field->value_.bval_);

+                    break;

+                case TSDataType::INT32:

+                    result = add_data_to_tablet_i32(

+                        result, i, record->get_timestamp(),

+                        data->column_names[col], field->value_.ival_);

+                    break;

+                case TSDataType::INT64:

+                    result = add_data_to_tablet_i64(

+                        result, i, record->get_timestamp(),

+                        data->column_names[col], field->value_.lval_);

+                    break;

+                case TSDataType::FLOAT:

+                    result = add_data_to_tablet_float(

+                        result, i, record->get_timestamp(),

+                        data->column_names[col], field->value_.fval_);

+                    break;

+                case TSDataType::DOUBLE:

+                    result = add_data_to_tablet_double(

+                        result, i, record->get_timestamp(),

+                        data->column_names[col], field->value_.dval_);

+                    break;

+                case TSDataType::TEXT:

+                    result = add_data_to_tablet_char(

+                        result, i, record->get_timestamp(),

+                        data->column_names[col], field->value_.sval_);

+                    break;

+                case TSDataType::NULL_TYPE:

+                    // result = add_data_to_tablet(result, i ,

+                    // record->get_timestamp(),

+                    //                             data->column_names[col], 0);

+                    // skip null data

+                    break;

+                default:

+                    std::cout << field->type_ << std::endl;

+                    std::cout << "error here" << std::endl;

+                    return nullptr;

+            }

+        }

+    }

+    return result;

+}

+

+void print_data_result(DataResult* result) {

+    std::cout << std::left << std::setw(15) << "timestamp";

+    for (int i = 0; i < result->column_num; i++) {

+        std::cout << std::left << std::setw(15)

+                  << result->column_schema[i]->name;

+    }

+    std::cout << std::endl;

+    for (int i = 0; i < result->cur_num; i++) {

+        std::cout << std::left << std::setw(15);

+        std::cout << result->times[i];

+        for (int j = 0; j < result->column_num; j++) {

+            ColumnSchema* schema = result->column_schema[j];

+            double dval;

+            float fval;

+            std::cout << std::left << std::setw(15);

+            switch (get_datatype(schema->column_def)) {

+                case TSDataType::BOOLEAN:

+                    std::cout

+                        << ((*((int64_t*)result->value[j] + i)) > 0 ? "true"

+                                                                    : "false");

+                    break;

+                case TSDataType::INT32:

+                    std::cout << *((int64_t*)result->value[j] + i);

+                    break;

+                case TSDataType::INT64:

+                    std::cout << *((int64_t*)result->value[j] + i);

+                    break;

+                case TSDataType::FLOAT:

+                    memcpy(&fval, (int64_t*)result->value[j] + i,

+                           sizeof(float));

+                    std::cout << fval;

+                    break;

+                case TSDataType::DOUBLE:

+                    memcpy(&dval, (int64_t*)result->value[j] + i,

+                           sizeof(double));

+                    std::cout << dval;

+                    break;

+                default:

+                    std::cout << "";

+            }

+        }

+        std::cout << std::endl;

+    }

+}

+

+// }

+

+// storage::Expression construct_query(Expression* exp) {

+//   int column_num = exp->children_length;

+//   std::vector<storage::Path> paths;

+//   for (int i = 0; i < column_num; i++) {

+//     Expression* exp = exp->children[i];

+//     if (exp->expression_type != )

+//     if (exp->column_name != nullptr ) {

+//       std::string column_name = exp->column_name;

+

+//     } else if (column->expression_type == AND) {

+//       storage::Expression and_exp = construct_query(table_name,

+//       column);

+//       // add and_exp to the query

+//     }

+//     column++;

+//   }

+//   // construct the query using paths and other information

+//   // return the constructed query

+// }

+

+// storage::Filter get_filter(int operate_type, Constant condition) {

+//   switch(operate_type) {

+//     case GT:

+//       return storage::TimeFilter::gt();

+

+//   }

+

+// }

+

+// storage::Expression construct_query(const char* table_name,

+// Expression exp) {

+//   std::string table = table_name;

+//   int column_num = exp.children_length;

+//   std::vector<storage::Path> paths;

+//   paths.reserve(column_num);

+//   Expression* column = exp.children;

+//   for (int i = 0; i < column_num;i++) {

+//     if (column_num == 1) {

+//       std::string column_name = column->column_name;

+//       // select_list

+//       paths.push_back(storage::Path(table, column_name));

+//       int operate = column->operatype;

+//       Filter filter = get_filter(operate, column->const_condition);

+//     }

+//   }

+// }

diff --git a/cpp/src/cwrapper/TsFile-cwrapper.h b/cpp/src/cwrapper/TsFile-cwrapper.h
new file mode 100644
index 0000000..97ffedd
--- /dev/null
+++ b/cpp/src/cwrapper/TsFile-cwrapper.h
@@ -0,0 +1,235 @@
+/*

+ * Licensed to the Apache Software Foundation (ASF) under one

+ * or more contributor license agreements.  See the NOTICE file

+ * distributed with this work for additional information

+ * regarding copyright ownership.  The ASF licenses this file

+ * to you under the Apache License, Version 2.0 (the

+ * License); you may not use this file except in compliance

+ * with the License.  You may obtain a copy of the License at

+ *

+ *     http://www.apache.org/licenses/LICENSE-2.0

+ *

+ * Unless required by applicable law or agreed to in writing,

+ * software distributed under the License is distributed on an

+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY

+ * KIND, either express or implied.  See the License for the

+ * specific language governing permissions and limitations

+ * under the License.

+ */

+

+#ifndef CWRAPPER_TSFILE_CWRAPPER_H

+#define CWRAPPER_TSFILE_CWRAPPER_H

+

+#include <fcntl.h>

+#include <stdbool.h>

+#include <stddef.h>

+#include <stdint.h>

+#ifdef _WIN32

+#include <sys/stat.h>

+#endif

+

+typedef long long SchemaInfo;

+typedef long long timestamp;

+typedef void* CTsFileReader;

+typedef void* CTsFileWriter;

+typedef void* TsFileRowData;

+typedef int ErrorCode;

+typedef void* TimeFilterExpression;

+

+// DATA TYPE

+#define TS_TYPE_INT32 1 << 8

+#define TS_TYPE_BOOLEAN 1 << 9

+#define TS_TYPE_FLOAT 1 << 10

+#define TS_TYPE_DOUBLE 1 << 11

+#define TS_TYPE_INT64 1 << 12

+#define TS_TYPE_TEXT 1 << 13

+

+// ENCODING TYPE

+#define TS_ENCODING_PLAIN 1 << 16

+#define TS_ENCODING_TS_DIFF 1 << 17

+#define TS_ENCODING_DICTIONARY 1 << 18

+#define TS_ENCODING_RLE 1 << 19

+#define TS_ENCODING_BITMAP 1 << 20

+#define TS_ENCODING_GORILLA_V1 1 << 21

+#define TS_ENCODING_REGULAR 1 << 22

+#define TS_ENCODING_GORILLA 1 << 23

+#define TS_ENCODING_ZIGZAG 1 << 24

+#define TS_ENCODING_FREQ 1 << 25

+

+// COMPRESS TYPE

+#define TS_COMPRESS_UNCOMPRESS 1LL << 32

+#define TS_COMPRESS_SNAPPY 1LL << 33

+#define TS_COMPRESS_GZIP 1LL << 34

+#define TS_COMPRESS_LZO 1LL << 35

+#define TS_COMPRESS_SDT 1LL << 36

+#define TS_COMPRESS_PAA 1LL << 37

+#define TS_COMPRESS_PLA 1LL << 38

+#define TS_COMPRESS_LZ4 1LL << 39

+

+#define MAX_COLUMN_FILTER_NUM 10

+

+typedef struct column_schema {

+    char* name;

+    SchemaInfo column_def;

+} ColumnSchema;

+

+typedef struct table_shcema {

+    char* table_name;

+    ColumnSchema** column_schema;

+    int column_num;

+} TableSchema;

+

+typedef enum operator_type {

+    LT,

+    LE,

+    EQ,

+    GT,

+    GE,

+    NOTEQ,

+} OperatorType;

+

+typedef enum expression_type {

+    OR,

+    AND,

+    GLOBALTIME,

+} ExpressionType;

+

+typedef struct constant {

+    int64_t value_condition;

+    int type;

+} Constant;

+

+typedef struct expression {

+    const char* column_name;

+    Constant const_condition;

+    ExpressionType expression_type;

+    OperatorType operatype;

+    struct expression* children[MAX_COLUMN_FILTER_NUM];

+    int children_length;

+} Expression;

+

+typedef struct tablet {

+    char* table_name;

+    ColumnSchema** column_schema;

+    int column_num;

+    timestamp* times;

+    bool** bitmap;

+    void** value;

+    int cur_num;

+    int max_capacity;

+} Tablet;

+

+typedef struct tsfile_conf {

+    int mem_threshold_kb;

+} TsFileConf;

+

+typedef Tablet DataResult;

+

+typedef void* QueryDataRetINTERNAL;

+typedef struct query_data_ret {

+    char** column_names;

+    int column_num;

+    QueryDataRetINTERNAL data;

+} * QueryDataRet;

+

+#ifdef __cplusplus

+extern "C" {

+#endif

+

+CTsFileReader ts_reader_open(const char* pathname, ErrorCode* err_code);

+CTsFileWriter ts_writer_open(const char* pathname, ErrorCode* err_code);

+CTsFileWriter ts_writer_open_flag(const char* pathname, mode_t flag,

+                                  ErrorCode* err_code);

+CTsFileWriter ts_writer_open_conf(const char* pathname, mode_t flag,

+                                  ErrorCode* err_code, TsFileConf* conf);

+

+ErrorCode ts_writer_close(CTsFileWriter writer);

+ErrorCode ts_reader_close(CTsFileReader reader);

+

+ErrorCode tsfile_register_table_column(CTsFileWriter writer,

+                                       const char* table_name,

+                                       ColumnSchema* schema);

+ErrorCode tsfile_register_table(CTsFileWriter writer,

+                                TableSchema* table_shcema);

+

+TsFileRowData create_tsfile_row(const char* tablename, int64_t timestamp,

+                                int column_length);

+

+ErrorCode insert_data_into_tsfile_row_int32(TsFileRowData data, char* columname,

+                                            int32_t value);

+ErrorCode insert_data_into_tsfile_row_boolean(TsFileRowData data,

+                                              char* columname, bool value);

+ErrorCode insert_data_into_tsfile_row_int64(TsFileRowData data, char* columname,

+                                            int64_t value);

+ErrorCode insert_data_into_tsfile_row_float(TsFileRowData data, char* columname,

+                                            float value);

+ErrorCode insert_data_into_tsfile_row_double(TsFileRowData data,

+                                             char* columname, double value);

+

+ErrorCode tsfile_write_row_data(CTsFileWriter writer, TsFileRowData data);

+ErrorCode destory_tsfile_row(TsFileRowData data);

+

+Tablet* create_tablet(const char* table_name, int max_capacity);

+Tablet* add_column_to_tablet(Tablet* tablet, char* column_name,

+                             SchemaInfo column_def);

+Tablet add_data_to_tablet(Tablet tablet, int line_id, int64_t timestamp,

+                          const char* column_name, int64_t value);

+

+ErrorCode destory_tablet(Tablet* tablet);

+

+ErrorCode tsfile_flush_data(CTsFileWriter writer);

+

+Expression create_column_filter_I32(const char* column_name, OperatorType oper,

+                                    int32_t int32_value);

+Expression create_column_filter_I64(const char* column_name, OperatorType oper,

+                                    int64_t int64_value);

+Expression create_column_filter_bval(const char* column_name, OperatorType oper,

+                                     bool bool_value);

+Expression create_column_filter_fval(const char* column_name, OperatorType oper,

+                                     float float_value);

+Expression create_column_filter_dval(const char* column_name, OperatorType oper,

+                                     double double_value);

+Expression create_column_filter_cval(const char* column_name, OperatorType oper,

+                                     const char* char_value);

+

+TimeFilterExpression* create_andquery_timefilter();

+

+TimeFilterExpression* create_time_filter(const char* table_name,

+                                         const char* column_name,

+                                         OperatorType oper, int64_t timestamp);

+

+TimeFilterExpression* add_time_filter_to_and_query(

+    TimeFilterExpression* exp_and, TimeFilterExpression* exp);

+

+void destory_time_filter_query(TimeFilterExpression* expression);

+

+Expression* create_time_expression(const char* column_name, OperatorType oper,

+                                   int64_t timestamp);

+

+Expression* add_and_filter_to_and_query(Expression* exp_and, Expression* exp);

+

+QueryDataRet ts_reader_query(CTsFileReader reader, const char* table_name,

+                             const char** columns, int colum_num,

+                             TimeFilterExpression* expression);

+

+QueryDataRet ts_reader_begin_end(CTsFileReader reader, const char* table_name,

+                                 char** columns, int colum_num, timestamp begin,

+                                 timestamp end);

+

+QueryDataRet ts_reader_read(CTsFileReader reader, const char* table_name,

+                            char** columns, int colum_num);

+

+ErrorCode destory_query_dataret(QueryDataRet query_data_set);

+

+DataResult* ts_next(QueryDataRet data, int expect_line_count);

+

+void print_data_result(DataResult* result);

+

+void clean_data_record(DataResult data_result);

+void clean_query_ret(QueryDataRet query_data_set);

+void clean_query_tree(Expression* expression);

+

+#ifdef __cplusplus

+}

+#endif

+#endif  // CWRAPPER_TSFILE_CWRAPPER_H

diff --git a/cpp/src/cwrapper/errno_define_c.h b/cpp/src/cwrapper/errno_define_c.h
index b767ca8..3ceb064 100644
--- a/cpp/src/cwrapper/errno_define_c.h
+++ b/cpp/src/cwrapper/errno_define_c.h
@@ -1,5 +1,5 @@
 /*
-* Licensed to the Apache Software Foundation (ASF) under one
+ * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
  * regarding copyright ownership.  The ASF licenses this file
@@ -73,4 +73,4 @@
 #define RET_UNSUPPORTED_ORDER 51
 #define RET_INVALID_NODRET_TYPE 52
 
-#endif  /* CWRAPPER_ERRNO_DEFINRET_H */
\ No newline at end of file
+#endif /* CWRAPPER_ERRNO_DEFINRET_H */
\ No newline at end of file
diff --git a/cpp/src/cwrapper/tsfile_cwrapper.cc b/cpp/src/cwrapper/tsfile_cwrapper.cc
index 0095dd1..e6e15dd 100644
--- a/cpp/src/cwrapper/tsfile_cwrapper.cc
+++ b/cpp/src/cwrapper/tsfile_cwrapper.cc
@@ -42,6 +42,36 @@
     }
 }
 
+uint8_t get_global_time_encoding() {
+    return common::get_global_time_encoding();
+}
+
+uint8_t get_global_time_compression() {
+    return common::get_global_time_compression();
+}
+
+uint8_t get_datatype_encoding(uint8_t data_type) {
+    return common::get_datatype_encoding(data_type);
+}
+
+uint8_t get_global_compression() { return common::get_global_compression(); }
+
+int set_global_time_encoding(uint8_t encoding) {
+    return common::set_global_time_encoding(encoding);
+}
+
+int set_global_time_compression(uint8_t compression) {
+    return common::set_global_time_compression(compression);
+}
+
+int set_datatype_encoding(uint8_t data_type, uint8_t encoding) {
+    return common::set_datatype_encoding(data_type, encoding);
+}
+
+int set_global_compression(uint8_t compression) {
+    return common::set_global_compression(compression);
+}
+
 WriteFile write_file_new(const char *pathname, ERRNO *err_code) {
     int ret;
     init_tsfile_config();
@@ -328,8 +358,7 @@
                                                  const char *column_name) {
     auto *r = static_cast<storage::TableResultSet *>(result_set);
     std::string column_name_(column_name);
-    common::String *ret =
-        r->get_value<common::String *>(column_name_);
+    common::String *ret = r->get_value<common::String *>(column_name_);
     // Caller should free return's char* 's space.
     char *dup = (char *)malloc(ret->len_ + 1);
     if (dup) {
diff --git a/cpp/src/cwrapper/tsfile_cwrapper.h b/cpp/src/cwrapper/tsfile_cwrapper.h
index 3072753..75dc036 100644
--- a/cpp/src/cwrapper/tsfile_cwrapper.h
+++ b/cpp/src/cwrapper/tsfile_cwrapper.h
@@ -52,6 +52,7 @@
     TS_ENCODING_GORILLA = 8,
     TS_ENCODING_ZIGZAG = 9,
     TS_ENCODING_FREQ = 10,
+    TS_ENCODING_SPRINTZ = 12,
     TS_ENCODING_INVALID = 255
 } TSEncoding;
 
@@ -118,6 +119,79 @@
 typedef int32_t ERRNO;
 typedef int64_t Timestamp;
 
+/**
+ * @brief Get the encoding type for global time column
+ *
+ * @return uint8_t Time encoding type enum value (cast to uint8_t)
+ */
+uint8_t get_global_time_encoding();
+
+/**
+ * @brief Get the compression type for global time column
+ *
+ * @return uint8_t Time compression type enum value (cast to uint8_t)
+ */
+uint8_t get_global_time_compression();
+
+/**
+ * @brief Get the encoding type for specified data type
+ *
+ * @param data_type The data type to query encoding for
+ * @return uint8_t Encoding type enum value (cast to uint8_t)
+ */
+uint8_t get_datatype_encoding(uint8_t data_type);
+
+/**
+ * @brief Get the global default compression type
+ *
+ * @return uint8_t Compression type enum value (cast to uint8_t)
+ */
+uint8_t get_global_compression();
+
+/**
+ * @brief Sets the global time column encoding method
+ *
+ * Validates and sets the encoding type for time series timestamps.
+ * Supported encodings: TS_2DIFF, PLAIN, GORILLA, ZIGZAG, RLE, SPRINTZ
+ *
+ * @param encoding The encoding type to set (as uint8_t)
+ * @return int E_OK on success, E_NOT_SUPPORT for invalid encoding
+ */
+int set_global_time_encoding(uint8_t encoding);
+
+/**
+ * @brief Sets the global time column compression method
+ *
+ * Validates and sets the compression type for time series timestamps.
+ * Supported compressions: UNCOMPRESSED, SNAPPY, GZIP, LZO, LZ4
+ *
+ * @param compression The compression type to set (as uint8_t)
+ * @return int E_OK on success, E_NOT_SUPPORT for invalid compression
+ */
+int set_global_time_compression(uint8_t compression);
+
+/**
+ * @brief Set encoding type for specific data type
+ * @param data_type The data type to configure
+ * @param encoding The encoding type to set
+ * @return E_OK if success, E_NOT_SUPPORT if encoding is not supported for the
+ * data type
+ * @note Supported encodings per data type:
+ *        - BOOLEAN: PLAIN only
+ *        - INT32/INT64: PLAIN, TS_2DIFF, GORILLA, ZIGZAG, RLE, SPRINTZ
+ *        - FLOAT/DOUBLE: PLAIN, TS_2DIFF, GORILLA, SPRINTZ
+ *        - STRING: PLAIN, DICTIONARY
+ */
+int set_datatype_encoding(uint8_t data_type, uint8_t encoding);
+
+/**
+ * @brief Set the global default compression type
+ * @param compression Compression type to set
+ * @return E_OK if success, E_NOT_SUPPORT if compression is not supported
+ * @note Supported compressions: UNCOMPRESSED, SNAPPY, GZIP, LZO, LZ4
+ */
+int set_global_compression(uint8_t compression);
+
 /*--------------------------TsFile Reader and Writer------------------------ */
 
 /**
diff --git a/cpp/src/cwrapper/tsfile_cwrapper_expression.cc b/cpp/src/cwrapper/tsfile_cwrapper_expression.cc
index d9b622a..0effd8f 100644
--- a/cpp/src/cwrapper/tsfile_cwrapper_expression.cc
+++ b/cpp/src/cwrapper/tsfile_cwrapper_expression.cc
@@ -67,8 +67,8 @@
 //                                 float float_value) {
 //     Expression exp;
 //     CONSTRUCT_EXP_INTERNAL(exp, column_name);
-//     memcpy(&exp.const_condition.value_condition, &float_value, sizeof(float));
-//     exp.const_condition.type = TSDataType::TS_DATATYPE_FLOAT;
+//     memcpy(&exp.const_condition.value_condition, &float_value,
+//     sizeof(float)); exp.const_condition.type = TSDataType::TS_DATATYPE_FLOAT;
 //     return exp;
 // }
 // Expression create_column_filter(const char* column_name, OperatorType oper,
@@ -83,9 +83,9 @@
 //                                 const char* char_value) {
 //     Expression exp;
 //     CONSTRUCT_EXP_INTERNAL(exp, column_name);
-//     exp.const_condition.value_condition = reinterpret_cast<int64_t>(char_value);
-//     exp.const_condition.type = TSDataType::TS_DATATYPE_TEXT;
-//     return exp;
+//     exp.const_condition.value_condition =
+//     reinterpret_cast<int64_t>(char_value); exp.const_condition.type =
+//     TSDataType::TS_DATATYPE_TEXT; return exp;
 // }
 //
 // TimeFilterExpression* create_andquery_timefilter() {
@@ -95,7 +95,8 @@
 //
 // TimeFilterExpression* create_time_filter(const char* table_name,
 //                                          const char* column_name,
-//                                          OperatorType oper, int64_t timestamp) {
+//                                          OperatorType oper, int64_t
+//                                          timestamp) {
 //     std::string table_name_str(table_name);
 //     std::string column_name_str(column_name);
 //     storage::Path path(table_name_str, column_name_str);
@@ -163,7 +164,8 @@
 //     }
 // }
 //
-// Expression create_global_time_expression(OperatorType oper, int64_t timestamp) {
+// Expression create_global_time_expression(OperatorType oper, int64_t
+// timestamp) {
 //     Expression exp;
 //     exp.operate_type = oper;
 //     exp.expression_type = GLOBALTIME;
diff --git a/cpp/src/cwrapper/tsfile_cwrapper_expression.h b/cpp/src/cwrapper/tsfile_cwrapper_expression.h
index 4c59386..692be87 100644
--- a/cpp/src/cwrapper/tsfile_cwrapper_expression.h
+++ b/cpp/src/cwrapper/tsfile_cwrapper_expression.h
@@ -17,23 +17,23 @@
  * under the License.
  */
 //
-//#ifndef CWRAPPER_TSFILE_CWRAPPER_H
-//#define CWRAPPER_TSFILE_CWRAPPER_H
+// #ifndef CWRAPPER_TSFILE_CWRAPPER_H
+// #define CWRAPPER_TSFILE_CWRAPPER_H
 //
-//#include <fcntl.h>
-//#include <stdbool.h>
-//#include <stddef.h>
-//#include <stdint.h>
-//#ifdef _WIN32
-//#include <sys/stat.h>
-//#endif
+// #include <fcntl.h>
+// #include <stdbool.h>
+// #include <stddef.h>
+// #include <stdint.h>
+// #ifdef _WIN32
+// #include <sys/stat.h>
+// #endif
 //
-//#include "tsfile_cwrapper.h"
+// #include "tsfile_cwrapper.h"
 //
-//typedef void* TimeFilterExpression;
+// typedef void* TimeFilterExpression;
 //
-//#define MAX_COLUMN_FILTER_NUM 10
-//typedef enum operator_type {
+// #define MAX_COLUMN_FILTER_NUM 10
+// typedef enum operator_type {
 //    LT,
 //    LE,
 //    EQ,
@@ -42,18 +42,18 @@
 //    NOTEQ,
 //} OperatorType;
 //
-//typedef enum expression_type {
+// typedef enum expression_type {
 //    OR,
 //    AND,
 //    GLOBALTIME,
 //} ExpressionType;
 //
-//typedef struct constant {
+// typedef struct constant {
 //    int64_t value_condition;
 //    int type;
 //} Constant;
 //
-//typedef struct expression {
+// typedef struct expression {
 //    const char* column_name;
 //    Constant const_condition;
 //    ExpressionType expression_type;
@@ -62,40 +62,42 @@
 //    int children_length;
 //} Expression;
 //
-//typedef void* QueryDataRetINTERNAL;
-//typedef struct query_data_ret {
+// typedef void* QueryDataRetINTERNAL;
+// typedef struct query_data_ret {
 //    char** column_names;
 //    int column_num;
 //    QueryDataRetINTERNAL data;
 //}* QueryDataRet;
 //
-//#ifdef __cplusplus
-//extern "C" {
-//#endif
+// #ifdef __cplusplus
+// extern "C" {
+// #endif
 //
-//TimeFilterExpression* create_query_and_time_filter();
+// TimeFilterExpression* create_query_and_time_filter();
 //
-//TimeFilterExpression* create_time_filter(const char* table_name,
+// TimeFilterExpression* create_time_filter(const char* table_name,
 //                                         const char* column_name,
 //                                         OperatorType oper,
 //                                         timestamp timestamp);
 //
-//TimeFilterExpression* add_time_filter_to_and_query(
+// TimeFilterExpression* add_time_filter_to_and_query(
 //    TimeFilterExpression* exp_and, TimeFilterExpression* exp);
 //
-//void destroy_time_filter_query(TimeFilterExpression* expression);
+// void destroy_time_filter_query(TimeFilterExpression* expression);
 //
-//Expression* create_time_expression(const char* column_name, OperatorType oper,
+// Expression* create_time_expression(const char* column_name, OperatorType
+// oper,
 //                                   timestamp timestamp);
 //
-//Expression* add_and_filter_to_and_query(Expression* exp_and, Expression* exp);
+// Expression* add_and_filter_to_and_query(Expression* exp_and, Expression*
+// exp);
 //
-//QueryDataRet ts_reader_query(TsFileReader reader, const char* table_name,
+// QueryDataRet ts_reader_query(TsFileReader reader, const char* table_name,
 //                             const char** columns, int colum_num,
 //                             TimeFilterExpression* expression);
 //
 //
-//#ifdef __cplusplus
+// #ifdef __cplusplus
 //}
-//#endif
-//#endif  // CWRAPPER_TSFILE_CWRAPPER_H
+// #endif
+// #endif  // CWRAPPER_TSFILE_CWRAPPER_H
diff --git a/cpp/src/encoding/bitpack_decoder.h b/cpp/src/encoding/bitpack_decoder.h
index 099afc7..2468d51 100644
--- a/cpp/src/encoding/bitpack_decoder.h
+++ b/cpp/src/encoding/bitpack_decoder.h
@@ -120,9 +120,6 @@
 
     void read_bit_packing_buffer(int bit_packed_group_count,
                                  int last_bit_packed_num) {
-        if (current_buffer_ != nullptr) {
-            delete[] current_buffer_;
-        }
         current_buffer_ = new int64_t[bit_packed_group_count * 8];
         unsigned char bytes[bit_packed_group_count * bit_width_];
         int bytes_to_read = bit_packed_group_count * bit_width_;
diff --git a/cpp/src/encoding/bitpack_encoder.h b/cpp/src/encoding/bitpack_encoder.h
index d169c73..3a978e0 100644
--- a/cpp/src/encoding/bitpack_encoder.h
+++ b/cpp/src/encoding/bitpack_encoder.h
@@ -170,8 +170,7 @@
             maxSize += bytesPerGroup;
         }
 
-        // Add additional bytes, because each bitpack group has a header of 1
-        // byte and a tail of 1 byte.
+        // Add additional bytes, because each bitpack group has a header of 1 byte and a tail of 1 byte.
         maxSize += fullGroups * (1 + 1) + (remainingValues > 0 ? (1 + 1) : 0);
         return maxSize;
     }
diff --git a/cpp/src/encoding/decoder.h b/cpp/src/encoding/decoder.h
index b792ccf..4dc2478 100644
--- a/cpp/src/encoding/decoder.h
+++ b/cpp/src/encoding/decoder.h
@@ -29,7 +29,7 @@
     Decoder() {}
     virtual ~Decoder() {}
     virtual void reset() = 0;
-    virtual bool has_remaining() = 0;
+    virtual bool has_remaining(const common::ByteStream &buffer) = 0;
     virtual int read_boolean(bool &ret_value, common::ByteStream &in) = 0;
     virtual int read_int32(int32_t &ret_value, common::ByteStream &in) = 0;
     virtual int read_int64(int64_t &ret_value, common::ByteStream &in) = 0;
diff --git a/cpp/src/encoding/decoder_factory.h b/cpp/src/encoding/decoder_factory.h
index 8918c92..37819ce 100644
--- a/cpp/src/encoding/decoder_factory.h
+++ b/cpp/src/encoding/decoder_factory.h
@@ -21,17 +21,24 @@
 #define ENCODING_DECODER_FACTORY_H
 
 #include "decoder.h"
+#include "dictionary_decoder.h"
+#include "double_sprintz_decoder.h"
+#include "encoding/int32_rle_decoder.h"
+#include "encoding/int64_rle_decoder.h"
+#include "float_sprintz_decoder.h"
 #include "gorilla_decoder.h"
+#include "int32_sprintz_decoder.h"
+#include "int64_sprintz_decoder.h"
 #include "plain_decoder.h"
 #include "ts2diff_decoder.h"
+#include "zigzag_decoder.h"
 
 namespace storage {
-
 #define ALLOC_AND_RETURN_DECODER(DecoderType)                                \
     do {                                                                     \
-        void *buf =                                                          \
+        void* buf =                                                          \
             common::mem_alloc(sizeof(DecoderType), common::MOD_DECODER_OBJ); \
-        DecoderType *decoder = nullptr;                                      \
+        DecoderType* decoder = nullptr;                                      \
         if (buf != nullptr) {                                                \
             decoder = new (buf) DecoderType;                                 \
         }                                                                    \
@@ -40,7 +47,7 @@
 
 class DecoderFactory {
    public:
-    static Decoder *alloc_time_decoder() {
+    static Decoder* alloc_time_decoder() {
         if (common::g_config_value_.time_encoding_type_ == common::PLAIN) {
             ALLOC_AND_RETURN_DECODER(PlainDecoder);
         } else if (common::g_config_value_.time_encoding_type_ ==
@@ -48,50 +55,103 @@
             ALLOC_AND_RETURN_DECODER(LongTS2DIFFDecoder);
         } else {
             // not support now
-            ASSERT(false);
             return nullptr;
         }
     }
 
-    static Decoder *alloc_value_decoder(common::TSEncoding encoding,
+    static Decoder* alloc_value_decoder(common::TSEncoding encoding,
                                         common::TSDataType data_type) {
-        if (encoding == common::PLAIN) {
-            ALLOC_AND_RETURN_DECODER(PlainDecoder);
-        } else if (encoding == common::GORILLA) {
-            if (data_type == common::INT32) {
-                ALLOC_AND_RETURN_DECODER(IntGorillaDecoder);
-            } else if (data_type == common::INT64) {
-                ALLOC_AND_RETURN_DECODER(LongGorillaDecoder);
-            } else if (data_type == common::FLOAT) {
-                ALLOC_AND_RETURN_DECODER(FloatGorillaDecoder);
-            } else if (data_type == common::DOUBLE) {
-                ALLOC_AND_RETURN_DECODER(DoubleGorillaDecoder);
-            } else {
-                ASSERT(false);
+        using namespace common;
+
+        switch (encoding) {
+            case PLAIN:
+                ALLOC_AND_RETURN_DECODER(PlainDecoder);
+
+            case DICTIONARY:
+                switch (data_type) {
+                    case STRING:
+                    case TEXT:
+                        ALLOC_AND_RETURN_DECODER(DictionaryDecoder);
+                    default:
+                        return nullptr;
+                }
+
+            case RLE:
+                switch (data_type) {
+                    case INT32:
+                    case DATE:
+                        ALLOC_AND_RETURN_DECODER(Int32RleDecoder);
+                    case INT64:
+                    case TIMESTAMP:
+                        ALLOC_AND_RETURN_DECODER(Int64RleDecoder);
+                    default:
+                        return nullptr;
+                }
+
+            case GORILLA:
+                switch (data_type) {
+                    case INT32:
+                    case DATE:
+                        ALLOC_AND_RETURN_DECODER(IntGorillaDecoder);
+                    case INT64:
+                    case TIMESTAMP:
+                        ALLOC_AND_RETURN_DECODER(LongGorillaDecoder);
+                    case FLOAT:
+                        ALLOC_AND_RETURN_DECODER(FloatGorillaDecoder);
+                    case DOUBLE:
+                        ALLOC_AND_RETURN_DECODER(DoubleGorillaDecoder);
+                    default:
+                        return nullptr;
+                }
+
+            case TS_2DIFF:
+                switch (data_type) {
+                    case INT32:
+                    case DATE:
+                        ALLOC_AND_RETURN_DECODER(IntTS2DIFFDecoder);
+                    case INT64:
+                    case TIMESTAMP:
+                        ALLOC_AND_RETURN_DECODER(LongTS2DIFFDecoder);
+                    case FLOAT:
+                        ALLOC_AND_RETURN_DECODER(FloatTS2DIFFDecoder);
+                    case DOUBLE:
+                        ALLOC_AND_RETURN_DECODER(DoubleTS2DIFFDecoder);
+                    default:
+                        return nullptr;
+                }
+
+            case ZIGZAG:
+                switch (data_type) {
+                    case INT32:
+                        ALLOC_AND_RETURN_DECODER(IntZigzagDecoder);
+                    case INT64:
+                        ALLOC_AND_RETURN_DECODER(LongZigzagDecoder);
+                    default:
+                        return nullptr;
+                }
+
+            case SPRINTZ:
+                switch (data_type) {
+                    case INT32:
+                        ALLOC_AND_RETURN_DECODER(Int32SprintzDecoder);
+                    case INT64:
+                        ALLOC_AND_RETURN_DECODER(Int64SprintzDecoder);
+                    case FLOAT:
+                        ALLOC_AND_RETURN_DECODER(FloatSprintzDecoder);
+                    case DOUBLE:
+                        ALLOC_AND_RETURN_DECODER(DoubleSprintzDecoder);
+                    default:
+                        return nullptr;
+                }
+
+            default:
+                // Not supported encoding
                 return nullptr;
-            }
-        } else if (encoding == common::TS_2DIFF) {
-            if (data_type == common::INT32) {
-                ALLOC_AND_RETURN_DECODER(IntTS2DIFFDecoder);
-            } else if (data_type == common::INT64) {
-                ALLOC_AND_RETURN_DECODER(LongTS2DIFFDecoder);
-            } else if (data_type == common::FLOAT) {
-                ALLOC_AND_RETURN_DECODER(FloatTS2DIFFDecoder);
-            } else if (data_type == common::DOUBLE) {
-                ALLOC_AND_RETURN_DECODER(DoubleTS2DIFFDecoder);
-            } else {
-                ASSERT(false);
-            }
-        } else {
-            // not support now
-            ASSERT(false);
-            return nullptr;
         }
         return nullptr;
     }
 
-    static void free(Decoder *decoder) { common::mem_free(decoder); }
+    static void free(Decoder* decoder) { common::mem_free(decoder); }
 };
-
 }  // end namespace storage
 #endif  // ENCODING_DECODER_FACTORY_H
diff --git a/cpp/src/encoding/dictionary_decoder.h b/cpp/src/encoding/dictionary_decoder.h
index 4ac846d..46214c3 100644
--- a/cpp/src/encoding/dictionary_decoder.h
+++ b/cpp/src/encoding/dictionary_decoder.h
@@ -24,17 +24,45 @@
 #include <vector>
 
 #include "common/allocator/byte_stream.h"
+#include "decoder.h"
 #include "encoder.h"
-#include "encoding/bitpack_decoder.h"
+#include "encoding/int32_rle_decoder.h"
 
 namespace storage {
 
-class DictionaryDecoder {
+class DictionaryDecoder : public Decoder {
    private:
-    BitPackDecoder value_decoder_;
+    Int32RleDecoder value_decoder_;
     std::vector<std::string> entry_index_;
 
    public:
+    ~DictionaryDecoder() override = default;
+    bool has_remaining(const common::ByteStream &buffer) {
+        return (!entry_index_.empty() && value_decoder_.has_next_package()) ||
+               buffer.has_remaining();
+    }
+    int read_boolean(bool &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int32(int32_t &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int64(int64_t &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_float(float &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String &ret_value, common::PageArena &pa,
+                    common::ByteStream &in) {
+        int ret = common::E_OK;
+        auto std_str = read_string(in);
+        return ret_value.dup_from(std_str, pa);
+    }
+
     void init() { value_decoder_.init(); }
 
     void reset() {
diff --git a/cpp/src/encoding/dictionary_encoder.h b/cpp/src/encoding/dictionary_encoder.h
index f40df54..16b7345 100644
--- a/cpp/src/encoding/dictionary_encoder.h
+++ b/cpp/src/encoding/dictionary_encoder.h
@@ -26,43 +26,70 @@
 
 #include "common/allocator/byte_stream.h"
 #include "encoder.h"
-#include "encoding/bitpack_encoder.h"
+#include "encoding/int32_rle_encoder.h"
 
 namespace storage {
 
-class DictionaryEncoder {
+class DictionaryEncoder : public Encoder {
    private:
     std::map<std::string, int> entry_index_;
     std::vector<std::string> index_entry_;
-    BitPackEncoder values_encoder_;
+    Int32RleEncoder values_encoder_;
     int map_size_;
 
    public:
     DictionaryEncoder() {}
-    ~DictionaryEncoder() {}
+    ~DictionaryEncoder() override {}
+
+    int encode(bool value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int32_t value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int64_t value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(float value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(double value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(common::String value, common::ByteStream &out_stream) override {
+        encode(value.to_std_string(), out_stream);
+        return common::E_OK;
+    }
 
     void init() {
         map_size_ = 0;
         values_encoder_.init();
     }
 
-    void reset() {
+    void destroy() override {}
+
+    void reset() override {
         entry_index_.clear();
         index_entry_.clear();
         map_size_ = 0;
         values_encoder_.reset();
     }
 
-    void encode(std::string value, common::ByteStream &out) {
+    int encode(const char *value, common::ByteStream &out) {
+        return encode(std::string(value), out);
+    }
+
+    int encode(std::string value, common::ByteStream &out) {
         if (entry_index_.count(value) == 0) {
             index_entry_.push_back(value);
             map_size_ = map_size_ + value.length();
             entry_index_[value] = entry_index_.size();
         }
         values_encoder_.encode(entry_index_[value], out);
+        return common::E_OK;
     }
 
-    int flush(common::ByteStream &out) {
+    int flush(common::ByteStream &out) override {
         int ret = common::E_OK;
         ret = write_map(out);
         if (ret != common::E_OK) {
@@ -70,10 +97,7 @@
         } else {
             write_encoded_data(out);
         }
-        if (ret != common::E_OK) {
-            return ret;
-        }
-        return common::E_OK;
+        return ret;
     }
 
     int write_map(common::ByteStream &out) {
@@ -93,14 +117,14 @@
     }
 
     void write_encoded_data(common::ByteStream &out) {
-        values_encoder_.encode_flush(out);
+        values_encoder_.flush(out);
     }
 
-    int get_max_byte_size() {
+    int get_max_byte_size() override {
         // 4 bytes for storing dictionary size
         return 4 + map_size_ + values_encoder_.get_max_byte_size();
     }
 };
 
 }  // end namespace storage
-#endif  // ENCODING_DICTIONARY_ENCODER_H
+#endif  // ENCODING_DICTIONARY_ENCODER_H
\ No newline at end of file
diff --git a/cpp/src/encoding/double_sprintz_decoder.h b/cpp/src/encoding/double_sprintz_decoder.h
new file mode 100644
index 0000000..7a3ab6d
--- /dev/null
+++ b/cpp/src/encoding/double_sprintz_decoder.h
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef DOUBLE_SPRINTZ_DECODER_H
+#define DOUBLE_SPRINTZ_DECODER_H
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/fire.h"
+#include "gorilla_decoder.h"
+#include "int64_packer.h"
+#include "sprintz_decoder.h"
+
+namespace storage {
+
+class DoubleSprintzDecoder : public SprintzDecoder {
+   public:
+    DoubleSprintzDecoder() : fire_pred_(3), predict_scheme_("fire") {
+        SprintzDecoder::reset();
+        current_buffer_.resize(block_size_ + 1);
+        convert_buffer_.resize(block_size_);
+        pre_value_ = 0;
+        current_value_ = 0.0;
+        current_count_ = 0;
+        decode_size_ = 0;
+        is_block_read_ = false;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0.0);
+        std::fill(convert_buffer_.begin(), convert_buffer_.end(), 0);
+        fire_pred_.reset();
+    }
+
+    ~DoubleSprintzDecoder() override = default;
+
+    void set_predict_method(const std::string& method) {
+        predict_scheme_ = method;
+    }
+
+    int read_boolean(bool& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int32(int32_t& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int64(int64_t& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double& ret_value, common::ByteStream& in) override {
+        int ret = common::E_OK;
+        if (!is_block_read_) {
+            if (RET_FAIL(decode_block(in))) {
+                return ret;
+            }
+        }
+        ret_value = current_buffer_[current_count_++];
+        if (current_count_ == decode_size_) {
+            is_block_read_ = false;
+            current_count_ = 0;
+        }
+        return ret;
+    }
+    int read_float(float& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String& ret_value, common::PageArena& pa,
+                    common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void reset() override {
+        SprintzDecoder::reset();
+        pre_value_ = 0;
+        current_value_ = 0.0;
+        current_count_ = 0;
+        decode_size_ = 0;
+        is_block_read_ = false;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0.0);
+        std::fill(convert_buffer_.begin(), convert_buffer_.end(), 0);
+        fire_pred_.reset();
+    }
+
+    bool has_remaining(const common::ByteStream& input) override {
+        int min_length = sizeof(uint32_t) + 1;
+        return (is_block_read_ && current_count_ < decode_size_) ||
+               input.remaining_size() >= min_length;
+    }
+
+   protected:
+    int decode_block(common::ByteStream& input) override {
+        // read header bitWidth
+        int ret = common::E_OK;
+        uint8_t byte;
+        uint32_t bit_width = 0, read_len = 0;
+        ret = input.read_buf(&byte, 1, read_len);
+        if (ret != common::E_OK || read_len != 1) {
+            return common::E_DECODE_ERR;
+        }
+        bit_width |= static_cast<uint32_t>(byte);
+        bit_width_ = static_cast<int32_t>(bit_width);
+
+        if ((bit_width_ & (1 << 7)) != 0) {
+            decode_size_ = bit_width_ & ~(1 << 7);
+            DoubleGorillaDecoder decoder;
+            for (int i = 0; i < decode_size_; ++i) {
+                if (RET_FAIL(decoder.read_double(current_buffer_[i], input))) {
+                    return ret;
+                }
+            }
+        } else {
+            decode_size_ = block_size_ + 1;
+            common::SerializationUtil::read_double(pre_value_, input);
+            current_buffer_[0] = pre_value_;
+            std::vector<uint8_t> pack_buf(bit_width_);
+            uint32_t read_len = 0;
+            input.read_buf(reinterpret_cast<char*>(pack_buf.data()), bit_width_,
+                           read_len);
+            packer_ = std::make_shared<Int64Packer>(bit_width_);
+            std::vector<int64_t> tmp_buffer(block_size_);
+            packer_->unpack_8values(pack_buf.data(), 0, tmp_buffer.data());
+            for (int i = 0; i < block_size_; ++i) {
+                convert_buffer_[i] = tmp_buffer[i];
+            }
+            ret = recalculate();
+        }
+        is_block_read_ = true;
+        return ret;
+    }
+
+    int recalculate() override {
+        int ret = common::E_OK;
+        for (int i = 0; i < block_size_; ++i) {
+            int64_t v = convert_buffer_[i];
+            convert_buffer_[i] = (v % 2 == 0) ? -v / 2 : (v + 1) / 2;
+        }
+
+        if (predict_scheme_ == "delta") {
+            uint64_t prev_bits;
+            std::memcpy(&prev_bits, &current_buffer_[0], sizeof(prev_bits));
+            int64_t corrected0 =
+                convert_buffer_[0] + static_cast<int64_t>(prev_bits);
+            convert_buffer_[0] = corrected0;
+            double d0;
+            std::memcpy(&d0, &corrected0, sizeof(corrected0));
+            current_buffer_[1] = d0;
+
+            for (int i = 1; i < block_size_; ++i) {
+                convert_buffer_[i] += convert_buffer_[i - 1];
+                int64_t bits = convert_buffer_[i];
+                double di;
+                std::memcpy(&di, &bits, sizeof(bits));
+                current_buffer_[i + 1] = di;
+            }
+
+        } else if (predict_scheme_ == "fire") {
+            fire_pred_.reset();
+            uint64_t prev_bits;
+            std::memcpy(&prev_bits, &current_buffer_[0], sizeof(prev_bits));
+            int64_t p = fire_pred_.predict(prev_bits);
+            int64_t e0 = convert_buffer_[0];
+            int64_t corrected0 = p + e0;
+            convert_buffer_[0] = corrected0;
+            double d0;
+            std::memcpy(&d0, &corrected0, sizeof(corrected0));
+            current_buffer_[1] = d0;
+            fire_pred_.train(prev_bits, corrected0, e0);
+
+            for (int i = 1; i < block_size_; ++i) {
+                uint64_t prev_bits_i;
+                std::memcpy(&prev_bits_i, &current_buffer_[i],
+                            sizeof(prev_bits_i));
+                int64_t err = convert_buffer_[i];
+                int64_t pred = fire_pred_.predict(prev_bits_i);
+                int64_t corrected = pred + err;
+                convert_buffer_[i] = corrected;
+                double di;
+                std::memcpy(&di, &corrected, sizeof(corrected));
+                current_buffer_[i + 1] = di;
+                fire_pred_.train(prev_bits_i, corrected, err);
+            }
+
+        } else {
+            ret = common::E_DECODE_ERR;
+        }
+        return ret;
+    }
+
+   private:
+    double pre_value_;
+    double current_value_;
+    size_t current_count_;
+    int decode_size_;
+    bool is_block_read_ = false;
+
+    std::vector<double> current_buffer_;
+    std::vector<int64_t> convert_buffer_;
+    std::shared_ptr<Int64Packer> packer_;
+    LongFire fire_pred_;
+    std::string predict_scheme_;
+};
+
+}  // namespace storage
+
+#endif  // DOUBLE_SPRINTZ_DECODER_H
diff --git a/cpp/src/encoding/double_sprintz_encoder.h b/cpp/src/encoding/double_sprintz_encoder.h
new file mode 100644
index 0000000..1571dcc
--- /dev/null
+++ b/cpp/src/encoding/double_sprintz_encoder.h
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef DOUBLE_SPRINTZ_ENCODER_H
+#define DOUBLE_SPRINTZ_ENCODER_H
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/fire.h"
+#include "encoding/int64_packer.h"
+#include "gorilla_encoder.h"
+#include "sprintz_encoder.h"
+
+namespace storage {
+
+class DoubleSprintzEncoder : public SprintzEncoder {
+   public:
+    DoubleSprintzEncoder() : fire_pred_(3) {
+        convert_buffer_.resize(block_size_);
+    }
+
+    ~DoubleSprintzEncoder() override = default;
+
+    void reset() override {
+        SprintzEncoder::reset();
+        values_.clear();
+    }
+
+    void destroy() override {}
+
+    int get_one_item_max_size() override {
+        return 1 + (1 + block_size_) * static_cast<int>(sizeof(int64_t));
+    }
+
+    int get_max_byte_size() override {
+        return 1 + (static_cast<int>(values_.size()) + 1) *
+                       static_cast<int>(sizeof(int64_t));
+    }
+
+    int encode(bool, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int32_t, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int64_t, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(float, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(double value, common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (!is_first_cached_) {
+            values_.push_back(value);
+            is_first_cached_ = true;
+            return ret;
+        }
+        values_.push_back(value);
+
+        if (values_.size() == block_size_ + 1) {
+            fire_pred_.reset();
+            for (int i = 1; i <= block_size_; ++i) {
+                convert_buffer_[i - 1] = predict(values_[i], values_[i - 1]);
+            }
+            bit_pack();
+            is_first_cached_ = false;
+            values_.clear();
+            group_num_++;
+            if (group_num_ == group_max_) {
+                if (RET_FAIL(flush(out_stream))) return ret;
+            }
+        }
+        return ret;
+    }
+    int encode(const common::String, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int flush(common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (byte_cache_.total_size() > 0) {
+            if (RET_FAIL(common::SerializationUtil::chunk_read_all_data(
+                    byte_cache_, out_stream))) {
+                return ret;
+            }
+        }
+
+        if (!values_.empty()) {
+            int size = static_cast<int>(values_.size());
+            size |= (1 << 7);
+            common::SerializationUtil::
+                write_int_little_endian_padded_on_bit_width(size, out_stream,
+                                                            1);
+            DoubleGorillaEncoder encoder;
+            for (double val : values_) {
+                encoder.encode(val, out_stream);
+            }
+            encoder.flush(out_stream);
+        }
+
+        reset();
+        return ret;
+    }
+
+   protected:
+    void bit_pack() override {
+        // extract and remove first value
+        double pre_value = values_[0];
+        values_.erase(values_.begin());
+
+        // compute bit width and init packer
+        bit_width_ = get_int64_max_bit_width(convert_buffer_);
+        packer_ = std::make_shared<Int64Packer>(bit_width_);
+
+        std::vector<uint8_t> bytes(bit_width_);
+        packer_->pack_8values(convert_buffer_.data(), 0, bytes.data());
+
+        // write bit_width and first value
+        common::SerializationUtil::write_int_little_endian_padded_on_bit_width(
+            bit_width_, byte_cache_, 1);
+        uint8_t buf[8];
+        common::double_to_bytes(pre_value, buf);
+        byte_cache_.write_buf(reinterpret_cast<const char*>(buf), 8);
+        byte_cache_.write_buf(reinterpret_cast<const char*>(bytes.data()),
+                              bytes.size());
+    }
+
+    int64_t predict(double value, double prev) {
+        int64_t curr_bits = common::double_to_long(value);
+        int64_t prev_bits = common::double_to_long(prev);
+        int64_t raw_pred;
+        if (predict_method_ == "delta") {
+            raw_pred = curr_bits - prev_bits;
+        } else if (predict_method_ == "fire") {
+            int64_t pred = fire_pred_.predict(prev_bits);
+            int64_t err = curr_bits - pred;
+            fire_pred_.train(prev_bits, curr_bits, err);
+            raw_pred = err;
+        } else {
+            ASSERT(false);
+        }
+        return (raw_pred <= 0) ? -2 * raw_pred : 2 * raw_pred - 1;
+    }
+
+   private:
+    std::vector<double> values_;
+    std::vector<int64_t> convert_buffer_;
+    std::shared_ptr<Int64Packer> packer_;
+    LongFire fire_pred_;
+};
+
+}  // namespace storage
+
+#endif  // DOUBLE_SPRINTZ_ENCODER_H
diff --git a/cpp/src/encoding/encode_utils.h b/cpp/src/encoding/encode_utils.h
index 45c4b04..a8be851 100644
--- a/cpp/src/encoding/encode_utils.h
+++ b/cpp/src/encoding/encode_utils.h
@@ -52,29 +52,42 @@
     if (i == 0) {
         return 32;
     }
-    int32_t y;
+    uint32_t x = static_cast<uint32_t>(i);
     int32_t n = 31;
-    y = i << 16;
+    uint32_t y;
+    y = x << 16;
     if (y != 0) {
-        n = n - 16;
-        i = y;
+        n -= 16;
+        x = y;
     }
-    y = i << 8;
+
+    y = x << 8;
     if (y != 0) {
-        n = n - 8;
-        i = y;
+        n -= 8;
+        x = y;
     }
-    y = i << 4;
+
+    y = x << 4;
     if (y != 0) {
-        n = n - 4;
-        i = y;
+        n -= 4;
+        x = y;
     }
-    y = i << 2;
+
+    y = x << 2;
     if (y != 0) {
-        n = n - 2;
-        i = y;
+        n -= 2;
+        x = y;
     }
-    return n - (((uint32_t)(i << 1)) >> 31);
+    return n - static_cast<int32_t>((x << 1) >> 31);
+}
+
+FORCE_INLINE int get_int32_max_bit_width(const std::vector<int32_t>& nums) {
+    int ret = 1;
+    for (auto num : nums) {
+        int bit_width = 32 - number_of_leading_zeros(num);
+        ret = std::max(ret, bit_width);
+    }
+    return ret;
 }
 
 FORCE_INLINE int32_t number_of_leading_zeros(int64_t i) {
@@ -108,38 +121,46 @@
 }
 
 FORCE_INLINE int32_t number_of_trailing_zeros(int64_t i) {
-    if (i == 0) {
-        return 64;
-    }
-    int32_t x, y;
+    if (i == 0) return 64;
+    uint32_t x, y;
     int32_t n = 63;
-    y = (int32_t)i;
+    y = static_cast<uint32_t>(i);
     if (y != 0) {
-        n = n - 32;
+        n -= 32;
         x = y;
-    } else
-        x = (int32_t)(((uint64_t)i) >> 32);
+    } else {
+        x = static_cast<uint32_t>(static_cast<uint64_t>(i) >> 32);
+    }
     y = x << 16;
-    if (y != 0) {
-        n = n - 16;
+    if (y) {
+        n -= 16;
         x = y;
     }
     y = x << 8;
-    if (y != 0) {
-        n = n - 8;
+    if (y) {
+        n -= 8;
         x = y;
     }
     y = x << 4;
-    if (y != 0) {
-        n = n - 4;
+    if (y) {
+        n -= 4;
         x = y;
     }
     y = x << 2;
-    if (y != 0) {
-        n = n - 2;
+    if (y) {
+        n -= 2;
         x = y;
     }
-    return n - (((uint32_t)(x << 1)) >> 31);
+    return n - static_cast<int32_t>((x << 1) >> 31);
+}
+
+FORCE_INLINE int get_int64_max_bit_width(const std::vector<int64_t>& nums) {
+    int ret = 1;
+    for (auto num : nums) {
+        int bit_width = 64 - number_of_leading_zeros(num);
+        ret = std::max(ret, bit_width);
+    }
+    return ret;
 }
 
 }  // end namespace storage
diff --git a/cpp/src/encoding/encoder_factory.h b/cpp/src/encoding/encoder_factory.h
index 0e582ae..e19aeec 100644
--- a/cpp/src/encoding/encoder_factory.h
+++ b/cpp/src/encoding/encoder_factory.h
@@ -21,10 +21,18 @@
 #define ENCODING_ENCODER_FACTORY_H
 
 #include "common/global.h"
+#include "dictionary_encoder.h"
+#include "double_sprintz_encoder.h"
 #include "encoder.h"
+#include "encoding/int32_rle_encoder.h"
+#include "encoding/int64_rle_encoder.h"
+#include "float_sprintz_encoder.h"
 #include "gorilla_encoder.h"
+#include "int32_sprintz_encoder.h"
+#include "int64_sprintz_encoder.h"
 #include "plain_encoder.h"
 #include "ts2diff_encoder.h"
+#include "zigzag_encoder.h"
 
 namespace storage {
 
@@ -50,7 +58,6 @@
             ALLOC_AND_RETURN_ENCODER(LongTS2DIFFEncoder);
         } else {
             // not support now
-            ASSERT(false);
             return nullptr;
         }
     }
@@ -62,64 +69,112 @@
             ALLOC_AND_RETURN_ENCODER(LongTS2DIFFEncoder);
         } else {
             // not support now
-            ASSERT(false);
             return nullptr;
         }
     }
 
     static Encoder *alloc_value_encoder(common::TSEncoding encoding,
                                         common::TSDataType data_type) {
-        if (encoding == common::PLAIN) {
-            ALLOC_AND_RETURN_ENCODER(PlainEncoder);
-        } else if (encoding == common::DICTIONARY) {
-            return nullptr;
-        } else if (encoding == common::RLE) {
-            return nullptr;
-        } else if (encoding == common::DIFF) {
-            return nullptr;
-        } else if (encoding == common::TS_2DIFF) {
-            if (data_type == common::INT32) {
-                ALLOC_AND_RETURN_ENCODER(IntTS2DIFFEncoder);
-            } else if (data_type == common::INT64) {
-                ALLOC_AND_RETURN_ENCODER(LongTS2DIFFEncoder);
-            } else if (data_type == common::FLOAT) {
-                ALLOC_AND_RETURN_ENCODER(FloatTS2DIFFEncoder);
-            } else if (data_type == common::DOUBLE) {
-                ALLOC_AND_RETURN_ENCODER(DoubleTS2DIFFEncoder);
-            } else {
-                ASSERT(false);
-            }
-        } else if (encoding == common::BITMAP) {
-            return nullptr;
-        } else if (encoding == common::GORILLA_V1) {
-            return nullptr;
-        } else if (encoding == common::REGULAR) {
-            return nullptr;
-        } else if (encoding == common::GORILLA) {
-            if (data_type == common::INT32) {
-                ALLOC_AND_RETURN_ENCODER(IntGorillaEncoder);
-            } else if (data_type == common::INT64) {
-                ALLOC_AND_RETURN_ENCODER(LongGorillaEncoder);
-            } else if (data_type == common::FLOAT) {
-                ALLOC_AND_RETURN_ENCODER(FloatGorillaEncoder);
-            } else if (data_type == common::DOUBLE) {
-                ALLOC_AND_RETURN_ENCODER(DoubleGorillaEncoder);
-            } else {
-                ASSERT(false);
-            }
-        } else if (encoding == common::ZIGZAG) {
-            return nullptr;
-        } else if (encoding == common::FREQ) {
-            return nullptr;
-        } else {
-            // not support now
-            ASSERT(false);
-            return nullptr;
+        using namespace common;
+
+        switch (encoding) {
+            case PLAIN:
+                ALLOC_AND_RETURN_ENCODER(PlainEncoder);
+
+            case DICTIONARY:
+                switch (data_type) {
+                    case STRING:
+                    case TEXT:
+                        ALLOC_AND_RETURN_ENCODER(DictionaryEncoder);
+                    default:
+                        return nullptr;
+                }
+
+            case RLE:
+                switch (data_type) {
+                    case INT32:
+                    case DATE:
+                        ALLOC_AND_RETURN_ENCODER(Int32RleEncoder);
+                    case INT64:
+                    case TIMESTAMP:
+                        ALLOC_AND_RETURN_ENCODER(Int64RleEncoder);
+                    default:
+                        return nullptr;
+                }
+
+            case TS_2DIFF:
+                switch (data_type) {
+                    case INT32:
+                    case DATE:
+                        ALLOC_AND_RETURN_ENCODER(IntTS2DIFFEncoder);
+                    case INT64:
+                    case TIMESTAMP:
+                        ALLOC_AND_RETURN_ENCODER(LongTS2DIFFEncoder);
+                    case FLOAT:
+                        ALLOC_AND_RETURN_ENCODER(FloatTS2DIFFEncoder);
+                    case DOUBLE:
+                        ALLOC_AND_RETURN_ENCODER(DoubleTS2DIFFEncoder);
+                    default:
+                        return nullptr;
+                }
+
+            case GORILLA:
+                switch (data_type) {
+                    case INT32:
+                    case DATE:
+                        ALLOC_AND_RETURN_ENCODER(IntGorillaEncoder);
+                    case INT64:
+                    case TIMESTAMP:
+                        ALLOC_AND_RETURN_ENCODER(LongGorillaEncoder);
+                    case FLOAT:
+                        ALLOC_AND_RETURN_ENCODER(FloatGorillaEncoder);
+                    case DOUBLE:
+                        ALLOC_AND_RETURN_ENCODER(DoubleGorillaEncoder);
+                    default:
+                        return nullptr;
+                }
+
+            case ZIGZAG:
+                switch (data_type) {
+                    case INT32:
+                        ALLOC_AND_RETURN_ENCODER(IntZigzagEncoder);
+                    case INT64:
+                        ALLOC_AND_RETURN_ENCODER(LongZigzagEncoder);
+                    default:
+                        return nullptr;
+                }
+
+            case SPRINTZ:
+                switch (data_type) {
+                    case INT32:
+                        ALLOC_AND_RETURN_ENCODER(Int32SprintzEncoder);
+                    case INT64:
+                        ALLOC_AND_RETURN_ENCODER(Int64SprintzEncoder);
+                    case FLOAT:
+                        ALLOC_AND_RETURN_ENCODER(FloatSprintzEncoder);
+                    case DOUBLE:
+                        ALLOC_AND_RETURN_ENCODER(DoubleSprintzEncoder);
+                    default:
+                        return nullptr;
+                }
+
+            case DIFF:
+            case BITMAP:
+            case GORILLA_V1:
+            case REGULAR:
+            case FREQ:
+                return nullptr;
+
+            default:
+                return nullptr;
         }
         return nullptr;
     }
 
-    static void free(Encoder *encoder) { common::mem_free(encoder); }
+    static void free(Encoder *encoder) {
+        encoder->~Encoder();
+        common::mem_free(encoder);
+    }
 };
 
 }  // end namespace storage
diff --git a/cpp/src/encoding/fire.h b/cpp/src/encoding/fire.h
new file mode 100644
index 0000000..9b319a1
--- /dev/null
+++ b/cpp/src/encoding/fire.h
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_FIRE_H
+#define ENCODING_FIRE_H
+
+#include <cstdint>
+
+template <typename T>
+class Fire {
+   public:
+    explicit Fire(int learning_rate)
+        : learn_shift_(learning_rate),
+          bit_width_(0),
+          accumulator_(0),
+          delta_(0) {}
+
+    virtual ~Fire() = default;
+
+    virtual T predict(T value) = 0;
+
+    virtual void train(T pre, T val, T err) = 0;
+
+    virtual void reset() {
+        accumulator_ = 0;
+        delta_ = 0;
+    }
+
+   protected:
+    int learn_shift_;
+    int bit_width_;
+    int accumulator_;
+    T delta_;
+};
+
+class IntFire : public Fire<int> {
+   public:
+    explicit IntFire(int learning_rate) : Fire(learning_rate) {
+        bit_width_ = 8;
+        accumulator_ = 0;
+        delta_ = 0;
+    }
+
+    void reset() override {
+        accumulator_ = 0;
+        delta_ = 0;
+    }
+
+    int predict(int value) override {
+        int alpha = accumulator_ >> learn_shift_;
+        int diff = static_cast<int>((static_cast<int64_t>(alpha) * delta_)) >>
+                   bit_width_;
+
+        return value + diff;
+    }
+
+    void train(int pre, int val, int err) override {
+        int gradient = err > 0 ? -delta_ : delta_;
+        accumulator_ -= gradient;
+        delta_ = val - pre;
+    }
+};
+
+class LongFire : public Fire<int64_t> {
+   public:
+    explicit LongFire(int learning_rate) : Fire(learning_rate) {
+        bit_width_ = 16;
+        accumulator_ = 0;
+        delta_ = 0;
+    }
+
+    void reset() override {
+        accumulator_ = 0;
+        delta_ = 0;
+    }
+
+    int64_t predict(int64_t value) override {
+        int64_t alpha = accumulator_ >> learn_shift_;
+        int64_t diff = (alpha * delta_) >> bit_width_;
+        return value + diff;
+    }
+
+    void train(int64_t pre, int64_t val, int64_t err) override {
+        int64_t gradient = err > 0 ? -delta_ : delta_;
+        accumulator_ -= gradient;
+        delta_ = val - pre;
+    }
+};
+
+#endif  // ENCODING_FIRE_H
diff --git a/cpp/src/encoding/float_sprintz_decoder.h b/cpp/src/encoding/float_sprintz_decoder.h
new file mode 100644
index 0000000..319b251
--- /dev/null
+++ b/cpp/src/encoding/float_sprintz_decoder.h
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef FLOAT_SPRINTZ_DECODER_H
+#define FLOAT_SPRINTZ_DECODER_H
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/fire.h"
+#include "gorilla_decoder.h"
+#include "int32_packer.h"
+#include "sprintz_decoder.h"
+
+namespace storage {
+
+class FloatSprintzDecoder : public SprintzDecoder {
+   public:
+    FloatSprintzDecoder() : fire_pred_(2), predict_scheme_("fire") {
+        SprintzDecoder::reset();
+        current_buffer_.resize(block_size_ + 1);
+        convert_buffer_.resize(block_size_);
+        pre_value_ = 0;
+        current_value_ = 0.0f;
+        current_count_ = 0;
+        decode_size_ = 0;
+        is_block_read_ = false;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0.0f);
+        std::fill(convert_buffer_.begin(), convert_buffer_.end(), 0);
+        fire_pred_.reset();
+    }
+
+    ~FloatSprintzDecoder() override = default;
+
+    void set_predict_method(const std::string &method) {
+        predict_scheme_ = method;
+    }
+
+    int read_boolean(bool &ret_value, common::ByteStream &in) {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int32(int32_t &ret_value, common::ByteStream &in) {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int64(int64_t &ret_value, common::ByteStream &in) {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double &ret_value, common::ByteStream &in) {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String &ret_value, common::PageArena &pa,
+                    common::ByteStream &in) {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void reset() override {
+        SprintzDecoder::reset();
+        pre_value_ = 0;
+        current_value_ = 0.0f;
+        current_count_ = 0;
+        decode_size_ = 0;
+        is_block_read_ = false;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0.0f);
+        std::fill(convert_buffer_.begin(), convert_buffer_.end(), 0);
+        fire_pred_.reset();
+    }
+
+    bool has_remaining(const common::ByteStream &input) override {
+        int min_length = sizeof(uint32_t) + 1;
+        return (is_block_read_ && current_count_ < decode_size_) ||
+               input.remaining_size() >= min_length;
+    }
+
+    int read_float(float &ret_value, common::ByteStream &input) override {
+        int ret = common::E_OK;
+        if (!is_block_read_) {
+            if (RET_FAIL(decode_block(input))) {
+                return ret;
+            }
+        }
+        ret_value = current_buffer_[current_count_++];
+        if (current_count_ == decode_size_) {
+            is_block_read_ = false;
+            current_count_ = 0;
+        }
+        return ret;
+    }
+
+   protected:
+    int decode_block(common::ByteStream &input) override {
+        // read header bitWidth
+        int ret = common::E_OK;
+        uint8_t byte;
+        uint32_t bit_width = 0, read_len = 0;
+        ret = input.read_buf(&byte, 1, read_len);
+        if (ret != common::E_OK || read_len != 1) {
+            return common::E_DECODE_ERR;
+        }
+        bit_width |= static_cast<uint32_t>(byte);
+        bit_width_ = static_cast<int32_t>(bit_width);
+
+        if ((bit_width_ & (1 << 7)) != 0) {
+            decode_size_ = bit_width_ & ~(1 << 7);
+            FloatGorillaDecoder decoder;
+            for (int i = 0; i < decode_size_; ++i) {
+                if (RET_FAIL(decoder.read_float(current_buffer_[i], input))) {
+                    return ret;
+                }
+            }
+        } else {
+            // packed block
+            decode_size_ = block_size_ + 1;
+            common::SerializationUtil::read_float(pre_value_, input);
+            current_buffer_[0] = pre_value_;
+            // read packed data
+            std::vector<uint8_t> pack_buf(bit_width_);
+            uint32_t read_len = 0;
+            input.read_buf(reinterpret_cast<char *>(pack_buf.data()),
+                           bit_width_, read_len);
+            packer_ = std::make_shared<Int32Packer>(bit_width_);
+            std::vector<int32_t> tmp_buffer(block_size_);
+            packer_->unpack_8values(pack_buf.data(), 0, tmp_buffer.data());
+            // move into convert_buffer_
+            for (int i = 0; i < block_size_; ++i) {
+                convert_buffer_[i] = tmp_buffer[i];
+            }
+            ret = recalculate();
+        }
+        is_block_read_ = true;
+        return ret;
+    }
+
+    int recalculate() override {
+        int ret = common::E_OK;
+        for (int i = 0; i < block_size_; ++i) {
+            int32_t v = convert_buffer_[i];
+            convert_buffer_[i] = (v % 2 == 0) ? -v / 2 : (v + 1) / 2;
+        }
+
+        if (predict_scheme_ == "delta") {
+            uint32_t prev_bits;
+            std::memcpy(&prev_bits, &current_buffer_[0], sizeof(prev_bits));
+            // Java: convertBuffer[0] = convertBuffer[0] +
+            // Float.floatToIntBits(preValue);
+            int32_t corrected0 =
+                convert_buffer_[0] + static_cast<int32_t>(prev_bits);
+            convert_buffer_[0] = corrected0;
+            // Java: currentBuffer[1] = Float.intBitsToFloat(convertBuffer[0]);
+            float f0;
+            std::memcpy(&f0, &corrected0, sizeof(corrected0));
+            current_buffer_[1] = f0;
+
+            for (int i = 1; i < block_size_; ++i) {
+                // Java: convertBuffer[i] += convertBuffer[i - 1];
+                convert_buffer_[i] += convert_buffer_[i - 1];
+                int32_t bits = convert_buffer_[i];
+                float fi;
+                std::memcpy(&fi, &bits, sizeof(bits));
+                current_buffer_[i + 1] = fi;
+            }
+
+        } else if (predict_scheme_ == "fire") {
+            fire_pred_.reset();
+            uint32_t prev_bits;
+            std::memcpy(&prev_bits, &current_buffer_[0], sizeof(prev_bits));
+            // Java: int p = firePred.predict(Float.floatToIntBits(preValue));
+            int32_t p = fire_pred_.predict(prev_bits);
+            int32_t e0 = convert_buffer_[0];
+            int32_t corrected0 = p + e0;
+            convert_buffer_[0] = corrected0;
+            float f0;
+            std::memcpy(&f0, &corrected0, sizeof(corrected0));
+            current_buffer_[1] = f0;
+            // Java: firePred.train(Float.floatToIntBits(preValue),
+            // convertBuffer[0], e);
+            fire_pred_.train(prev_bits, corrected0, e0);
+
+            for (int i = 1; i < block_size_; ++i) {
+                uint32_t prev_bits_i;
+                std::memcpy(&prev_bits_i, &current_buffer_[i],
+                            sizeof(prev_bits_i));
+                int32_t err = convert_buffer_[i];
+                int32_t pred = fire_pred_.predict(prev_bits_i);
+                int32_t corrected = pred + err;
+                convert_buffer_[i] = corrected;
+                float fi;
+                std::memcpy(&fi, &corrected, sizeof(corrected));
+                current_buffer_[i + 1] = fi;
+                // Java: firePred.train(convertBuffer[i - 1], convertBuffer[i],
+                // err);
+                fire_pred_.train(prev_bits_i, corrected, err);
+            }
+
+        } else {
+            ret = common::E_DECODE_ERR;
+        }
+        return ret;
+    }
+
+   private:
+    float pre_value_;
+    float current_value_;
+    size_t current_count_;
+    int decode_size_;
+    bool is_block_read_ = false;
+
+    std::vector<float> current_buffer_;
+    std::vector<int32_t> convert_buffer_;
+    std::shared_ptr<Int32Packer> packer_;
+    IntFire fire_pred_;
+    std::string predict_scheme_;
+};
+
+}  // namespace storage
+
+#endif  // FLOAT_SPRINTZ_DECODER_H
\ No newline at end of file
diff --git a/cpp/src/encoding/float_sprintz_encoder.h b/cpp/src/encoding/float_sprintz_encoder.h
new file mode 100644
index 0000000..01151a2
--- /dev/null
+++ b/cpp/src/encoding/float_sprintz_encoder.h
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef FLOAT_SPRINTZ_ENCODER_H
+#define FLOAT_SPRINTZ_ENCODER_H
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/encode_utils.h"
+#include "encoding/fire.h"
+#include "encoding/int32_rle_encoder.h"
+#include "gorilla_encoder.h"
+#include "int32_packer.h"
+#include "sprintz_encoder.h"
+
+namespace storage {
+
+class FloatSprintzEncoder : public SprintzEncoder {
+   public:
+    FloatSprintzEncoder() : fire_pred_(2) {
+        convert_buffer_.resize(block_size_);
+    }
+
+    ~FloatSprintzEncoder() override = default;
+
+    void reset() override {
+        SprintzEncoder::reset();
+        values_.clear();
+    }
+
+    void destroy() override {}
+
+    int get_one_item_max_size() override {
+        return 1 + (1 + block_size_) * sizeof(int32_t);
+    }
+
+    int get_max_byte_size() override {
+        return 1 + (values_.size() + 1) * sizeof(int32_t);
+    }
+
+    int encode(bool, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int32_t, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int64_t, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(float value, common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (!is_first_cached_) {
+            values_.push_back(value);
+            is_first_cached_ = true;
+            return ret;
+        }
+        values_.push_back(value);
+
+        if (values_.size() == block_size_ + 1) {
+            fire_pred_.reset();
+            for (int i = 1; i <= block_size_; ++i) {
+                convert_buffer_[i - 1] = predict(values_[i], values_[i - 1]);
+            }
+            bit_pack();
+            is_first_cached_ = false;
+            values_.clear();
+            group_num_++;
+            if (group_num_ == group_max_) {
+                if (RET_FAIL(flush(out_stream))) return ret;
+            }
+        }
+        return ret;
+    }
+    int encode(double, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(const common::String, common::ByteStream&) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int flush(common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (byte_cache_.total_size() > 0) {
+            if (RET_FAIL(common::SerializationUtil::chunk_read_all_data(
+                    byte_cache_, out_stream))) {
+                return ret;
+            }
+        }
+
+        if (!values_.empty()) {
+            int size = static_cast<int>(values_.size());
+            size |= (1 << 7);
+            common::SerializationUtil::
+                write_int_little_endian_padded_on_bit_width(size, out_stream,
+                                                            1);
+            FloatGorillaEncoder encoder;
+            for (float val : values_) {
+                encoder.encode(val, out_stream);
+            }
+            encoder.flush(out_stream);
+        }
+
+        reset();
+        return ret;
+    }
+
+   protected:
+    void bit_pack() override {
+        // extract and remove first value
+        float pre_bits = values_[0];
+        values_.erase(values_.begin());
+
+        bit_width_ = get_int32_max_bit_width(convert_buffer_);
+        packer_ = std::make_shared<Int32Packer>(bit_width_);
+
+        std::vector<uint8_t> bytes(bit_width_);
+        packer_->pack_8values(convert_buffer_.data(), 0, bytes.data());
+
+        common::SerializationUtil::write_int_little_endian_padded_on_bit_width(
+            bit_width_, byte_cache_, 1);
+        uint8_t buffer[4];
+        common::float_to_bytes(pre_bits, buffer);
+        byte_cache_.write_buf(reinterpret_cast<const char*>(buffer), 4);
+        byte_cache_.write_buf(reinterpret_cast<const char*>(bytes.data()),
+                              bytes.size());
+    }
+
+    int32_t predict(float value, float prev_value) {
+        int32_t curr_bits = common::float_to_int(value);
+        int32_t prev_bits = common::float_to_int(prev_value);
+        int32_t raw_pred;
+        if (predict_method_ == "delta") {
+            raw_pred = curr_bits - prev_bits;
+        } else if (predict_method_ == "fire") {
+            int32_t pred = fire_pred_.predict(prev_bits);
+            int32_t err = curr_bits - pred;
+            fire_pred_.train(prev_bits, curr_bits, err);
+            raw_pred = err;
+        } else {
+            // unsupported
+            ASSERT(false);
+        }
+        return (raw_pred <= 0) ? -2 * raw_pred : 2 * raw_pred - 1;
+    }
+
+   private:
+    std::vector<float> values_;
+    std::vector<int32_t> convert_buffer_;
+    std::shared_ptr<Int32Packer> packer_;
+    IntFire fire_pred_;
+};
+
+}  // namespace storage
+
+#endif  // FLOAT_SPRINTZ_ENCODER_H
diff --git a/cpp/src/encoding/gorilla_decoder.h b/cpp/src/encoding/gorilla_decoder.h
index f374b32..e2b6206 100644
--- a/cpp/src/encoding/gorilla_decoder.h
+++ b/cpp/src/encoding/gorilla_decoder.h
@@ -35,7 +35,7 @@
    public:
     GorillaDecoder() { reset(); }
 
-    ~GorillaDecoder() {}
+    ~GorillaDecoder() override = default;
 
     void reset() {
         type_ = common::GORILLA;
@@ -49,7 +49,9 @@
     }
 
     FORCE_INLINE bool has_next() { return has_next_; }
-    FORCE_INLINE bool has_remaining() { return has_next(); }
+    FORCE_INLINE bool has_remaining(const common::ByteStream &buffer) {
+        return buffer.has_remaining() || has_next();
+    }
 
     // If empty, cache 8 bits from in_stream to 'buffer_'.
     void flush_byte_if_empty(common::ByteStream &in) {
@@ -158,7 +160,8 @@
                                                stored_leading_zeros_ -
                                                stored_trailing_zeros_,
                                            in);
-            xor_value <<= stored_trailing_zeros_;
+            xor_value = static_cast<uint32_t>(xor_value)
+                        << stored_trailing_zeros_;
             stored_value_ ^= xor_value;
             // missing break is intentional, we want to overflow to next one
         default:  // case '0': use stored value
@@ -191,7 +194,8 @@
                 read_long(VALUE_BITS_LENGTH_64BIT - stored_leading_zeros_ -
                               stored_trailing_zeros_,
                           in);
-            xor_value <<= stored_trailing_zeros_;
+            xor_value = static_cast<uint64_t>(xor_value)
+                        << stored_trailing_zeros_;
             stored_value_ ^= xor_value;
             // missing break is intentional, we want to overflow to next one
         }
diff --git a/cpp/src/encoding/gorilla_encoder.h b/cpp/src/encoding/gorilla_encoder.h
index 5ae4701..3fa6ecc 100644
--- a/cpp/src/encoding/gorilla_encoder.h
+++ b/cpp/src/encoding/gorilla_encoder.h
@@ -111,7 +111,7 @@
                 bits_left_ = 0;
             } else {
                 shift = bits_left_ - bits;
-                buffer_ |= (uint8_t)(value << shift);
+                buffer_ |= (uint8_t)(static_cast<uint64_t>(value) << shift);
                 bits_left_ -= bits;
                 bits = 0;
             }
diff --git a/cpp/src/encoding/int32_packer.h b/cpp/src/encoding/int32_packer.h
new file mode 100644
index 0000000..ecb42b2
--- /dev/null
+++ b/cpp/src/encoding/int32_packer.h
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_INT32PACKER_ENCODER_H
+#define ENCODING_INT32PACKER_ENCODER_H
+
+#define NUM_OF_INTS 8
+
+#include "encoder.h"
+
+namespace storage {
+
+class Int32Packer {
+   private:
+    int width_;
+
+   public:
+    Int32Packer(int width_) { this->width_ = width_; }
+    ~Int32Packer() { destroy(); }
+
+    void destroy() { /* do nothing for IntPacker */
+    }
+    void reset() { /* do thing for IntPacker */
+    }
+
+    void pack_8values(const int32_t values[], int offset, unsigned char buf[]) {
+        int buf_idx = 0;
+        int value_idx = offset;
+        // remaining bits for the current unfinished Integer
+        int left_bit = 0;
+
+        while (value_idx < NUM_OF_INTS + offset) {
+            // buffer is used for saving 32 bits as a part of result
+            int32_t buffer = 0;
+            // remaining size of bits in the 'buffer'
+            int left_size = 32;
+
+            // encode the left bits of current Integer to 'buffer'
+            if (left_bit > 0) {
+                buffer |= (static_cast<uint32_t>(values[value_idx])
+                           << (32 - left_bit));
+                left_size -= left_bit;
+                left_bit = 0;
+                value_idx++;
+            }
+
+            while (left_size >= width_ && value_idx < NUM_OF_INTS + offset) {
+                // encode one Integer to the 'buffer'
+                buffer |= (static_cast<uint32_t>(values[value_idx])
+                           << (left_size - width_));
+                left_size -= width_;
+                value_idx++;
+            }
+            // If the remaining space of the buffer can not save the bits for
+            // one Integer,
+            if (left_size > 0 && value_idx < NUM_OF_INTS + offset) {
+                // put the first 'left_size' bits of the Integer into remaining
+                // space of the buffer
+                buffer |= (static_cast<uint32_t>(values[value_idx]) >>
+                           (width_ - left_size));
+                left_bit = width_ - left_size;
+            }
+
+            // put the buffer into the final result
+            for (int j = 0; j < 4; j++) {
+                buf[buf_idx] =
+                    (unsigned char)((buffer >> ((3 - j) * 8)) & 0xFF);
+                buf_idx++;
+                // width_ is the bit num of each value, but here is means the
+                // max byte num
+                if (buf_idx >= width_) {
+                    return;
+                }
+            }
+        }
+    }
+
+    /**
+     * decode Integers from byte array.
+     *
+     * @param buf - array where bytes are in.
+     * @param offset - offset of first byte to be decoded in buf
+     * @param values - decoded result , the length of 'values' should be @{link
+     * IntPacker#NUM_OF_INTS}
+     */
+    void unpack_8values(const unsigned char buf[], int offset,
+                        int32_t values[]) {
+        int byte_idx = offset;
+        uint64_t buffer = 0;
+        // total bits which have reader from 'buf' to 'buffer'. i.e.,
+        // number of available bits to be decoded.
+        int total_bits = 0;
+        int value_idx = 0;
+
+        while (value_idx < NUM_OF_INTS) {
+            // If current available bits are not enough to decode one Integer,
+            // then add next byte from buf to 'buffer' until total_bits >= width
+            while (total_bits < width_) {
+                buffer = (buffer << 8) | (buf[byte_idx] & 0xFF);
+                byte_idx++;
+                total_bits += 8;
+            }
+
+            // If current available bits are enough to decode one Integer,
+            // then decode one Integer one by one until left bits in 'buffer' is
+            // not enough to decode one Integer.
+            while (total_bits >= width_ && value_idx < 8) {
+                values[value_idx] = (int)(buffer >> (total_bits - width_));
+                value_idx++;
+                total_bits -= width_;
+                buffer = buffer & ((1 << total_bits) - 1);
+            }
+        }
+    }
+
+    /**
+     * decode all values from 'buf' with specified offset and length decoded
+     * result will be saved in the array named 'values'.
+     *
+     * @param buf array where all bytes are in.
+     * @param length length of bytes to be decoded in buf.
+     * @param values decoded result.
+     */
+    void unpack_all_values(const unsigned char buf[], int length,
+                           int32_t values[]) {
+        int idx = 0;
+        int k = 0;
+        while (idx < length) {
+            int32_t tv[8];
+            // decode 8 values one time, current result will be saved in the
+            // array named 'tv'
+            unpack_8values(buf, idx, tv);
+            // System.arraycopy(tv, 0, values, k, 8);
+            std::memmove(values + k, tv, 8 * sizeof(int32_t));
+            idx += width_;
+            k += 8;
+        }
+    }
+
+    void set_width(int width_) { this->width_ = width_; }
+};
+
+}  // end namespace storage
+#endif  // ENCODING_IntPacker_ENCODER_H
diff --git a/cpp/src/encoding/int32_rle_decoder.h b/cpp/src/encoding/int32_rle_decoder.h
new file mode 100644
index 0000000..647f095
--- /dev/null
+++ b/cpp/src/encoding/int32_rle_decoder.h
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_INT32RLE_DECODER_H
+#define ENCODING_INT32RLE_DECODER_H
+
+#include <vector>
+
+#include "common/allocator/alloc_base.h"
+#include "decoder.h"
+#include "encoder.h"
+#include "encoding/encode_utils.h"
+#include "encoding/int32_packer.h"
+
+namespace storage {
+
+class Int32RleDecoder : public Decoder {
+   private:
+    uint32_t length_;
+    uint32_t bit_width_;
+    int bitpacking_num_;
+    bool is_length_and_bitwidth_readed_;
+    int current_count_;
+    common::ByteStream byte_cache_;
+    int32_t *current_buffer_;
+    Int32Packer *packer_;
+    uint8_t *tmp_buf_;
+
+   public:
+    Int32RleDecoder()
+        : length_(0),
+          bit_width_(0),
+          bitpacking_num_(0),
+          is_length_and_bitwidth_readed_(false),
+          current_count_(0),
+          byte_cache_(1024, common::MOD_DECODER_OBJ),
+          current_buffer_(nullptr),
+          packer_(nullptr),
+          tmp_buf_(nullptr) {}
+    ~Int32RleDecoder() override { destroy(); }
+
+    bool has_remaining(const common::ByteStream &buffer) override {
+        return buffer.has_remaining() || has_next_package();
+    }
+    int read_boolean(bool &ret_value, common::ByteStream &in) {
+        int32_t bool_value;
+        read_int32(bool_value, in);
+        ret_value = bool_value == 0 ? false : true;
+        return common::E_OK;
+    }
+    int read_int32(int32_t &ret_value, common::ByteStream &in) override {
+        ret_value = static_cast<int32_t>(read_int(in));
+        return common::E_OK;
+    }
+    int read_int64(int64_t &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_float(float &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String &ret_value, common::PageArena &pa,
+                    common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void init() {
+        packer_ = nullptr;
+        is_length_and_bitwidth_readed_ = false;
+        length_ = 0;
+        bit_width_ = 0;
+        bitpacking_num_ = 0;
+        current_count_ = 0;
+    }
+
+    bool has_next(common::ByteStream &buffer) {
+        if (current_count_ > 0 || buffer.remaining_size() > 0 ||
+            has_next_package()) {
+            return true;
+        }
+        return false;
+    }
+
+    bool has_next_package() {
+        return current_count_ > 0 || byte_cache_.remaining_size() > 0;
+    }
+
+    int32_t read_int(common::ByteStream &buffer) {
+        if (!is_length_and_bitwidth_readed_) {
+            // start to reader a new rle+bit-packing pattern
+            read_length_and_bitwidth(buffer);
+        }
+        if (current_count_ == 0) {
+            uint8_t header;
+            int ret = common::E_OK;
+            if (RET_FAIL(
+                    common::SerializationUtil::read_ui8(header, byte_cache_))) {
+                return ret;
+            }
+            call_read_bit_packing_buffer(header);
+        }
+        --current_count_;
+        int32_t result = current_buffer_[bitpacking_num_ - current_count_ - 1];
+        if (!has_next_package()) {
+            is_length_and_bitwidth_readed_ = false;
+        }
+        return result;
+    }
+
+    int call_read_bit_packing_buffer(uint8_t header) {
+        int bit_packed_group_count = (int)(header >> 1);
+        // in last bit-packing group, there may be some useless value,
+        // lastBitPackedNum indicates how many values is useful
+        uint8_t last_bit_packed_num;
+        int ret = common::E_OK;
+        if (RET_FAIL(common::SerializationUtil::read_ui8(last_bit_packed_num,
+                                                         byte_cache_))) {
+            return ret;
+        }
+        if (bit_packed_group_count > 0) {
+            current_count_ =
+                (bit_packed_group_count - 1) * 8 + last_bit_packed_num;
+            bitpacking_num_ = current_count_;
+        } else {
+            return common::E_DECODE_ERR;
+        }
+        ret = read_bit_packing_buffer(bit_packed_group_count,
+                                      last_bit_packed_num);
+        return ret;
+    }
+
+    int read_bit_packing_buffer(int bit_packed_group_count,
+                                int last_bit_packed_num) {
+        int ret = common::E_OK;
+        if (current_buffer_ != nullptr) {
+            common::mem_free(current_buffer_);
+        }
+        current_buffer_ = static_cast<int32_t *>(
+            common::mem_alloc(sizeof(int32_t) * bit_packed_group_count * 8,
+                              common::MOD_DECODER_OBJ));
+        if (IS_NULL(current_buffer_)) {
+            return common::E_OOM;
+        }
+        int bytes_to_read = bit_packed_group_count * bit_width_;
+        if (bytes_to_read > (int)byte_cache_.remaining_size()) {
+            bytes_to_read = byte_cache_.remaining_size();
+        }
+        std::vector<unsigned char> bytes(bytes_to_read);
+
+        for (int i = 0; i < bytes_to_read; i++) {
+            if (RET_FAIL(common::SerializationUtil::read_ui8(bytes[i],
+                                                             byte_cache_))) {
+                return ret;
+            }
+        }
+
+        // save all int values in currentBuffer
+        packer_->unpack_all_values(
+            bytes.data(), bytes_to_read,
+            current_buffer_);  // decode from bytes, save in currentBuffer
+        return ret;
+    }
+
+    int read_length_and_bitwidth(common::ByteStream &buffer) {
+        int ret = common::E_OK;
+        if (RET_FAIL(
+                common::SerializationUtil::read_var_uint(length_, buffer))) {
+            return common::E_PARTIAL_READ;
+        } else {
+            if (tmp_buf_) {
+                common::mem_free(tmp_buf_);
+            }
+            tmp_buf_ =
+                (uint8_t *)common::mem_alloc(length_, common::MOD_DECODER_OBJ);
+            if (tmp_buf_ == nullptr) {
+                return common::E_OOM;
+            }
+            uint32_t ret_read_len = 0;
+            if (RET_FAIL(buffer.read_buf((uint8_t *)tmp_buf_, length_,
+                                         ret_read_len))) {
+                return ret;
+            } else if (length_ != ret_read_len) {
+                ret = common::E_PARTIAL_READ;
+            }
+            byte_cache_.wrap_from((char *)tmp_buf_, length_);
+            is_length_and_bitwidth_readed_ = true;
+            uint8_t tmp_bit_width;
+            common::SerializationUtil::read_ui8(tmp_bit_width, byte_cache_);
+            bit_width_ = tmp_bit_width;
+            if (packer_ != nullptr) {
+                delete packer_;
+            }
+            init_packer();
+        }
+        return ret;
+    }
+
+    void init_packer() { packer_ = new Int32Packer(bit_width_); }
+
+    void destroy() { /* do nothing for BitpackEncoder */
+        if (packer_) {
+            delete (packer_);
+            packer_ = nullptr;
+        }
+        if (current_buffer_) {
+            common::mem_free(current_buffer_);
+            current_buffer_ = nullptr;
+        }
+        if (tmp_buf_) {
+            common::mem_free(tmp_buf_);
+            tmp_buf_ = nullptr;
+        }
+    }
+
+    void reset() override {
+        length_ = 0;
+        bit_width_ = 0;
+        bitpacking_num_ = 0;
+        is_length_and_bitwidth_readed_ = false;
+        current_count_ = 0;
+        if (current_buffer_) {
+            delete[] current_buffer_;
+            current_buffer_ = nullptr;
+        }
+        if (packer_) {
+            delete (packer_);
+            packer_ = nullptr;
+        }
+        if (tmp_buf_) {
+            common::mem_free(tmp_buf_);
+            tmp_buf_ = nullptr;
+        }
+    }
+};
+
+}  // end namespace storage
+#endif  // ENCODING_BITPACK_ENCODER_H
diff --git a/cpp/src/encoding/int32_rle_encoder.h b/cpp/src/encoding/int32_rle_encoder.h
new file mode 100644
index 0000000..69ffc1b
--- /dev/null
+++ b/cpp/src/encoding/int32_rle_encoder.h
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_INT32RLE_ENCODER_H
+#define ENCODING_INT32RLE_ENCODER_H
+
+#include <vector>
+
+#include "common/allocator/alloc_base.h"
+#include "encoder.h"
+#include "encoding/encode_utils.h"
+#include "encoding/int32_packer.h"
+#include "utils/errno_define.h"
+
+namespace storage {
+
+class Int32RleEncoder : public Encoder {
+   private:
+    int bitpacked_group_count_;
+    int num_buffered_values_;
+    int bit_width_;
+    Int32Packer *packer_;
+    common::ByteStream byte_cache_;
+    std::vector<int32_t> values_;  // all data tobe encoded
+    int32_t buffered_values_[8];   // encode each 8 values
+    std::vector<unsigned char> bytes_buffer_;
+
+    void inner_flush(common::ByteStream &out) {
+        int last_bitpacked_num = num_buffered_values_;
+        if (num_buffered_values_ > 0) {
+            clear_buffer();
+            write_or_append_bitpacked_run();
+            end_previous_bitpacked_run(last_bitpacked_num);
+        } else {
+            end_previous_bitpacked_run(8);
+        }
+        uint32_t b_length = byte_cache_.total_size();
+        common::SerializationUtil::write_var_uint(b_length, out);
+        merge_byte_stream(out, byte_cache_);
+        reset();
+    }
+
+   public:
+    // BitPackEncoder() :byte_cache_(1024,common::MOD_ENCODER_OBJ){}
+    Int32RleEncoder()
+        : bitpacked_group_count_(0),
+          num_buffered_values_(0),
+          bit_width_(0),
+          packer_(nullptr),
+          byte_cache_(1024, common::MOD_ENCODER_OBJ) {}
+    ~Int32RleEncoder() override { destroy(); }
+
+    int encode(bool value, common::ByteStream &out_stream) override {
+        int32_t bool_value = value == true ? 1 : 0;
+        return encode(bool_value, out_stream);
+    }
+    int encode(float value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(double value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(common::String value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void init() {
+        bitpacked_group_count_ = 0;
+        num_buffered_values_ = 0;
+        bit_width_ = 0;
+        packer_ = nullptr;
+    }
+
+    void destroy() override { delete (packer_); }
+
+    void reset() override {
+        num_buffered_values_ = 0;
+        bitpacked_group_count_ = 0;
+        bit_width_ = 0;
+        bytes_buffer_.clear();
+        byte_cache_.reset();
+        values_.clear();
+        delete (packer_);
+        packer_ = nullptr;
+    }
+
+    FORCE_INLINE int encode(int64_t value, common::ByteStream &out) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    FORCE_INLINE int encode(int32_t value, common::ByteStream &out) override {
+        values_.push_back(value);
+        // The current_bit_width must be at least 1, even if value is 0.
+        int current_bit_width =
+            std::max(1, 32 - number_of_leading_zeros(value));
+        if (current_bit_width > bit_width_) {
+            bit_width_ = current_bit_width;
+        }
+        return common::E_OK;
+    }
+
+    int flush(common::ByteStream &out) override {
+        ASSERT(packer_ == nullptr);
+        if (bit_width_ == 0) return common::E_OK;
+        packer_ = new Int32Packer(bit_width_);
+        common::SerializationUtil::write_i8(bit_width_, byte_cache_);
+        for (size_t i = 0; i < values_.size(); i++) {
+            // encodeValue(value);
+            buffered_values_[num_buffered_values_] = values_[i];
+            num_buffered_values_++;
+            if (num_buffered_values_ == 8) {
+                write_or_append_bitpacked_run();
+            }
+        }
+        inner_flush(out);
+        return common::E_OK;
+    }
+
+    void write_or_append_bitpacked_run() {
+        if (bitpacked_group_count_ >= 63) {
+            // we've packed as many values as we can for this run,
+            // end it and start a new one
+            end_previous_bitpacked_run(8);
+        }
+        convert_buffer();
+        num_buffered_values_ = 0;
+        ++bitpacked_group_count_;
+    }
+
+    void convert_buffer() {
+        // TODO: put the bytes on the stack instead on the heap
+        unsigned char *bytes = (unsigned char *)common::mem_alloc(
+            bit_width_, common::MOD_BITENCODE_OBJ);
+        int32_t tmp_buffer[8];
+        for (int i = 0; i < 8; i++) {
+            tmp_buffer[i] = (int64_t)buffered_values_[i];
+        }
+        packer_->pack_8values(tmp_buffer, 0, bytes);
+        // we'll not writer bit-packing group to OutputStream immediately
+        // we buffer them in list
+        for (int i = 0; i < bit_width_; i++) {
+            bytes_buffer_.push_back(bytes[i]);
+        }
+        common::mem_free(bytes);
+    }
+
+    void clear_buffer() {
+        for (int i = num_buffered_values_; i < 8; i++) {
+            buffered_values_[i] = 0;
+        }
+    }
+
+    void end_previous_bitpacked_run(int last_bitpacked_num) {
+        unsigned char bitPackHeader =
+            (unsigned char)((bitpacked_group_count_ << 1) | 1);
+        common::SerializationUtil::write_ui8(bitPackHeader, byte_cache_);
+        common::SerializationUtil::write_ui8((uint8_t)last_bitpacked_num,
+                                             byte_cache_);
+        for (size_t i = 0; i < bytes_buffer_.size(); i++) {
+            common::SerializationUtil::write_ui8(bytes_buffer_[i], byte_cache_);
+        }
+        bytes_buffer_.clear();
+        bitpacked_group_count_ = 0;
+    }
+
+    int get_max_byte_size() override {
+        if (values_.empty()) {
+            return 0;
+        }
+        int totalValues = values_.size();
+        int fullGroups = totalValues / 8;
+        int remainingValues = totalValues % 8;
+        int bytesPerGroup = (bit_width_ * 8 + 7) / 8;
+        int maxSize = 0;
+        maxSize += fullGroups * bytesPerGroup;
+        if (remainingValues > 0) {
+            maxSize += bytesPerGroup;
+        }
+
+        // Add additional bytes, because each bitpack group has a header of 1
+        // byte and a tail of 1 byte.
+        maxSize += fullGroups * (1 + 1) + (remainingValues > 0 ? (1 + 1) : 0);
+        return maxSize;
+    }
+};
+
+}  // end namespace storage
+#endif  // ENCODING_BITPACK_ENCODER_H
diff --git a/cpp/src/encoding/int32_sprintz_decoder.h b/cpp/src/encoding/int32_sprintz_decoder.h
new file mode 100644
index 0000000..1a23692
--- /dev/null
+++ b/cpp/src/encoding/int32_sprintz_decoder.h
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef INT32_SPRINTZ_DECODER_H
+#define INT32_SPRINTZ_DECODER_H
+
+#include <iostream>
+#include <istream>
+#include <memory>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "encoding/fire.h"
+#include "encoding/int32_rle_decoder.h"
+#include "int32_packer.h"
+#include "sprintz_decoder.h"
+
+namespace storage {
+
+class Int32SprintzDecoder : public SprintzDecoder {
+   public:
+    Int32SprintzDecoder()
+        : current_value_(0),
+          pre_value_(0),
+          current_buffer_(block_size_ + 1),
+          fire_pred_(2),
+          predict_scheme_("fire") {
+        SprintzDecoder::reset();
+        current_value_ = 0;
+        pre_value_ = 0;
+        current_count_ = 0;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0);
+    }
+
+    ~Int32SprintzDecoder() override = default;
+
+    void set_predict_method(const std::string &method) {
+        predict_scheme_ = method;
+    }
+
+    bool has_remaining(const common::ByteStream &in) {
+        int min_len = sizeof(int32_t) + 1;
+        return (is_block_read_ && current_count_ < block_size_) ||
+               in.remaining_size() >= min_len;
+    }
+
+    int read_boolean(bool &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_int64(int64_t &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_float(float &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String &ret_value, common::PageArena &pa,
+                    common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_int32(int32_t &ret_value, common::ByteStream &in) {
+        int ret = common::E_OK;
+        if (!is_block_read_) {
+            if (RET_FAIL(decode_block(in))) {
+                return ret;
+            }
+        }
+        ret_value = current_buffer_[current_count_++];
+        if (current_count_ == decode_size_) {
+            is_block_read_ = false;
+            current_count_ = 0;
+        }
+        return ret;
+    }
+
+    void reset() override {
+        SprintzDecoder::reset();
+        current_value_ = 0;
+        pre_value_ = 0;
+        current_count_ = 0;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0);
+    }
+
+    bool has_next(common::ByteStream &input) {
+        int min_lenth = sizeof(int32_t) + 1;
+        return (is_block_read_ && current_count_ < block_size_) ||
+               input.remaining_size() >= min_lenth;
+    }
+
+   protected:
+    int decode_block(common::ByteStream &input) override {
+        // read header bitWidth
+        int ret = common::E_OK;
+        uint8_t byte;
+        uint32_t bit_width = 0, read_len = 0;
+        ret = input.read_buf(&byte, 1, read_len);
+        if (ret != common::E_OK || read_len != 1) {
+            return common::E_DECODE_ERR;
+        }
+        bit_width |= static_cast<uint32_t>(byte);
+        bit_width_ = static_cast<int32_t>(bit_width);
+
+        if ((bit_width_ & (1 << 7)) != 0) {
+            decode_size_ = bit_width_ & ~(1 << 7);
+            Int32RleDecoder decoder;
+            for (int i = 0; i < decode_size_; ++i) {
+                current_buffer_[i] = decoder.read_int(input);
+            }
+        } else {
+            decode_size_ = block_size_ + 1;
+            uint32_t tmp_prev_value;
+            common::SerializationUtil::read_var_uint(tmp_prev_value, input);
+            pre_value_ = tmp_prev_value;
+            current_buffer_[0] = pre_value_;
+
+            std::vector<uint8_t> pack_buf(bit_width_);
+            uint32_t read_len = 0;
+            input.read_buf(reinterpret_cast<char *>(pack_buf.data()),
+                           bit_width_, read_len);
+
+            std::vector<int32_t> tmp_buffer(8);
+            packer_ = std::make_shared<Int32Packer>(bit_width_);
+            packer_->unpack_8values(pack_buf.data(), 0, tmp_buffer.data());
+
+            for (int i = 0; i < 8; ++i) {
+                current_buffer_[i + 1] = tmp_buffer[i];
+            }
+            ret = recalculate();
+        }
+        is_block_read_ = true;
+        return ret;
+    }
+
+    int recalculate() override {
+        int ret = common::E_OK;
+        for (int i = 1; i <= block_size_; ++i) {
+            if (current_buffer_[i] % 2 == 0) {
+                current_buffer_[i] = -current_buffer_[i] / 2;
+            } else {
+                current_buffer_[i] = (current_buffer_[i] + 1) / 2;
+            }
+        }
+
+        if (predict_scheme_ == "delta") {
+            for (size_t i = 1; i < current_buffer_.size(); ++i) {
+                current_buffer_[i] += current_buffer_[i - 1];
+            }
+        } else if (predict_scheme_ == "fire") {
+            fire_pred_.reset();
+            for (int i = 1; i <= block_size_; ++i) {
+                int32_t pred = fire_pred_.predict(current_buffer_[i - 1]);
+                int32_t err = current_buffer_[i];
+                current_buffer_[i] = pred + err;
+                fire_pred_.train(current_buffer_[i - 1], current_buffer_[i],
+                                 err);
+            }
+        } else {
+            ret = common::E_DECODE_ERR;
+        }
+        return ret;
+    }
+
+   private:
+    std::shared_ptr<Int32Packer> packer_;
+    IntFire fire_pred_;
+    int32_t pre_value_;
+    int32_t current_value_;
+    std::vector<int32_t> current_buffer_;
+    std::string predict_scheme_;
+};
+
+}  // namespace storage
+
+#endif  // INT32_SPRINTZ_DECODER_H
diff --git a/cpp/src/encoding/int32_sprintz_encoder.h b/cpp/src/encoding/int32_sprintz_encoder.h
new file mode 100644
index 0000000..c7d38e2
--- /dev/null
+++ b/cpp/src/encoding/int32_sprintz_encoder.h
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef INT32SPRINTZENCODER_H
+#define INT32SPRINTZENCODER_H
+
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encode_utils.h"
+#include "encoding/encode_utils.h"
+#include "encoding/fire.h"
+#include "encoding/int32_rle_encoder.h"
+#include "int32_packer.h"
+#include "sprintz_encoder.h"
+
+namespace storage {
+class Int32SprintzEncoder : public SprintzEncoder {
+   public:
+    Int32SprintzEncoder() : SprintzEncoder(), fire_pred_(2) {}
+
+    ~Int32SprintzEncoder() override = default;
+
+    void reset() override {
+        SprintzEncoder::reset();
+        values_.clear();
+    }
+
+    void destroy() override {}
+
+    int encode(bool value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(int64_t value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(float value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(double value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(common::String value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int get_one_item_max_size() override {
+        return 1 + (1 + block_size_) * sizeof(int32_t);
+    }
+
+    int get_max_byte_size() override {
+        return 1 + (values_.size() + 1) * sizeof(int32_t);
+    }
+
+    int encode(int32_t value, common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (!is_first_cached_) {
+            values_.push_back(value);
+            is_first_cached_ = true;
+            return ret;
+        }
+
+        values_.push_back(value);
+
+        if (values_.size() == block_size_ + 1) {
+            int32_t prev = values_[0];
+            fire_pred_.reset();
+            for (int i = 1; i <= block_size_; ++i) {
+                int32_t temp = values_[i];
+                values_[i] = predict(values_[i], prev);
+                prev = temp;
+            }
+
+            bit_pack();
+            is_first_cached_ = false;
+            values_.clear();
+            group_num_++;
+
+            if (group_num_ == group_max_) {
+                if (RET_FAIL(flush(out_stream))) {
+                    return ret;
+                }
+            }
+        }
+        return ret;
+    }
+
+    int flush(common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (byte_cache_.total_size() > 0) {
+            if (RET_FAIL(common::SerializationUtil::chunk_read_all_data(
+                    byte_cache_, out_stream))) {
+                return ret;
+            }
+        }
+
+        if (!values_.empty()) {
+            int size = static_cast<int>(values_.size());
+            size |= (1 << 7);  // set MSB
+
+            common::SerializationUtil::
+                write_int_little_endian_padded_on_bit_width(size, out_stream,
+                                                            1);
+            Int32RleEncoder encoder;
+            for (int32_t val : values_) {
+                encoder.encode(val, out_stream);
+            }
+            encoder.flush(out_stream);
+        }
+
+        reset();
+        return ret;
+    }
+
+   protected:
+    void bit_pack() override {
+        int32_t pre_value = values_[0];
+        values_.erase(values_.begin());  // remove first value
+
+        bit_width_ = get_int32_max_bit_width(values_);
+        packer_ = std::make_shared<Int32Packer>(bit_width_);
+
+        std::vector<uint8_t> bytes(bit_width_);
+        std::vector<int32_t> tmp_buffer(values_.begin(),
+                                        values_.begin() + block_size_);
+        packer_->pack_8values(tmp_buffer.data(), 0, bytes.data());
+
+        common::SerializationUtil::write_int_little_endian_padded_on_bit_width(
+            bit_width_, byte_cache_, 1);
+        common::SerializationUtil::write_var_uint(pre_value, byte_cache_);
+        byte_cache_.write_buf(reinterpret_cast<const char*>(bytes.data()),
+                              bytes.size());
+    }
+
+    int32_t predict(int32_t value, int32_t prev) {
+        int32_t pred = 0;
+        if (predict_method_ == "delta") {
+            pred = delta(value, prev);
+        } else if (predict_method_ == "fire") {
+            pred = fire(value, prev);
+        } else {
+            // unsupport
+            ASSERT(false);
+        }
+
+        return (pred <= 0) ? -2 * pred : 2 * pred - 1;
+    }
+
+    int32_t delta(int32_t value, int32_t prev) { return value - prev; }
+
+    int32_t fire(int32_t value, int32_t prev) {
+        int32_t pred = fire_pred_.predict(prev);
+        int32_t err = value - pred;
+        fire_pred_.train(prev, value, err);
+        return err;
+    }
+
+   private:
+    std::vector<int32_t> values_;
+    std::shared_ptr<Int32Packer> packer_;
+    IntFire fire_pred_;
+};
+}  // namespace storage
+
+#endif  // INT32_SPRINTZ_ENCODER_H
diff --git a/cpp/src/encoding/int64_packer.h b/cpp/src/encoding/int64_packer.h
new file mode 100644
index 0000000..2326720
--- /dev/null
+++ b/cpp/src/encoding/int64_packer.h
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_INT64PACKER_ENCODER_H
+#define ENCODING_INT64PACKER_ENCODER_H
+
+#define NUM_OF_INTS 8
+
+#include "encoder.h"
+
+namespace storage {
+
+class Int64Packer {
+   private:
+    int width_;
+
+   public:
+    Int64Packer(int width_) { this->width_ = width_; }
+    ~Int64Packer() { destroy(); }
+
+    void destroy() { /* do nothing for IntPacker */
+    }
+    void reset() { /* do thing for IntPacker */
+    }
+
+    void pack_8values(const int64_t values[], int offset, unsigned char buf[]) {
+        int buf_idx = 0;
+        int value_idx = offset;
+        // remaining bits for the current unfinished Integer
+        int left_bit = 0;
+
+        while (value_idx < NUM_OF_INTS + offset) {
+            // buffer is used for saving 64 bits as a part of result
+            int64_t buffer = 0;
+            // remaining size of bits in the 'buffer'
+            int left_size = 64;
+
+            // encode the left bits of current Integer to 'buffer'
+            if (left_bit > 0) {
+                buffer |= (static_cast<uint64_t>(values[value_idx])
+                           << (64 - left_bit));
+                left_size -= left_bit;
+                left_bit = 0;
+                value_idx++;
+            }
+
+            while (left_size >= width_ && value_idx < NUM_OF_INTS + offset) {
+                // encode one Integer to the 'buffer'
+                buffer |= (static_cast<uint64_t>(values[value_idx])
+                           << (left_size - width_));
+                left_size -= width_;
+                value_idx++;
+            }
+            // If the remaining space of the buffer can not save the bits for
+            // one Integer,
+            if (left_size > 0 && value_idx < NUM_OF_INTS + offset) {
+                // put the first 'left_size' bits of the Integer into remaining
+                // space of the buffer
+                buffer |= ((static_cast<uint64_t>(values[value_idx])) >>
+                           (width_ - left_size));
+                left_bit = width_ - left_size;
+            }
+
+            // put the buffer into the final result
+            for (int j = 0; j < 8; j++) {
+                buf[buf_idx] =
+                    (unsigned char)(((uint64_t)buffer >> ((8 - j - 1) * 8)) &
+                                    0xFF);
+                buf_idx++;
+                // width_ is the bit num of each value, but here is means the
+                // max byte num
+                if (buf_idx >= width_ * 8 / 8) {
+                    return;
+                }
+            }
+        }
+    }
+
+    /**
+     * decode Integers from byte array.
+     *
+     * @param buf - array where bytes are in.
+     * @param offset - offset of first byte to be decoded in buf
+     * @param values - decoded result , the length of 'values' should be @{link
+     * IntPacker#NUM_OF_INTS}
+     */
+    void unpack_8values(const unsigned char buf[], int offset,
+                        int64_t values[]) {
+        int byte_idx = offset;
+        int value_idx = 0;
+        int left_bits = 8;
+        int total_bits = 0;
+
+        while (value_idx < 8) {
+            values[value_idx] = 0;
+            total_bits = 0;
+
+            while (total_bits < width_) {
+                if (width_ - total_bits >= left_bits) {
+                    values[value_idx] <<= left_bits;
+                    values[value_idx] |= static_cast<int64_t>(
+                        buf[byte_idx] & ((1 << left_bits) - 1));
+                    total_bits += left_bits;
+                    byte_idx++;
+                    left_bits = 8;
+                } else {
+                    int t = width_ - total_bits;
+                    values[value_idx] <<= t;
+                    values[value_idx] |= static_cast<int64_t>(
+                        (buf[byte_idx] & ((1 << left_bits) - 1)) >>
+                        (left_bits - t));
+                    left_bits -= t;
+                    total_bits += t;
+                }
+            }
+
+            value_idx++;
+        }
+    }
+
+    /**
+     * decode all values from 'buf' with specified offset and length decoded
+     * result will be saved in the array named 'values'.
+     *
+     * @param buf array where all bytes are in.
+     * @param length length of bytes to be decoded in buf.
+     * @param values decoded result.
+     */
+    void unpack_all_values(const unsigned char buf[], int length,
+                           int64_t values[]) {
+        int idx = 0;
+        int k = 0;
+        while (idx < length) {
+            int64_t tv[8];
+            // decode 8 values one time, current result will be saved in the
+            // array named 'tv'
+            unpack_8values(buf, idx, tv);
+            // System.arraycopy(tv, 0, values, k, 8);
+            std::memmove(values + k, tv, 8 * sizeof(int64_t));
+            idx += width_;
+            k += 8;
+        }
+    }
+
+    void set_width(int width_) { this->width_ = width_; }
+};
+
+}  // end namespace storage
+#endif  // ENCODING_IntPacker_ENCODER_H
diff --git a/cpp/src/encoding/int64_rle_decoder.h b/cpp/src/encoding/int64_rle_decoder.h
new file mode 100644
index 0000000..7b98cc0
--- /dev/null
+++ b/cpp/src/encoding/int64_rle_decoder.h
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_INT64RLE_DECODER_H
+#define ENCODING_INT64RLE_DECODER_H
+
+#include <vector>
+
+#include "common/allocator/alloc_base.h"
+#include "decoder.h"
+#include "encoder.h"
+#include "encoding/encode_utils.h"
+#include "encoding/int64_packer.h"
+
+namespace storage {
+
+class Int64RleDecoder : public Decoder {
+   private:
+    uint32_t length_;
+    uint32_t bit_width_;
+    int bitpacking_num_;
+    bool is_length_and_bitwidth_readed_;
+    int current_count_;
+    common::ByteStream byte_cache_;
+    int64_t *current_buffer_;
+    Int64Packer *packer_;
+    uint8_t *tmp_buf_;
+
+   public:
+    Int64RleDecoder()
+        : length_(0),
+          bit_width_(0),
+          bitpacking_num_(0),
+          is_length_and_bitwidth_readed_(false),
+          current_count_(0),
+          byte_cache_(1024, common::MOD_DECODER_OBJ),
+          current_buffer_(nullptr),
+          packer_(nullptr),
+          tmp_buf_(nullptr) {}
+    ~Int64RleDecoder() override { destroy(); }
+
+    bool has_remaining(const common::ByteStream &buffer) override {
+        return buffer.has_remaining() || has_next_package();
+    }
+    int read_boolean(bool &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int32(int32_t &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_int64(int64_t &ret_value, common::ByteStream &in) override {
+        ret_value = read_int(in);
+        return common::E_OK;
+    }
+    int read_float(float &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String &ret_value, common::PageArena &pa,
+                    common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void init() {
+        packer_ = nullptr;
+        is_length_and_bitwidth_readed_ = false;
+        length_ = 0;
+        bit_width_ = 0;
+        bitpacking_num_ = 0;
+        current_count_ = 0;
+    }
+
+    bool has_next(common::ByteStream &buffer) {
+        if (current_count_ > 0 || buffer.remaining_size() > 0 ||
+            has_next_package()) {
+            return true;
+        }
+        return false;
+    }
+
+    bool has_next_package() {
+        return current_count_ > 0 || byte_cache_.remaining_size() > 0;
+    }
+
+    int64_t read_int(common::ByteStream &buffer) {
+        if (!is_length_and_bitwidth_readed_) {
+            // start to reader a new rle+bit-packing pattern
+            read_length_and_bitwidth(buffer);
+        }
+        if (current_count_ == 0) {
+            uint8_t header;
+            int ret = common::E_OK;
+            if (RET_FAIL(
+                    common::SerializationUtil::read_ui8(header, byte_cache_))) {
+                return ret;
+            }
+            call_read_bit_packing_buffer(header);
+        }
+        --current_count_;
+        int64_t result = current_buffer_[bitpacking_num_ - current_count_ - 1];
+        if (!has_next_package()) {
+            is_length_and_bitwidth_readed_ = false;
+        }
+        return result;
+    }
+
+    int call_read_bit_packing_buffer(uint8_t header) {
+        int bit_packed_group_count = (int)(header >> 1);
+        // in last bit-packing group, there may be some useless value,
+        // lastBitPackedNum indicates how many values is useful
+        uint8_t last_bit_packed_num;
+        int ret = common::E_OK;
+        if (RET_FAIL(common::SerializationUtil::read_ui8(last_bit_packed_num,
+                                                         byte_cache_))) {
+            return ret;
+        }
+        if (bit_packed_group_count > 0) {
+            current_count_ =
+                (bit_packed_group_count - 1) * 8 + last_bit_packed_num;
+            bitpacking_num_ = current_count_;
+        } else {
+            printf(
+                "tsfile-encoding IntRleDecoder: bit_packed_group_count %d, "
+                "smaller "
+                "than 1",
+                bit_packed_group_count);
+        }
+        read_bit_packing_buffer(bit_packed_group_count, last_bit_packed_num);
+        return ret;
+    }
+
+    void read_bit_packing_buffer(int bit_packed_group_count,
+                                 int last_bit_packed_num) {
+        if (current_buffer_ != nullptr) {
+            delete[] current_buffer_;
+        }
+        current_buffer_ = new int64_t[bit_packed_group_count * 8];
+        int bytes_to_read = bit_packed_group_count * bit_width_;
+        if (bytes_to_read > (int)byte_cache_.remaining_size()) {
+            bytes_to_read = byte_cache_.remaining_size();
+        }
+        std::vector<unsigned char> bytes(bytes_to_read);
+
+        for (int i = 0; i < bytes_to_read; i++) {
+            common::SerializationUtil::read_ui8(bytes[i], byte_cache_);
+        }
+
+        // save all int values in currentBuffer
+        packer_->unpack_all_values(
+            bytes.data(), bytes_to_read,
+            current_buffer_);  // decode from bytes, save in currentBuffer
+    }
+
+    int read_length_and_bitwidth(common::ByteStream &buffer) {
+        int ret = common::E_OK;
+        if (RET_FAIL(
+                common::SerializationUtil::read_var_uint(length_, buffer))) {
+            return common::E_PARTIAL_READ;
+        } else {
+            tmp_buf_ =
+                (uint8_t *)common::mem_alloc(length_, common::MOD_DECODER_OBJ);
+            if (tmp_buf_ == nullptr) {
+                return common::E_OOM;
+            }
+            uint32_t ret_read_len = 0;
+            if (RET_FAIL(buffer.read_buf((uint8_t *)tmp_buf_, length_,
+                                         ret_read_len))) {
+                return ret;
+            } else if (length_ != ret_read_len) {
+                ret = common::E_PARTIAL_READ;
+            }
+            byte_cache_.wrap_from((char *)tmp_buf_, length_);
+            is_length_and_bitwidth_readed_ = true;
+            uint8_t tmp_bit_width;
+            common::SerializationUtil::read_ui8(tmp_bit_width, byte_cache_);
+            bit_width_ = tmp_bit_width;
+            init_packer();
+        }
+        return ret;
+    }
+
+    void init_packer() { packer_ = new Int64Packer(bit_width_); }
+
+    void destroy() { /* do nothing for BitpackEncoder */
+        if (packer_) {
+            delete (packer_);
+            packer_ = nullptr;
+        }
+        if (current_buffer_) {
+            delete[] current_buffer_;
+            current_buffer_ = nullptr;
+        }
+        if (tmp_buf_) {
+            common::mem_free(tmp_buf_);
+            tmp_buf_ = nullptr;
+        }
+    }
+
+    void reset() override {
+        length_ = 0;
+        bit_width_ = 0;
+        bitpacking_num_ = 0;
+        is_length_and_bitwidth_readed_ = false;
+        current_count_ = 0;
+        if (current_buffer_) {
+            delete[] current_buffer_;
+            current_buffer_ = nullptr;
+        }
+        if (packer_) {
+            delete (packer_);
+            packer_ = nullptr;
+        }
+        if (tmp_buf_) {
+            common::mem_free(tmp_buf_);
+            tmp_buf_ = nullptr;
+        }
+    }
+};
+
+}  // end namespace storage
+#endif  // ENCODING_BITPACK_ENCODER_H
diff --git a/cpp/src/encoding/int64_rle_encoder.h b/cpp/src/encoding/int64_rle_encoder.h
new file mode 100644
index 0000000..abcac68
--- /dev/null
+++ b/cpp/src/encoding/int64_rle_encoder.h
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef ENCODING_INT64RLE_ENCODER_H
+#define ENCODING_INT64RLE_ENCODER_H
+
+#include <vector>
+
+#include "common/allocator/alloc_base.h"
+#include "encoder.h"
+#include "encoding/encode_utils.h"
+#include "encoding/int64_packer.h"
+#include "utils/errno_define.h"
+
+namespace storage {
+
+class Int64RleEncoder : public Encoder {
+   private:
+    int bitpacked_group_count_;
+    int num_buffered_values_;
+    int bit_width_;
+    Int64Packer *packer_;
+    common::ByteStream byte_cache_;
+    std::vector<int64_t> values_;  // all data tobe encoded
+    int64_t buffered_values_[8];   // encode each 8 values
+    std::vector<unsigned char> bytes_buffer_;
+
+    void inner_flush(common::ByteStream &out) {
+        int last_bitpacked_num = num_buffered_values_;
+        if (num_buffered_values_ > 0) {
+            clear_buffer();
+            write_or_append_bitpacked_run();
+            end_previous_bitpacked_run(last_bitpacked_num);
+        } else {
+            end_previous_bitpacked_run(8);
+        }
+        uint64_t b_length = byte_cache_.total_size();
+        common::SerializationUtil::write_var_uint(b_length, out);
+        merge_byte_stream(out, byte_cache_);
+        reset();
+    }
+
+   public:
+    // BitPackEncoder() :byte_cache_(1024,common::MOD_ENCODER_OBJ){}
+    Int64RleEncoder()
+        : bitpacked_group_count_(0),
+          num_buffered_values_(0),
+          bit_width_(0),
+          packer_(nullptr),
+          byte_cache_(1024, common::MOD_ENCODER_OBJ) {}
+    ~Int64RleEncoder() override { destroy(); }
+
+    int encode(bool value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(float value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(double value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(common::String value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void init() {
+        bitpacked_group_count_ = 0;
+        num_buffered_values_ = 0;
+        bit_width_ = 0;
+        packer_ = nullptr;
+    }
+
+    void destroy() override { delete (packer_); }
+
+    void reset() override {
+        num_buffered_values_ = 0;
+        bitpacked_group_count_ = 0;
+        bit_width_ = 0;
+        bytes_buffer_.clear();
+        byte_cache_.reset();
+        values_.clear();
+        delete (packer_);
+        packer_ = nullptr;
+        memset(buffered_values_, 0, sizeof(buffered_values_));
+    }
+
+    FORCE_INLINE int encode(int64_t value, common::ByteStream &out) override {
+        values_.push_back(value);
+        // The current_bit_width must be at least 1, even if value is 0.
+        int current_bit_width =
+            std::max(1, 64 - number_of_leading_zeros(value));
+        if (current_bit_width > bit_width_) {
+            bit_width_ = current_bit_width;
+        }
+        return common::E_OK;
+    }
+
+    FORCE_INLINE int encode(int32_t value, common::ByteStream &out) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int flush(common::ByteStream &out) override {
+        ASSERT(packer_ == nullptr);
+        if (bit_width_ == 0) return common::E_OK;
+        packer_ = new Int64Packer(bit_width_);
+        common::SerializationUtil::write_i8(bit_width_, byte_cache_);
+        for (size_t i = 0; i < values_.size(); i++) {
+            // encodeValue(value);
+            buffered_values_[num_buffered_values_] = values_[i];
+            num_buffered_values_++;
+            if (num_buffered_values_ == 8) {
+                write_or_append_bitpacked_run();
+            }
+        }
+        inner_flush(out);
+        return common::E_OK;
+    }
+
+    void write_or_append_bitpacked_run() {
+        if (bitpacked_group_count_ >= 63) {
+            // we've packed as many values as we can for this run,
+            // end it and start a new one
+            end_previous_bitpacked_run(8);
+        }
+        convert_buffer();
+        num_buffered_values_ = 0;
+        ++bitpacked_group_count_;
+    }
+
+    void convert_buffer() {
+        // TODO: put the bytes on the stack instead on the heap
+        unsigned char *bytes = (unsigned char *)common::mem_alloc(
+            bit_width_, common::MOD_BITENCODE_OBJ);
+        int64_t tmp_buffer[8];
+        for (int i = 0; i < 8; i++) {
+            tmp_buffer[i] = (int64_t)buffered_values_[i];
+        }
+        packer_->pack_8values(tmp_buffer, 0, bytes);
+        // we'll not writer bit-packing group to OutputStream immediately
+        // we buffer them in list
+        for (int i = 0; i < bit_width_; i++) {
+            bytes_buffer_.push_back(bytes[i]);
+        }
+        common::mem_free(bytes);
+    }
+
+    void clear_buffer() {
+        for (int i = num_buffered_values_; i < 8; i++) {
+            buffered_values_[i] = 0;
+        }
+    }
+
+    void end_previous_bitpacked_run(int last_bitpacked_num) {
+        unsigned char bitPackHeader =
+            (unsigned char)((bitpacked_group_count_ << 1) | 1);
+        common::SerializationUtil::write_ui8(bitPackHeader, byte_cache_);
+        common::SerializationUtil::write_ui8((uint8_t)last_bitpacked_num,
+                                             byte_cache_);
+        for (size_t i = 0; i < bytes_buffer_.size(); i++) {
+            common::SerializationUtil::write_ui8(bytes_buffer_[i], byte_cache_);
+        }
+        bytes_buffer_.clear();
+        bitpacked_group_count_ = 0;
+    }
+
+    int get_max_byte_size() override {
+        if (values_.empty()) {
+            return 0;
+        }
+        int totalValues = values_.size();
+        int fullGroups = totalValues / 8;
+        int remainingValues = totalValues % 8;
+        int bytesPerGroup = (bit_width_ * 8 + 7) / 8;
+        int maxSize = 0;
+        maxSize += fullGroups * bytesPerGroup;
+        if (remainingValues > 0) {
+            maxSize += bytesPerGroup;
+        }
+
+        // Add additional bytes, because each bitpack group has a header of 1
+        // byte and a tail of 1 byte.
+        maxSize += fullGroups * (1 + 1) + (remainingValues > 0 ? (1 + 1) : 0);
+        return maxSize;
+    }
+};
+
+}  // end namespace storage
+#endif  // ENCODING_BITPACK_ENCODER_H
diff --git a/cpp/src/encoding/int64_sprintz_decoder.h b/cpp/src/encoding/int64_sprintz_decoder.h
new file mode 100644
index 0000000..d1db9f9
--- /dev/null
+++ b/cpp/src/encoding/int64_sprintz_decoder.h
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef INT64_SPRINTZ_DECODER_H
+#define INT64_SPRINTZ_DECODER_H
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/fire.h"
+#include "encoding/int64_packer.h"
+#include "encoding/int64_rle_decoder.h"
+#include "sprintz_decoder.h"
+
+namespace storage {
+
+class Int64SprintzDecoder : public SprintzDecoder {
+   public:
+    Int64SprintzDecoder()
+        : current_value_(0),
+          pre_value_(0),
+          current_buffer_(block_size_ + 1),
+          fire_pred_(3),
+          predict_scheme_("fire") {
+        SprintzDecoder::reset();
+        current_count_ = 0;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0);
+    }
+
+    ~Int64SprintzDecoder() override = default;
+
+    void set_predict_method(const std::string& method) {
+        predict_scheme_ = method;
+    }
+
+    void reset() override {
+        SprintzDecoder::reset();
+        current_value_ = 0;
+        pre_value_ = 0;
+        current_count_ = 0;
+        std::fill(current_buffer_.begin(), current_buffer_.end(), 0);
+    }
+
+    bool has_remaining(const common::ByteStream& in) {
+        return (is_block_read_ && current_count_ < block_size_) ||
+               in.has_remaining();
+    }
+
+    bool has_next(common::ByteStream& input) {
+        return (is_block_read_ && current_count_ < block_size_) ||
+               input.remaining_size() > 0;
+    }
+
+    int read_int32(int32_t& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_boolean(bool& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_float(float& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_double(double& ret_value, common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_String(common::String& ret_value, common::PageArena& pa,
+                    common::ByteStream& in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int read_int64(int64_t& ret_value, common::ByteStream& in) override {
+        int ret = common::E_OK;
+        if (!is_block_read_) {
+            if (RET_FAIL(decode_block(in))) {
+                return ret;
+            }
+        }
+        ret_value = current_buffer_[current_count_++];
+        if (current_count_ == decode_size_) {
+            is_block_read_ = false;
+            current_count_ = 0;
+        }
+        return ret;
+    }
+
+   protected:
+    int decode_block(common::ByteStream& input) override {
+        // read header bitWidth
+        int ret = common::E_OK;
+        uint8_t byte;
+        uint32_t bit_width = 0, read_len = 0;
+        ret = input.read_buf(&byte, 1, read_len);
+        if (ret != common::E_OK || read_len != 1) {
+            return common::E_DECODE_ERR;
+        }
+        bit_width |= static_cast<uint32_t>(byte);
+        bit_width_ = static_cast<int32_t>(bit_width);
+
+        if ((bit_width_ & (1 << 7)) != 0) {
+            decode_size_ = bit_width_ & ~(1 << 7);
+            Int64RleDecoder decoder;
+            for (int i = 0; i < decode_size_; ++i) {
+                current_buffer_[i] = decoder.read_int(input);
+            }
+        } else {
+            decode_size_ = block_size_ + 1;
+
+            common::SerializationUtil::read_i64(pre_value_, input);
+            current_buffer_[0] = pre_value_;
+
+            // Read packed buffer
+            std::vector<uint8_t> pack_buf(bit_width_);
+            uint32_t read_len = 0;
+            input.read_buf(reinterpret_cast<char*>(pack_buf.data()), bit_width_,
+                           read_len);
+
+            std::vector<int64_t> tmp_buffer(8);
+            packer_ = std::make_shared<Int64Packer>(bit_width_);
+            packer_->unpack_8values(pack_buf.data(), 0, tmp_buffer.data());
+
+            for (int i = 0; i < 8; ++i) {
+                current_buffer_[i + 1] = tmp_buffer[i];
+            }
+
+            ret = recalculate();
+        }
+
+        is_block_read_ = true;
+        return ret;
+    }
+
+    int recalculate() override {
+        int ret = common::E_OK;
+        for (int i = 1; i <= block_size_; ++i) {
+            if ((current_buffer_[i] & 1) == 0) {
+                current_buffer_[i] = -current_buffer_[i] / 2;
+            } else {
+                current_buffer_[i] = (current_buffer_[i] + 1) / 2;
+            }
+        }
+
+        if (predict_scheme_ == "delta") {
+            for (int i = 1; i <= block_size_; ++i) {
+                current_buffer_[i] += current_buffer_[i - 1];
+            }
+        } else if (predict_scheme_ == "fire") {
+            fire_pred_.reset();
+            for (int i = 1; i <= block_size_; ++i) {
+                int64_t pred = fire_pred_.predict(current_buffer_[i - 1]);
+                int64_t err = current_buffer_[i];
+                current_buffer_[i] = pred + err;
+                fire_pred_.train(current_buffer_[i - 1], current_buffer_[i],
+                                 err);
+            }
+        } else {
+            ret = common::E_DECODE_ERR;
+        }
+        return ret;
+    }
+
+   private:
+    std::shared_ptr<Int64Packer> packer_;
+    LongFire fire_pred_;
+    int64_t pre_value_;
+    int64_t current_value_;
+    std::vector<int64_t> current_buffer_;
+    std::string predict_scheme_;
+};
+
+}  // namespace storage
+
+#endif  // INT64_SPRINTZ_DECODER_H
diff --git a/cpp/src/encoding/int64_sprintz_encoder.h b/cpp/src/encoding/int64_sprintz_encoder.h
new file mode 100644
index 0000000..4c5ca87
--- /dev/null
+++ b/cpp/src/encoding/int64_sprintz_encoder.h
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef INT64_SPRINTZ_ENCODER_H
+#define INT64_SPRINTZ_ENCODER_H
+
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/encode_utils.h"
+#include "encoding/fire.h"
+#include "encoding/int64_packer.h"
+#include "encoding/int64_rle_encoder.h"
+#include "sprintz_encoder.h"
+
+namespace storage {
+
+class Int64SprintzEncoder : public SprintzEncoder {
+   public:
+    Int64SprintzEncoder() : SprintzEncoder(), fire_pred_(3) {}
+
+    ~Int64SprintzEncoder() override = default;
+
+    void reset() override {
+        SprintzEncoder::reset();
+        values_.clear();
+    }
+
+    void destroy() override {}
+
+    int encode(int32_t value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(float value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(double value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(bool value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(common::String value, common::ByteStream& out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int encode(int64_t value, common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+        if (!is_first_cached_) {
+            values_.push_back(value);
+            is_first_cached_ = true;
+            return ret;
+        }
+
+        values_.push_back(value);
+
+        if (values_.size() == block_size_ + 1) {
+            int64_t prev = values_[0];
+            fire_pred_.reset();
+            for (int i = 1; i <= block_size_; ++i) {
+                int64_t temp = values_[i];
+                values_[i] = predict(values_[i], prev);
+                prev = temp;
+            }
+
+            bit_pack();
+            is_first_cached_ = false;
+            values_.clear();
+            group_num_++;
+
+            if (group_num_ == group_max_) {
+                if (RET_FAIL(flush(out_stream))) {
+                    return ret;
+                }
+            }
+        }
+
+        return ret;
+    }
+
+    int flush(common::ByteStream& out_stream) override {
+        int ret = common::E_OK;
+
+        if (byte_cache_.total_size() > 0) {
+            if (RET_FAIL(common::SerializationUtil::chunk_read_all_data(
+                    byte_cache_, out_stream))) {
+                return ret;
+            }
+        }
+
+        if (!values_.empty()) {
+            int size = static_cast<int>(values_.size());
+            size |= (1 << 7);
+
+            common::SerializationUtil::
+                write_int_little_endian_padded_on_bit_width(size, out_stream,
+                                                            1);
+
+            Int64RleEncoder encoder;
+            for (int64_t val : values_) {
+                encoder.encode(val, out_stream);
+            }
+            encoder.flush(out_stream);
+        }
+
+        reset();
+        return ret;
+    }
+
+    int get_one_item_max_size() override {
+        return 1 + (1 + block_size_) * sizeof(int64_t);
+    }
+
+    int get_max_byte_size() override {
+        return 1 + (values_.size() + 1) * sizeof(int64_t);
+    }
+
+   protected:
+    void bit_pack() override {
+        int64_t pre_value = values_[0];
+        values_.erase(values_.begin());
+
+        bit_width_ = get_int64_max_bit_width(values_);
+        packer_ = std::make_shared<Int64Packer>(bit_width_);
+
+        std::vector<uint8_t> bytes(bit_width_);
+        std::vector<int64_t> tmp_buffer(values_.begin(),
+                                        values_.begin() + block_size_);
+        packer_->pack_8values(tmp_buffer.data(), 0, bytes.data());
+
+        common::SerializationUtil::write_int_little_endian_padded_on_bit_width(
+            bit_width_, byte_cache_, 1);
+        common::SerializationUtil::write_i64(pre_value, byte_cache_);
+        byte_cache_.write_buf(reinterpret_cast<const char*>(bytes.data()),
+                              bytes.size());
+    }
+
+    int64_t predict(int64_t value, int64_t prev) {
+        int64_t pred = 0;
+        if (predict_method_ == "delta") {
+            pred = delta(value, prev);
+        } else if (predict_method_ == "fire") {
+            pred = fire(value, prev);
+        } else {
+            ASSERT(false);
+        }
+
+        return (pred <= 0) ? -2 * pred : 2 * pred - 1;
+    }
+
+    int64_t delta(int64_t value, int64_t prev) { return value - prev; }
+
+    int64_t fire(int64_t value, int64_t prev) {
+        int64_t pred = fire_pred_.predict(prev);
+        int64_t err = value - pred;
+        fire_pred_.train(prev, value, err);
+        return err;
+    }
+
+   private:
+    std::vector<int64_t> values_;
+    std::shared_ptr<Int64Packer> packer_;
+    LongFire fire_pred_;
+};
+
+}  // namespace storage
+
+#endif  // INT64_SPRINTZ_ENCODER_H
diff --git a/cpp/src/encoding/plain_decoder.h b/cpp/src/encoding/plain_decoder.h
index 5b17f39..d1c6969 100644
--- a/cpp/src/encoding/plain_decoder.h
+++ b/cpp/src/encoding/plain_decoder.h
@@ -26,8 +26,12 @@
 
 class PlainDecoder : public Decoder {
    public:
-    FORCE_INLINE void reset() { /* do nothing */ }
-    FORCE_INLINE bool has_remaining() { return false; }
+    ~PlainDecoder() override = default;
+    FORCE_INLINE void reset() { /* do nothing */
+    }
+    FORCE_INLINE bool has_remaining(const common::ByteStream &buffer) {
+        return buffer.has_remaining();
+    }
     FORCE_INLINE int read_boolean(bool &ret_bool, common::ByteStream &in) {
         return common::SerializationUtil::read_ui8((uint8_t &)ret_bool, in);
     }
diff --git a/cpp/src/encoding/plain_encoder.h b/cpp/src/encoding/plain_encoder.h
index a0b66c0..f04ca08 100644
--- a/cpp/src/encoding/plain_encoder.h
+++ b/cpp/src/encoding/plain_encoder.h
@@ -28,8 +28,10 @@
    public:
     PlainEncoder() {}
     ~PlainEncoder() { destroy(); }
-    void destroy() { /* do nothing for PlainEncoder */ }
-    void reset() { /* do thing for PlainEncoder */ }
+    void destroy() { /* do nothing for PlainEncoder */
+    }
+    void reset() { /* do thing for PlainEncoder */
+    }
 
     FORCE_INLINE int encode(bool value, common::ByteStream &out_stream) {
         return common::SerializationUtil::write_i8(value ? 1 : 0, out_stream);
diff --git a/cpp/src/encoding/sprintz_decoder.h b/cpp/src/encoding/sprintz_decoder.h
new file mode 100644
index 0000000..3ad30d4
--- /dev/null
+++ b/cpp/src/encoding/sprintz_decoder.h
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef SPRINTZ_DECODER_H
+#define SPRINTZ_DECODER_H
+
+#include <cstdint>
+#include <iostream>
+#include <istream>
+#include <memory>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "decoder.h"
+
+namespace storage {
+
+class SprintzDecoder : public Decoder {
+   public:
+    ~SprintzDecoder() override = default;
+
+    // Reset decoder state
+    void reset() override {
+        is_block_read_ = false;
+        current_count_ = 0;
+    }
+
+    // Decode a compressed block (to be implemented by subclasses)
+    virtual int decode_block(common::ByteStream& in) = 0;
+
+    // Update predictor based on decoded data (to be implemented by subclasses)
+    virtual int recalculate() = 0;
+
+   protected:
+    SprintzDecoder()
+        : bit_width_(0),
+          block_size_(8),
+          is_block_read_(false),
+          current_count_(0),
+          decode_size_(0) {}
+
+   protected:
+    int bit_width_;       // Current bit width being used
+    int block_size_;      // Default is 8
+    bool is_block_read_;  // Whether current block has been read
+    int current_count_;   // Current decoding position
+    int decode_size_;     // Number of valid data items in current decoded block
+};
+
+}  // namespace storage
+
+#endif  // SPRINTZ_DECODER_H
\ No newline at end of file
diff --git a/cpp/src/encoding/sprintz_encoder.h b/cpp/src/encoding/sprintz_encoder.h
new file mode 100644
index 0000000..67e4dae
--- /dev/null
+++ b/cpp/src/encoding/sprintz_encoder.h
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef SPRINTZ_ENCODER_H
+#define SPRINTZ_ENCODER_H
+
+#include <sstream>
+#include <string>
+
+#include "decoder.h"
+
+namespace storage {
+class SprintzEncoder : public Encoder {
+   public:
+    virtual ~SprintzEncoder() override = default;
+
+    void set_predict_method(const std::string& method) {
+        predict_method_ = method;
+    }
+
+    virtual void reset() {
+        byte_cache_.reset();
+        is_first_cached_ = false;
+        group_num_ = 0;
+    }
+
+    virtual int get_one_item_max_size() = 0;
+
+    virtual void bit_pack() = 0;
+
+   protected:
+    SprintzEncoder()
+        : block_size_(8),
+          group_max_(16),
+          group_num_(0),
+          bit_width_(0),
+          byte_cache_(1024, common::MOD_ENCODER_OBJ),
+          is_first_cached_(false),
+          predict_method_("fire") {}
+
+   protected:
+    int block_size_;  // Size of each compressed block, default 8
+    int group_max_;   // Maximum number of groups, default 16
+    int group_num_;   // Current group count
+    int bit_width_;   // Current bit width being used
+    common::ByteStream byte_cache_{};
+    std::string
+        predict_method_{};  // Prediction method, e.g. "delta", "fire", etc.
+    bool is_first_cached_;  // Whether the first value has been cached
+};
+}  // namespace storage
+
+#endif  // SPRINTZ_ENCODER_H
diff --git a/cpp/src/encoding/ts2diff_decoder.h b/cpp/src/encoding/ts2diff_decoder.h
index 5ad0e89..a19e616 100644
--- a/cpp/src/encoding/ts2diff_decoder.h
+++ b/cpp/src/encoding/ts2diff_decoder.h
@@ -34,7 +34,7 @@
 class TS2DIFFDecoder : public Decoder {
    public:
     TS2DIFFDecoder() { reset(); }
-    ~TS2DIFFDecoder() {}
+    ~TS2DIFFDecoder() override {}
 
     void reset() {
         write_index_ = -1;
@@ -48,7 +48,8 @@
         current_index_ = 0;
     }
 
-    FORCE_INLINE bool has_remaining() {
+    FORCE_INLINE bool has_remaining(const common::ByteStream &buffer) {
+        if (buffer.has_remaining()) return true;
         return bits_left_ != 0 || (current_index_ <= write_index_ &&
                                    write_index_ != -1 && current_index_ != 0);
     }
diff --git a/cpp/src/encoding/ts2diff_encoder.h b/cpp/src/encoding/ts2diff_encoder.h
index d2da14c..db3a283 100644
--- a/cpp/src/encoding/ts2diff_encoder.h
+++ b/cpp/src/encoding/ts2diff_encoder.h
@@ -41,20 +41,21 @@
 template <>
 struct SIMDOps<int32_t> {
 #ifdef USE_SSE
-    static void rebase(int32_t* arr, int32_t min_val, size_t size) {
+    static void rebase(int32_t *arr, int32_t min_val, size_t size) {
         const __m128i min_vec = _mm_set1_epi32(min_val);
         size_t i = 0;
         for (; i + 3 < size; i += 4) {
-            __m128i vec = _mm_loadu_si128(reinterpret_cast<const __m128i*>(arr + i));
+            __m128i vec =
+                _mm_loadu_si128(reinterpret_cast<const __m128i *>(arr + i));
             vec = _mm_sub_epi32(vec, min_vec);
-            _mm_storeu_si128(reinterpret_cast<__m128i*>(arr + i), vec);
+            _mm_storeu_si128(reinterpret_cast<__m128i *>(arr + i), vec);
         }
         for (; i < size; ++i) {
             arr[i] -= min_val;
         }
     }
 #else
-    static void rebase(int32_t* arr, int32_t min_val, size_t size) {
+    static void rebase(int32_t *arr, int32_t min_val, size_t size) {
         for (size_t i = 0; i < size; ++i) {
             arr[i] -= min_val;
         }
@@ -65,20 +66,21 @@
 template <>
 struct SIMDOps<int64_t> {
 #ifdef USE_AVX2
-    static void rebase(int64_t* arr, int64_t min_val, size_t size) {
+    static void rebase(int64_t *arr, int64_t min_val, size_t size) {
         const __m256i min_vec = _mm256_set1_epi64x(min_val);
         size_t i = 0;
         for (; i + 3 < size; i += 4) {
-            __m256i vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(arr + i));
+            __m256i vec =
+                _mm256_loadu_si256(reinterpret_cast<const __m256i *>(arr + i));
             vec = _mm256_sub_epi64(vec, min_vec);
-            _mm256_storeu_si256(reinterpret_cast<__m256i*>(arr + i), vec);
+            _mm256_storeu_si256(reinterpret_cast<__m256i *>(arr + i), vec);
         }
         for (; i < size; ++i) {
             arr[i] -= min_val;
         }
     }
 #else
-    static void rebase(int64_t* arr, int64_t min_val, size_t size) {
+    static void rebase(int64_t *arr, int64_t min_val, size_t size) {
         for (size_t i = 0; i < size; ++i) {
             arr[i] -= min_val;
         }
diff --git a/cpp/src/encoding/zigzag_decoder.h b/cpp/src/encoding/zigzag_decoder.h
index 4a540ee..e12f748 100644
--- a/cpp/src/encoding/zigzag_decoder.h
+++ b/cpp/src/encoding/zigzag_decoder.h
@@ -30,10 +30,10 @@
 namespace storage {
 
 template <typename T>
-class ZigzagDecoder {
+class ZigzagDecoder : public Decoder {
    public:
     ZigzagDecoder() { init(); }
-    ~ZigzagDecoder() { destroy(); }
+    ~ZigzagDecoder() override { destroy(); }
 
     void init() {
         type_ = common::ZIGZAG;
@@ -46,7 +46,32 @@
         zigzag_decode_arr_ = nullptr;
     }
 
-    void reset() {
+    bool has_remaining(const common::ByteStream &buffer) override {
+        return buffer.has_remaining() || !list_transit_in_zd_.empty();
+    }
+    int read_boolean(bool &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    inline int read_int32(int32_t &ret_value, common::ByteStream &in) override {
+        ret_value = decode(in);
+        return common::E_OK;
+    }
+    inline int read_int64(int64_t &ret_value, common::ByteStream &in) override {
+        ret_value = decode(in);
+        return common::E_OK;
+    }
+    int read_float(float &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_double(double &ret_value, common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int read_String(common::String &ret_value, common::PageArena &pa,
+                    common::ByteStream &in) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    void each_encode_reset() {
         bits_left_ = 0;
         buffer_ = 0;
         stored_value_ = 0;
@@ -54,6 +79,17 @@
         num_of_sorts_of_zigzag_ = 0;
     }
 
+    void reset() override {
+        type_ = common::ZIGZAG;
+        bits_left_ = 0;
+        buffer_ = 0;
+        stored_value_ = 0;
+        first_bit_of_byte_ = 0;
+        num_of_sorts_of_zigzag_ = 0;
+        first_read_ = true;
+        destroy();
+    }
+
     void destroy() {
         if (zigzag_decode_arr_ != nullptr) {
             common::mem_free(zigzag_decode_arr_);
@@ -111,7 +147,7 @@
         return stored_value_;
     }
 
-    T decode(common::ByteStream &in);
+    inline T decode(common::ByteStream &in);
 
    public:
     common::TSEncoding type_;
@@ -128,7 +164,7 @@
 };
 
 template <>
-int32_t ZigzagDecoder<int32_t>::decode(common::ByteStream &in) {
+inline int32_t ZigzagDecoder<int32_t>::decode(common::ByteStream &in) {
     if (UNLIKELY(first_read_ == true)) {
         read_header(in);
         zigzag_decode_arr_ =
@@ -156,12 +192,12 @@
 
     int32_t ret_value = (int32_t)(stored_value_);
     ret_value = (int32_t)(zigzag_decoder(stored_value_));
-    reset();
+    each_encode_reset();
     return ret_value;
 }
 
 template <>
-int64_t ZigzagDecoder<int64_t>::decode(common::ByteStream &in) {
+inline int64_t ZigzagDecoder<int64_t>::decode(common::ByteStream &in) {
     if (UNLIKELY(first_read_ == true)) {
         read_header(in);
         zigzag_decode_arr_ =
@@ -189,7 +225,7 @@
 
     int64_t ret_value = (int64_t)(stored_value_);
     ret_value = (int64_t)(zigzag_decoder(stored_value_));
-    reset();
+    each_encode_reset();
     return ret_value;
 }
 
diff --git a/cpp/src/encoding/zigzag_encoder.h b/cpp/src/encoding/zigzag_encoder.h
index 887ba07..841b8b0 100644
--- a/cpp/src/encoding/zigzag_encoder.h
+++ b/cpp/src/encoding/zigzag_encoder.h
@@ -29,13 +29,36 @@
 namespace storage {
 
 template <typename T>
-class ZigzagEncoder {
+class ZigzagEncoder : public Encoder {
    public:
     ZigzagEncoder() { init(); }
 
-    ~ZigzagEncoder() {}
+    ~ZigzagEncoder() override = default;
 
-    void destroy() {}
+    void destroy() override {}
+
+    // int init(common::TSDataType data_type) = 0;
+    int encode(bool value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(int32_t value, common::ByteStream &out_stream) override;
+    int encode(int64_t value, common::ByteStream &out_stream) override;
+    int encode(float value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(double value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+    int encode(common::String value, common::ByteStream &out_stream) override {
+        return common::E_TYPE_NOT_MATCH;
+    }
+
+    int get_max_byte_size() override {
+        if (list_transit_in_ze_.empty()) {
+            return 0;
+        }
+        return 8 + list_transit_in_ze_.size();
+    }
 
     void init() {
         type_ = common::ZIGZAG;
@@ -45,7 +68,7 @@
         first_read_ = true;
     }
 
-    void reset() {
+    void reset() override {
         type_ = common::ZIGZAG;
         buffer_ = 0;
         length_of_input_bytestream_ = 0;
@@ -75,26 +98,42 @@
         add_byte_to_trans();
     }
 
-    int encode(T value);
-    int flush(common::ByteStream &out);
+    inline int encode(T value);
+
+    inline int flush(common::ByteStream &out) override;
 
    public:
     common::TSEncoding type_;
-    uint8_t buffer_;
-    int length_of_input_bytestream_;
-    int length_of_encode_bytestream_;
+    uint8_t buffer_ = 0;
+    int length_of_input_bytestream_ = 0;
+    int length_of_encode_bytestream_ = 0;
     std::vector<uint8_t> list_transit_in_ze_;
-    bool first_read_;
+    bool first_read_{};
 };
 
+template <typename T>
+inline int ZigzagEncoder<T>::encode(int32_t /*value*/,
+                                    common::ByteStream & /*out*/) {
+    return common::E_TYPE_NOT_MATCH;
+}
+
+template <typename T>
+inline int ZigzagEncoder<T>::encode(int64_t /*value*/,
+                                    common::ByteStream & /*out*/) {
+    return common::E_TYPE_NOT_MATCH;
+}
+
 template <>
-int ZigzagEncoder<int32_t>::encode(int32_t value) {
+inline int ZigzagEncoder<int32_t>::encode(int32_t value) {
     if (UNLIKELY(first_read_ == true)) {
         reset();
         first_read_ = false;
     }
     length_of_input_bytestream_ += 1;
-    int32_t value_zigzag = (value << 1) ^ (value >> 31);
+    int32_t value_zigzag =
+        static_cast<int32_t>((static_cast<uint32_t>(value) << 1) ^
+                             static_cast<uint32_t>(value >> 31));
+
     if ((value_zigzag & ~0x7F) != 0) {
         write_byte_with_subsequence(value_zigzag);
         value_zigzag = (uint32_t)value_zigzag >> 7;
@@ -111,13 +150,22 @@
 }
 
 template <>
-int ZigzagEncoder<int64_t>::encode(int64_t value) {
+inline int ZigzagEncoder<int32_t>::encode(int32_t value,
+                                          common::ByteStream &out_stream) {
+    return encode(value);
+}
+
+template <>
+inline int ZigzagEncoder<int64_t>::encode(int64_t value) {
     if (UNLIKELY(first_read_ == true)) {
         reset();
         first_read_ = false;
     }
     length_of_input_bytestream_ += 1;
-    int64_t value_zigzag = (value << 1) ^ (value >> 63);
+    int64_t value_zigzag =
+        static_cast<int64_t>((static_cast<uint64_t>(value) << 1) ^
+                             static_cast<uint64_t>(value >> 63));
+
     if ((value_zigzag & ~0x7F) != 0) {
         write_byte_with_subsequence(value_zigzag);
         value_zigzag = (uint64_t)value_zigzag >> 7;
@@ -134,7 +182,13 @@
 }
 
 template <>
-int ZigzagEncoder<int32_t>::flush(common::ByteStream &out) {
+inline int ZigzagEncoder<int64_t>::encode(int64_t value,
+                                          common::ByteStream &out_stream) {
+    return encode(value);
+}
+
+template <>
+inline int ZigzagEncoder<int32_t>::flush(common::ByteStream &out) {
     common::SerializationUtil::write_var_uint(length_of_encode_bytestream_,
                                               out);
     common::SerializationUtil::write_var_uint(length_of_input_bytestream_, out);
@@ -148,7 +202,7 @@
 }
 
 template <>
-int ZigzagEncoder<int64_t>::flush(common::ByteStream &out) {
+inline int ZigzagEncoder<int64_t>::flush(common::ByteStream &out) {
     common::SerializationUtil::write_var_uint(length_of_encode_bytestream_,
                                               out);
     common::SerializationUtil::write_var_uint(length_of_input_bytestream_, out);
diff --git a/cpp/src/file/tsfile_io_reader.cc b/cpp/src/file/tsfile_io_reader.cc
index a256695..c70c429 100644
--- a/cpp/src/file/tsfile_io_reader.cc
+++ b/cpp/src/file/tsfile_io_reader.cc
@@ -25,356 +25,366 @@
 
 namespace storage {
 int TsFileIOReader::init(const std::string &file_path) {
-  int ret = E_OK;
-  read_file_ = new ReadFile;
-  read_file_created_ = true;
-  if (RET_FAIL(read_file_->open(file_path))) {
-  }
-  return ret;
+    int ret = E_OK;
+    read_file_ = new ReadFile;
+    read_file_created_ = true;
+    if (RET_FAIL(read_file_->open(file_path))) {
+    }
+    return ret;
 }
 
 int TsFileIOReader::init(ReadFile *read_file) {
-  if (IS_NULL(read_file)) {
-    ASSERT(false);
-    return E_INVALID_ARG;
-  }
-  read_file_created_ = false;
-  read_file_ = read_file;
-  return E_OK;
+    if (IS_NULL(read_file)) {
+        ASSERT(false);
+        return E_INVALID_ARG;
+    }
+    read_file_created_ = false;
+    read_file_ = read_file;
+    return E_OK;
 }
 
 void TsFileIOReader::reset() {
-  if (read_file_ != nullptr) {
-    if (read_file_created_) {
-      read_file_->destroy();
-      delete read_file_;
+    if (read_file_ != nullptr) {
+        if (read_file_created_) {
+            read_file_->destroy();
+            delete read_file_;
+        }
+        read_file_ = nullptr;
+        tsfile_meta_page_arena_.destroy();
+        tsfile_meta_ready_ = false;
     }
-    read_file_ = nullptr;
-    tsfile_meta_page_arena_.destroy();
-    tsfile_meta_ready_ = false;
-  }
 }
 
 int TsFileIOReader::alloc_ssi(std::shared_ptr<IDeviceID> device_id,
                               const std::string &measurement_name,
                               TsFileSeriesScanIterator *&ssi,
                               common::PageArena &pa, Filter *time_filter) {
-  int ret = E_OK;
-  if (RET_FAIL(load_tsfile_meta_if_necessary())) {
-  } else {
-    ssi = new TsFileSeriesScanIterator;
-    ssi->init(device_id, measurement_name, read_file_, time_filter, pa);
-    if (RET_FAIL(load_timeseries_index_for_ssi(device_id,
-                                               measurement_name, ssi))) {
-    } else if (time_filter != nullptr &&
-        !filter_stasify(ssi->itimeseries_index_, time_filter)) {
-      ret = E_NO_MORE_DATA;
-    } else if (RET_FAIL(ssi->init_chunk_reader())) {
+    int ret = E_OK;
+    if (RET_FAIL(load_tsfile_meta_if_necessary())) {
+    } else {
+        ssi = new TsFileSeriesScanIterator;
+        ssi->init(device_id, measurement_name, read_file_, time_filter, pa);
+        if (RET_FAIL(load_timeseries_index_for_ssi(device_id, measurement_name,
+                                                   ssi))) {
+        } else if (time_filter != nullptr &&
+                   !filter_stasify(ssi->itimeseries_index_, time_filter)) {
+            ret = E_NO_MORE_DATA;
+        } else if (RET_FAIL(ssi->init_chunk_reader())) {
+        }
+        if (ret != E_OK) {
+            ssi->destroy();
+            delete ssi;
+            ssi = nullptr;
+        }
     }
-    if (ret != E_OK) {
-      ssi->destroy();
-      delete ssi;
-      ssi = nullptr;
-    }
-  }
-  return ret;
+    return ret;
 }
 
 void TsFileIOReader::revert_ssi(TsFileSeriesScanIterator *ssi) {
-  if (ssi != nullptr) {
-    ssi->destroy();
-    delete ssi;
-  }
+    if (ssi != nullptr) {
+        ssi->destroy();
+        delete ssi;
+    }
 }
 
 int TsFileIOReader::get_device_timeseries_meta_without_chunk_meta(
     std::shared_ptr<IDeviceID> device_id,
-    std::vector<ITimeseriesIndex *> &timeseries_indexs,
-    PageArena &pa) {
-  int ret = E_OK;
-  load_tsfile_meta_if_necessary();
-  std::shared_ptr<IMetaIndexEntry> meta_index_entry;
-  int64_t end_offset;
-  std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
-      meta_index_entry_list;
-  if (RET_FAIL(
-      load_device_index_entry(std::make_shared<DeviceIDComparable>(device_id),
-                              meta_index_entry, end_offset))) {
-  } else if (RET_FAIL(load_all_measurement_index_entry(
-      meta_index_entry->get_offset(), end_offset, pa,
-      meta_index_entry_list))) {
-  } else if (RET_FAIL(do_load_all_timeseries_index(meta_index_entry_list, pa,
-                                                   timeseries_indexs))) {
-  }
-  return ret;
+    std::vector<ITimeseriesIndex *> &timeseries_indexs, PageArena &pa) {
+    int ret = E_OK;
+    load_tsfile_meta_if_necessary();
+    std::shared_ptr<IMetaIndexEntry> meta_index_entry;
+    int64_t end_offset;
+    std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
+        meta_index_entry_list;
+    if (RET_FAIL(load_device_index_entry(
+            std::make_shared<DeviceIDComparable>(device_id), meta_index_entry,
+            end_offset))) {
+    } else if (RET_FAIL(load_all_measurement_index_entry(
+                   meta_index_entry->get_offset(), end_offset, pa,
+                   meta_index_entry_list))) {
+    } else if (RET_FAIL(do_load_all_timeseries_index(meta_index_entry_list, pa,
+                                                     timeseries_indexs))) {
+    }
+    return ret;
 }
 
 bool TsFileIOReader::filter_stasify(ITimeseriesIndex *ts_index,
                                     Filter *time_filter) {
-  ASSERT(ts_index->get_statistic() != nullptr);
-  return time_filter->satisfy(ts_index->get_statistic());
+    ASSERT(ts_index->get_statistic() != nullptr);
+    return time_filter->satisfy(ts_index->get_statistic());
 }
 
 int TsFileIOReader::load_tsfile_meta_if_necessary() {
-  int ret = E_OK;
-  if (!tsfile_meta_ready_) {
-    if (RET_FAIL(load_tsfile_meta())) {
-      // log_err("load_tsfile_meta error, ret=%d", ret);
-      return ret;
-    } else {
-      tsfile_meta_ready_ = true;
+    int ret = E_OK;
+    if (!tsfile_meta_ready_) {
+        if (RET_FAIL(load_tsfile_meta())) {
+            // log_err("load_tsfile_meta error, ret=%d", ret);
+            return ret;
+        } else {
+            tsfile_meta_ready_ = true;
+        }
     }
-  }
-  return ret;
+    return ret;
 }
 
 int TsFileIOReader::load_tsfile_meta() {
-  const int32_t TSFILE_READ_IO_SIZE = 1024; // TODO make it configurable
-  const int32_t TAIL_MAGIC_AND_META_SIZE_SIZE =
-      10;                  // magic(6B) + meta_size(4B)
-  ASSERT(file_size() > 0); // > 13
+    const int32_t TSFILE_READ_IO_SIZE = 1024;  // TODO make it configurable
+    const int32_t TAIL_MAGIC_AND_META_SIZE_SIZE =
+        10;                   // magic(6B) + meta_size(4B)
+    ASSERT(file_size() > 0);  // > 13
 
-  int ret = E_OK;
-  uint32_t tsfile_meta_size = 0;
-  int32_t read_offset = 0;
-  int32_t ret_read_len = 0;
+    int ret = E_OK;
+    uint32_t tsfile_meta_size = 0;
+    int32_t read_offset = 0;
+    int32_t ret_read_len = 0;
 
-  // Step 1: reader the tsfile_meta_size
-  // 1.1 prepare reader buffer
-  int32_t alloc_size = UTIL_MIN(TSFILE_READ_IO_SIZE, file_size());
-  char *read_buf = (char *) mem_alloc(alloc_size, MOD_TSFILE_READER);
-  if (IS_NULL(read_buf)) {
-    return E_OOM;
-  }
-  // 1.2 reader data from file
-  read_offset = file_size() - alloc_size;
-  ret_read_len = 0;
-  if (RET_FAIL(read_file_->read(read_offset, read_buf, alloc_size,
-                                ret_read_len))) {
-  } else if (ret_read_len != alloc_size) {
-    ret = E_FILE_READ_ERR;
-    // log_err("do not reader enough data from tsfile, want-size=%d,
-    // reader-size=%d, file=%s", alloc_size, ret_read_len,
-    // get_file_path().c_str());
-  }
-  // 1.3 deserialize tsfile_meta_size
-  if (IS_SUCC(ret)) {
-    // deserialize tsfile_meta_size
-    char *size_buf = read_buf + alloc_size - TAIL_MAGIC_AND_META_SIZE_SIZE;
-    tsfile_meta_size = SerializationUtil::read_ui32(size_buf);
-    ASSERT(tsfile_meta_size > 0 && tsfile_meta_size <= (1ll << 20));
-  }
-
-  // Step 2: reader TsFileMeta
-  if (IS_SUCC(ret)) {
-    // 2.1 prepare enough buffer (use the previous buffer if can).
-    char *tsfile_meta_buf = nullptr;
-    if (tsfile_meta_size + TAIL_MAGIC_AND_META_SIZE_SIZE >
-        (uint32_t) alloc_size) {
-      // prepare buffer to re-reader from start of tsfile_meta
-      char *old_read_buf = read_buf;
-      read_buf = (char *) mem_realloc(read_buf, tsfile_meta_size);
-      if (IS_NULL(read_buf)) {
-        read_buf = old_read_buf;
-        ret = E_OOM;
-      } else if (RET_FAIL(read_file_->read(
-          file_size() - tsfile_meta_size -
-              TAIL_MAGIC_AND_META_SIZE_SIZE,
-          read_buf, tsfile_meta_size, ret_read_len))) {
-      } else if (tsfile_meta_size != (uint32_t) ret_read_len) {
+    // Step 1: reader the tsfile_meta_size
+    // 1.1 prepare reader buffer
+    int32_t alloc_size = UTIL_MIN(TSFILE_READ_IO_SIZE, file_size());
+    char *read_buf = (char *)mem_alloc(alloc_size, MOD_TSFILE_READER);
+    if (IS_NULL(read_buf)) {
+        return E_OOM;
+    }
+    // 1.2 reader data from file
+    read_offset = file_size() - alloc_size;
+    ret_read_len = 0;
+    if (RET_FAIL(read_file_->read(read_offset, read_buf, alloc_size,
+                                  ret_read_len))) {
+    } else if (ret_read_len != alloc_size) {
         ret = E_FILE_READ_ERR;
         // log_err("do not reader enough data from tsfile, want-size=%d,
-        // reader-size=%d, file=%s", tsfile_meta_size, ret_read_len,
+        // reader-size=%d, file=%s", alloc_size, ret_read_len,
         // get_file_path().c_str());
-      } else {
-        tsfile_meta_buf = read_buf;
-      }
-    } else {
-      // the previous buffer has contained the TsFileMeta data
-      tsfile_meta_buf = read_buf + alloc_size - tsfile_meta_size -
-          TAIL_MAGIC_AND_META_SIZE_SIZE;
-      // DEBUG_hex_dump_buf("tsfile_meta_buf=", tsfile_meta_buf,
-      // tsfile_meta_size);
     }
+    // 1.3 deserialize tsfile_meta_size
     if (IS_SUCC(ret)) {
-      ByteStream tsfile_meta_bs;
-      tsfile_meta_bs.wrap_from(tsfile_meta_buf, tsfile_meta_size);
-      if (RET_FAIL(tsfile_meta_.deserialize_from(tsfile_meta_bs))) {
-      }
-#if DEBUG_SE
-      std::cout << "load tsfile_meta, ret=" << ret
-                << ", tsfile_meta_=" << tsfile_meta_ << std::endl;
-#endif
+        // deserialize tsfile_meta_size
+        char *size_buf = read_buf + alloc_size - TAIL_MAGIC_AND_META_SIZE_SIZE;
+        tsfile_meta_size = SerializationUtil::read_ui32(size_buf);
+        ASSERT(tsfile_meta_size > 0 && tsfile_meta_size <= (1ll << 20));
     }
-  }
-  mem_free(read_buf);
-  return ret;
+
+    // Step 2: reader TsFileMeta
+    if (IS_SUCC(ret)) {
+        // 2.1 prepare enough buffer (use the previous buffer if can).
+        char *tsfile_meta_buf = nullptr;
+        if (tsfile_meta_size + TAIL_MAGIC_AND_META_SIZE_SIZE >
+            (uint32_t)alloc_size) {
+            // prepare buffer to re-reader from start of tsfile_meta
+            char *old_read_buf = read_buf;
+            read_buf = (char *)mem_realloc(read_buf, tsfile_meta_size);
+            if (IS_NULL(read_buf)) {
+                read_buf = old_read_buf;
+                ret = E_OOM;
+            } else if (RET_FAIL(read_file_->read(
+                           file_size() - tsfile_meta_size -
+                               TAIL_MAGIC_AND_META_SIZE_SIZE,
+                           read_buf, tsfile_meta_size, ret_read_len))) {
+            } else if (tsfile_meta_size != (uint32_t)ret_read_len) {
+                ret = E_FILE_READ_ERR;
+                // log_err("do not reader enough data from tsfile, want-size=%d,
+                // reader-size=%d, file=%s", tsfile_meta_size, ret_read_len,
+                // get_file_path().c_str());
+            } else {
+                tsfile_meta_buf = read_buf;
+            }
+        } else {
+            // the previous buffer has contained the TsFileMeta data
+            tsfile_meta_buf = read_buf + alloc_size - tsfile_meta_size -
+                              TAIL_MAGIC_AND_META_SIZE_SIZE;
+            // DEBUG_hex_dump_buf("tsfile_meta_buf=", tsfile_meta_buf,
+            // tsfile_meta_size);
+        }
+        if (IS_SUCC(ret)) {
+            ByteStream tsfile_meta_bs;
+            tsfile_meta_bs.wrap_from(tsfile_meta_buf, tsfile_meta_size);
+            if (RET_FAIL(tsfile_meta_.deserialize_from(tsfile_meta_bs))) {
+            }
+#if DEBUG_SE
+            std::cout << "load tsfile_meta, ret=" << ret
+                      << ", tsfile_meta_=" << tsfile_meta_ << std::endl;
+#endif
+        }
+    }
+    mem_free(read_buf);
+    return ret;
 }
 
 int TsFileIOReader::load_timeseries_index_for_ssi(
     std::shared_ptr<IDeviceID> device_id, const std::string &measurement_name,
     TsFileSeriesScanIterator *&ssi) {
-  int ret = E_OK;
-  std::shared_ptr<IMetaIndexEntry> device_index_entry;
-  int64_t device_ie_end_offset = 0;
-  std::shared_ptr<IMetaIndexEntry> measurement_index_entry;
-  int64_t measurement_ie_end_offset = 0;
-  // bool is_aligned = false;
-  if (RET_FAIL(load_device_index_entry(
-      std::make_shared<DeviceIDComparable>(device_id), device_index_entry,
-      device_ie_end_offset))) {
-  } else if (RET_FAIL(load_measurement_index_entry(
-      measurement_name, device_index_entry->get_offset(),
-      device_ie_end_offset, measurement_index_entry,
-      measurement_ie_end_offset))) {
-  } else if (RET_FAIL(do_load_timeseries_index(
-      measurement_name, measurement_index_entry->get_offset(),
-      measurement_ie_end_offset, ssi->timeseries_index_pa_,
-      ssi->itimeseries_index_))) {
-  } else {
+    int ret = E_OK;
+    std::shared_ptr<IMetaIndexEntry> device_index_entry;
+    int64_t device_ie_end_offset = 0;
+    std::shared_ptr<IMetaIndexEntry> measurement_index_entry;
+    int64_t measurement_ie_end_offset = 0;
+    // bool is_aligned = false;
+    if (RET_FAIL(load_device_index_entry(
+            std::make_shared<DeviceIDComparable>(device_id), device_index_entry,
+            device_ie_end_offset))) {
+        return ret;
+    }
+    auto &pa = ssi->timeseries_index_pa_;
+
+    int start_offset = device_index_entry->get_offset(),
+        end_offset = device_ie_end_offset;
+    ASSERT(start_offset < end_offset);
+    const int32_t read_size = end_offset - start_offset;
+    int32_t ret_read_len = 0;
+    char *data_buf = (char *)pa.alloc(read_size);
+    void *m_idx_node_buf = pa.alloc(sizeof(MetaIndexNode));
+    if (IS_NULL(data_buf) || IS_NULL(m_idx_node_buf)) {
+        return E_OOM;
+    }
+    auto *top_node_ptr = new (m_idx_node_buf) MetaIndexNode(&pa);
+    auto top_node = std::shared_ptr<MetaIndexNode>(top_node_ptr,
+                                                   MetaIndexNode::self_deleter);
+
+    if (RET_FAIL(read_file_->read(start_offset, data_buf, read_size,
+                                  ret_read_len))) {
+        return ret;
+    } else if (RET_FAIL(top_node->deserialize_from(data_buf, read_size))) {
+        return ret;
+    }
+
+    bool is_aligned = is_aligned_device(top_node);
+    TimeseriesIndex *timeseries_index = nullptr;
+    if (is_aligned) {
+        if (RET_FAIL(
+                get_time_column_metadata(top_node, timeseries_index, pa))) {
+            return ret;
+        }
+    }
+
+    if (RET_FAIL(load_measurement_index_entry(measurement_name, top_node,
+                                              measurement_index_entry,
+                                              measurement_ie_end_offset))) {
+        return ret;
+    } else if (RET_FAIL(do_load_timeseries_index(
+                   measurement_name, measurement_index_entry->get_offset(),
+                   measurement_ie_end_offset, ssi->timeseries_index_pa_,
+                   ssi->itimeseries_index_, is_aligned))) {
+        return ret;
+    }
+    if (is_aligned) {
+        auto *aligned_timeseries_index =
+            dynamic_cast<AlignedTimeseriesIndex *>(ssi->itimeseries_index_);
+        if (aligned_timeseries_index) {
+            aligned_timeseries_index->time_ts_idx_ = timeseries_index;
+        }
+    }
+
 #if DEBUG_SE
     if (measurement_index_entry.name_.len_) {
         std::cout << "load timeseries index: "
-                  << *((TimeseriesIndex *)ssi->itimeseries_index_)
-                  << std::endl;
+                  << *((TimeseriesIndex *)ssi->itimeseries_index_) << std::endl;
     } else {
         std::cout << "load aligned timeseries index: "
                   << *((AlignedTimeseriesIndex *)ssi->itimeseries_index_)
                   << std::endl;
     }
 #endif
-  }
-  return ret;
+    return ret;
 }
 
 int TsFileIOReader::load_device_index_entry(
     std::shared_ptr<IComparable> device_name,
     std::shared_ptr<IMetaIndexEntry> &device_index_entry, int64_t &end_offset) {
-  int ret = E_OK;
-  std::shared_ptr<DeviceIDComparable> device_id_comparable =
-      std::dynamic_pointer_cast<DeviceIDComparable>(device_name);
-  if (device_id_comparable == nullptr) {
-    return E_INVALID_DATA_POINT;
-  }
-  auto index_node = tsfile_meta_.table_metadata_index_node_map_[device_id_comparable->device_id_->get_table_name()];
-  assert(tsfile_meta_.table_metadata_index_node_map_.find(device_id_comparable->device_id_->get_table_name()) !=
-        tsfile_meta_.table_metadata_index_node_map_.end());
-  assert(index_node != nullptr);
-  if (index_node->node_type_ == LEAF_DEVICE) {
-    // FIXME
-    ret = index_node->binary_search_children(
-        device_name, true, device_index_entry, end_offset);
-  } else {
-    ret = search_from_internal_node(device_name, index_node,
-                                    device_index_entry, end_offset);
-  }
-  if (ret == E_NOT_EXIST) {
-    ret = E_DEVICE_NOT_EXIST;
-  }
+    int ret = E_OK;
+    std::shared_ptr<DeviceIDComparable> device_id_comparable =
+        std::dynamic_pointer_cast<DeviceIDComparable>(device_name);
+    if (device_id_comparable == nullptr) {
+        return E_INVALID_DATA_POINT;
+    }
+    auto index_node = tsfile_meta_.table_metadata_index_node_map_
+                          [device_id_comparable->device_id_->get_table_name()];
+    assert(tsfile_meta_.table_metadata_index_node_map_.find(
+               device_id_comparable->device_id_->get_table_name()) !=
+           tsfile_meta_.table_metadata_index_node_map_.end());
+    assert(index_node != nullptr);
+    if (index_node->node_type_ == LEAF_DEVICE) {
+        // FIXME
+        ret = index_node->binary_search_children(
+            device_name, true, device_index_entry, end_offset);
+    } else {
+        ret = search_from_internal_node(device_name, true, index_node,
+                                        device_index_entry, end_offset);
+    }
+    if (ret == E_NOT_EXIST) {
+        ret = E_DEVICE_NOT_EXIST;
+    }
 #if DEBUG_SE
-  std::cout << "load_device_index_entry, device_index_entry={"
-            << device_index_entry << "}, end_offset=" << end_offset
-            << std::endl;
+    std::cout << "load_device_index_entry, device_index_entry={"
+              << device_index_entry << "}, end_offset=" << end_offset
+              << std::endl;
 #endif
-  return ret;
+    return ret;
 }
 
 int TsFileIOReader::load_measurement_index_entry(
-    const std::string &measurement_name_str, int64_t start_offset,
-    int64_t end_offset, std::shared_ptr<IMetaIndexEntry> &ret_measurement_index_entry,
+    const std::string &measurement_name_str,
+    std::shared_ptr<MetaIndexNode> top_node,
+    std::shared_ptr<IMetaIndexEntry> &ret_measurement_index_entry,
     int64_t &ret_end_offset) {
-#if DEBUG_SE
-  std::cout << "load_measurement_index_entry: measurement_name_str="
-            << measurement_name_str << ", start_offset=" << start_offset
-            << ", end_offset=" << end_offset << std::endl;
-#endif
-  ASSERT(start_offset < end_offset);
-  int ret = E_OK;
-
-  // 1. load top measuremnt_index_node
-  PageArena pa;
-  pa.init(512, MOD_TSFILE_READER);
-  const int32_t read_size = (int32_t) (end_offset - start_offset);
-  int32_t ret_read_len = 0;
-  char *data_buf = (char *) pa.alloc(read_size);
-  void *m_idx_node_buf = pa.alloc(sizeof(MetaIndexNode));
-  if (IS_NULL(data_buf) || IS_NULL(m_idx_node_buf)) {
-    return E_OOM;
-  }
-  auto *top_node_ptr = new(m_idx_node_buf) MetaIndexNode(&pa);
-  auto top_node = std::shared_ptr<MetaIndexNode>(
-      top_node_ptr, MetaIndexNode::self_deleter);
-
-  if (RET_FAIL(read_file_->read(start_offset, data_buf, read_size,
-                                ret_read_len))) {
-  } else if (RET_FAIL(top_node->deserialize_from(data_buf, read_size))) {
-  }
-#if DEBUG_SE
-  std::cout
-      << "load_measurement_index_entry deserialize MetaIndexNode, top_node="
-      << *top_node << " at file pos " << start_offset << " to " << end_offset
-      << std::endl;
-#endif
-  // 2. search from top_node in top-down way
-  if (IS_SUCC(ret)) {
+    int ret = E_OK;
+    // search from top_node in top-down way
     auto measurement_name =
         std::make_shared<StringComparable>(measurement_name_str);
     if (top_node->node_type_ == LEAF_MEASUREMENT) {
-      ret = top_node->binary_search_children(
-          measurement_name, /*exact*/ false, ret_measurement_index_entry,
-          ret_end_offset);
+        ret = top_node->binary_search_children(
+            measurement_name, /*exact*/ false, ret_measurement_index_entry,
+            ret_end_offset);
     } else {
-      ret = search_from_internal_node(measurement_name, top_node,
-                                      ret_measurement_index_entry,
-                                      ret_end_offset);
+        ret = search_from_internal_node(measurement_name, false, top_node,
+                                        ret_measurement_index_entry,
+                                        ret_end_offset);
     }
-  }
-  if (ret == E_NOT_EXIST) {
-    ret = E_MEASUREMENT_NOT_EXIST;
-  }
-  return ret;
+    if (ret == E_NOT_EXIST) {
+        ret = E_MEASUREMENT_NOT_EXIST;
+    }
+    return ret;
 }
 
 int TsFileIOReader::load_all_measurement_index_entry(
     int64_t start_offset, int64_t end_offset, common::PageArena &pa,
     std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
-    &ret_measurement_index_entry) {
+        &ret_measurement_index_entry) {
 #if DEBUG_SE
-  std::cout << "load_measurement_index_entry: measurement_name_str= "
-            << ", start_offset=" << start_offset
-            << ", end_offset=" << end_offset << std::endl;
+    std::cout << "load_measurement_index_entry: measurement_name_str= "
+              << ", start_offset=" << start_offset
+              << ", end_offset=" << end_offset << std::endl;
 #endif
-  ASSERT(start_offset < end_offset);
-  int ret = E_OK;
-  // 1. load top measuremnt_index_node
-  const int32_t read_size = (int32_t) (end_offset - start_offset);
-  int32_t ret_read_len = 0;
-  char *data_buf = (char *) pa.alloc(read_size);
-  void *m_idx_node_buf = pa.alloc(sizeof(MetaIndexNode));
-  if (IS_NULL(data_buf) || IS_NULL(m_idx_node_buf)) {
-    return E_OOM;
-  }
-  auto *top_node_ptr = new(m_idx_node_buf) MetaIndexNode(&pa);
-  auto top_node = std::shared_ptr<MetaIndexNode>(
-      top_node_ptr, MetaIndexNode::self_deleter);
-  if (RET_FAIL(read_file_->read(start_offset, data_buf, read_size,
-                                ret_read_len))) {
-  } else if (RET_FAIL(top_node->deserialize_from(data_buf, read_size))) {
-  }
+    ASSERT(start_offset < end_offset);
+    int ret = E_OK;
+    // 1. load top measuremnt_index_node
+    const int32_t read_size = (int32_t)(end_offset - start_offset);
+    int32_t ret_read_len = 0;
+    char *data_buf = (char *)pa.alloc(read_size);
+    void *m_idx_node_buf = pa.alloc(sizeof(MetaIndexNode));
+    if (IS_NULL(data_buf) || IS_NULL(m_idx_node_buf)) {
+        return E_OOM;
+    }
+    auto *top_node_ptr = new (m_idx_node_buf) MetaIndexNode(&pa);
+    auto top_node = std::shared_ptr<MetaIndexNode>(top_node_ptr,
+                                                   MetaIndexNode::self_deleter);
+    if (RET_FAIL(read_file_->read(start_offset, data_buf, read_size,
+                                  ret_read_len))) {
+    } else if (RET_FAIL(top_node->deserialize_from(data_buf, read_size))) {
+    }
 #if DEBUG_SE
-  std::cout
-      << "load_measurement_index_entry deserialize MetaIndexNode, top_node="
-      << *top_node << " at file pos " << start_offset << " to " << end_offset
-      << std::endl;
+    std::cout
+        << "load_measurement_index_entry deserialize MetaIndexNode, top_node="
+        << *top_node << " at file pos " << start_offset << " to " << end_offset
+        << std::endl;
 #endif
-  // 2. search from top_node in top-down way
-  if (IS_SUCC(ret)) {
-    get_all_leaf(top_node, ret_measurement_index_entry);
-  }
-  if (ret == E_NOT_EXIST) {
-    ret = E_MEASUREMENT_NOT_EXIST;
-  }
-  return ret;
+    // 2. search from top_node in top-down way
+    if (IS_SUCC(ret)) {
+        get_all_leaf(top_node, ret_measurement_index_entry);
+    }
+    if (ret == E_NOT_EXIST) {
+        ret = E_MEASUREMENT_NOT_EXIST;
+    }
+    return ret;
 }
 
 int TsFileIOReader::read_device_meta_index(int32_t start_offset,
@@ -395,37 +405,77 @@
     if (RET_FAIL(read_file_->read(start_offset, data_buf, read_size,
                                   ret_read_len))) {
     }
-    ret = device_meta_index->device_deserialize_from(data_buf, read_size);
+    if (!leaf) {
+        ret = device_meta_index->device_deserialize_from(data_buf, read_size);
+    } else {
+        ret = device_meta_index->deserialize_from(data_buf, read_size);
+    }
     return ret;
 }
 
-int TsFileIOReader::get_timeseries_indexes(std::shared_ptr<IDeviceID> device_id,
-                             const std::unordered_set<std::string> &measurement_names,
-                             std::vector<ITimeseriesIndex *> &timeseries_indexs,
-                             common::PageArena &pa) {
-  int ret = E_OK;
-  std::shared_ptr<IMetaIndexEntry> device_index_entry;
-  int64_t device_ie_end_offset = 0;
-  std::shared_ptr<IMetaIndexEntry> measurement_index_entry;
-  int64_t measurement_ie_end_offset = 0;
-  if (RET_FAIL(load_device_index_entry(
-      std::make_shared<DeviceIDComparable>(device_id), device_index_entry,
-      device_ie_end_offset))) {
-    return ret;
-  }
-  int64_t idx = 0;
-  for (const auto &measurement_name : measurement_names) {
-    if (RET_FAIL(load_measurement_index_entry(
-        measurement_name, device_index_entry->get_offset(),
-        device_ie_end_offset, measurement_index_entry,
-        measurement_ie_end_offset))) {
-    } else if (RET_FAIL(do_load_timeseries_index(
-        measurement_name, measurement_index_entry->get_offset(),
-        measurement_ie_end_offset, pa,
-        timeseries_indexs[idx++]))) {
+int TsFileIOReader::get_timeseries_indexes(
+    std::shared_ptr<IDeviceID> device_id,
+    const std::unordered_set<std::string> &measurement_names,
+    std::vector<ITimeseriesIndex *> &timeseries_indexs, common::PageArena &pa) {
+    int ret = E_OK;
+    std::shared_ptr<IMetaIndexEntry> device_index_entry;
+    int64_t device_ie_end_offset = 0;
+    std::shared_ptr<IMetaIndexEntry> measurement_index_entry;
+    int64_t measurement_ie_end_offset = 0;
+    if (RET_FAIL(load_device_index_entry(
+            std::make_shared<DeviceIDComparable>(device_id), device_index_entry,
+            device_ie_end_offset))) {
+        return ret;
     }
-  }
-  return ret;
+
+    int start_offset = device_index_entry->get_offset(),
+        end_offset = device_ie_end_offset;
+    ASSERT(start_offset < end_offset);
+    const int32_t read_size = end_offset - start_offset;
+    int32_t ret_read_len = 0;
+    char *data_buf = (char *)pa.alloc(read_size);
+    void *m_idx_node_buf = pa.alloc(sizeof(MetaIndexNode));
+    if (IS_NULL(data_buf) || IS_NULL(m_idx_node_buf)) {
+        return E_OOM;
+    }
+    auto *top_node_ptr = new (m_idx_node_buf) MetaIndexNode(&pa);
+    auto top_node = std::shared_ptr<MetaIndexNode>(top_node_ptr,
+                                                   MetaIndexNode::self_deleter);
+
+    if (RET_FAIL(read_file_->read(start_offset, data_buf, read_size,
+                                  ret_read_len))) {
+        return ret;
+    } else if (RET_FAIL(top_node->deserialize_from(data_buf, read_size))) {
+        return ret;
+    }
+
+    bool is_aligned = is_aligned_device(top_node);
+    TimeseriesIndex *timeseries_index = nullptr;
+    if (is_aligned) {
+        get_time_column_metadata(top_node, timeseries_index, pa);
+    }
+
+    int64_t idx = 0;
+    for (const auto &measurement_name : measurement_names) {
+        if (RET_FAIL(load_measurement_index_entry(measurement_name, top_node,
+                                                  measurement_index_entry,
+                                                  measurement_ie_end_offset))) {
+        } else if (RET_FAIL(do_load_timeseries_index(
+                       measurement_name, measurement_index_entry->get_offset(),
+                       measurement_ie_end_offset, pa, timeseries_indexs[idx],
+                       is_aligned))) {
+        }
+        if (is_aligned) {
+            AlignedTimeseriesIndex *aligned_timeseries_index =
+                dynamic_cast<AlignedTimeseriesIndex *>(timeseries_indexs[idx]);
+            if (aligned_timeseries_index) {
+                aligned_timeseries_index->time_ts_idx_ = timeseries_index;
+            }
+        }
+
+        idx++;
+    }
+    return ret;
 }
 
 /*
@@ -435,230 +485,292 @@
 int TsFileIOReader::search_from_leaf_node(
     std::shared_ptr<IComparable> target_name,
     std::shared_ptr<MetaIndexNode> index_node,
-    std::shared_ptr<IMetaIndexEntry> &ret_index_entry, int64_t &ret_end_offset) {
-  int ret = E_OK;
-  ret = index_node->binary_search_children(target_name, true, ret_index_entry,
-                                           ret_end_offset);
-  return ret;
+    std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
+    int64_t &ret_end_offset) {
+    int ret = E_OK;
+    ret = index_node->binary_search_children(target_name, true, ret_index_entry,
+                                             ret_end_offset);
+    return ret;
 }
 
 int TsFileIOReader::search_from_internal_node(
-    std::shared_ptr<IComparable> target_name,
-    std::shared_ptr<MetaIndexNode> index_node, std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
+    std::shared_ptr<IComparable> target_name, bool is_device,
+    std::shared_ptr<MetaIndexNode> index_node,
+    std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
     int64_t &ret_end_offset) {
-  int ret = E_OK;
-  std::shared_ptr<IMetaIndexEntry> index_entry;
-  int64_t end_offset = 0;
+    int ret = E_OK;
+    std::shared_ptr<IMetaIndexEntry> index_entry;
+    int64_t end_offset = 0;
 
-  ASSERT(index_node->node_type_ == INTERNAL_MEASUREMENT ||
-      index_node->node_type_ == INTERNAL_DEVICE);
-  if (RET_FAIL(index_node->binary_search_children(
-      target_name, /*exact=*/false, index_entry, end_offset))) {
-    return ret;
-  }
+    ASSERT(index_node->node_type_ == INTERNAL_MEASUREMENT ||
+           index_node->node_type_ == INTERNAL_DEVICE);
+    if (RET_FAIL(index_node->binary_search_children(
+            target_name, /*exact=*/false, index_entry, end_offset))) {
+        return ret;
+    }
 
-  while (IS_SUCC(ret)) {
-    // reader next level index node
-    const int read_size = end_offset - index_entry->get_offset();
+    while (IS_SUCC(ret)) {
+        // reader next level index node
+        const int read_size = end_offset - index_entry->get_offset();
 #if DEBUG_SE
-    std::cout << "search_from_internal_node, end_offset=" << end_offset
-              << ", index_entry.offset_=" << index_entry.get_offset()
-              << std::endl;
+        std::cout << "search_from_internal_node, end_offset=" << end_offset
+                  << ", index_entry.offset_=" << index_entry.get_offset()
+                  << std::endl;
 #endif
-    ASSERT(read_size > 0 && read_size < (1 << 30));
-    PageArena cur_level_index_node_pa;
-    void *buf = cur_level_index_node_pa.alloc(sizeof(MetaIndexNode));
-    char *data_buf = (char *) cur_level_index_node_pa.alloc(read_size);
-    if (IS_NULL(buf) || IS_NULL(data_buf)) {
-      return E_OOM;
+        ASSERT(read_size > 0 && read_size < (1 << 30));
+        PageArena cur_level_index_node_pa;
+        void *buf = cur_level_index_node_pa.alloc(sizeof(MetaIndexNode));
+        char *data_buf = (char *)cur_level_index_node_pa.alloc(read_size);
+        if (IS_NULL(buf) || IS_NULL(data_buf)) {
+            return E_OOM;
+        }
+        MetaIndexNode *cur_level_index_node =
+            new (buf) MetaIndexNode(&cur_level_index_node_pa);
+        int32_t ret_read_len = 0;
+        if (RET_FAIL(read_file_->read(index_entry->get_offset(), data_buf,
+                                      read_size, ret_read_len))) {
+        } else if (read_size != ret_read_len) {
+            return E_TSFILE_CORRUPTED;
+        }
+        if (!is_device) {
+            ret = cur_level_index_node->deserialize_from(data_buf, read_size);
+        } else {
+            ret = cur_level_index_node->device_deserialize_from(data_buf,
+                                                                read_size);
+        }
+        if (ret != E_OK) {
+            return ret;
+        }
+        if (cur_level_index_node->node_type_ == LEAF_DEVICE) {
+            ret = cur_level_index_node->binary_search_children(
+                target_name, /*exact=*/true, ret_index_entry, ret_end_offset);
+            cur_level_index_node->destroy();
+            return ret;  //// FIXME
+        } else if (cur_level_index_node->node_type_ == LEAF_MEASUREMENT) {
+            ret = cur_level_index_node->binary_search_children(
+                target_name, /*exact=*/false, ret_index_entry, ret_end_offset);
+            cur_level_index_node->destroy();
+            return ret;  //// FIXME
+        } else {
+            ret = cur_level_index_node->binary_search_children(
+                target_name, /*exact=*/false, index_entry, end_offset);
+            cur_level_index_node->destroy();
+        }
     }
-    MetaIndexNode *cur_level_index_node =
-        new(buf) MetaIndexNode(&cur_level_index_node_pa);
-    int32_t ret_read_len = 0;
-    if (RET_FAIL(read_file_->read(index_entry->get_offset(), data_buf, read_size,
-                                  ret_read_len))) {
-    } else if (read_size != ret_read_len) {
-      ret = E_TSFILE_CORRUPTED;
-    } else if (RET_FAIL(cur_level_index_node->device_deserialize_from(
-        data_buf, read_size))) {
-    } else {
-      if (cur_level_index_node->node_type_ == LEAF_DEVICE) {
-        ret = cur_level_index_node->binary_search_children(
-            target_name, /*exact=*/true, ret_index_entry,
-            ret_end_offset);
-        cur_level_index_node->destroy();
-        return ret; //// FIXME
-      } else if (cur_level_index_node->node_type_ == LEAF_MEASUREMENT) {
-        ret = cur_level_index_node->binary_search_children(
-            target_name, /*exact=*/false, ret_index_entry,
-            ret_end_offset);
-        cur_level_index_node->destroy();
-        return ret; //// FIXME
-      } else {
-        ret = cur_level_index_node->binary_search_children(
-            target_name, /*exact=*/false, index_entry, end_offset);
-        cur_level_index_node->destroy();
-      }
+    return ret;
+}
+
+bool TsFileIOReader::is_aligned_device(
+    std::shared_ptr<MetaIndexNode> measurement_node) {
+    auto entry = measurement_node->children_[0];
+    return entry->get_name().is_null() ||
+           entry->get_name().to_std_string() == "";
+}
+
+int TsFileIOReader::get_time_column_metadata(
+    std::shared_ptr<MetaIndexNode> measurement_node,
+    TimeseriesIndex *&ret_timeseries_index, PageArena &pa) {
+    int ret = E_OK;
+    if (!is_aligned_device(measurement_node)) {
+        return ret;
     }
-  }
-  return ret;
+    char *ti_buf = nullptr;
+    int start_idx = 0, end_idx = 0;
+    int ret_read_len = 0;
+    if (measurement_node->node_type_ == LEAF_MEASUREMENT) {
+        ByteStream buffer;
+        if (measurement_node->children_.size() > 1) {
+            start_idx = measurement_node->children_[0]->get_offset();
+            end_idx = measurement_node->children_[1]->get_offset();
+            ti_buf = pa.alloc(end_idx - start_idx);
+            if (RET_FAIL(read_file_->read(start_idx, ti_buf,
+                                          end_idx - start_idx, ret_read_len))) {
+                return ret;
+            }
+        } else {
+            start_idx = measurement_node->children_[0]->get_offset();
+            end_idx = measurement_node->end_offset_;
+            ti_buf = pa.alloc(end_idx - start_idx);
+            if (RET_FAIL(read_file_->read(start_idx, ti_buf,
+                                          end_idx - start_idx, ret_read_len))) {
+                return ret;
+            }
+        }
+        buffer.wrap_from(ti_buf, end_idx - start_idx);
+        void *buf = pa.alloc(sizeof(TimeseriesIndex));
+        if (IS_NULL(buf)) {
+            return E_OOM;
+        }
+        ret_timeseries_index = new (buf) TimeseriesIndex;
+        ret_timeseries_index->deserialize_from(buffer, &pa);
+    } else if (measurement_node->node_type_ == INTERNAL_MEASUREMENT) {
+        start_idx = measurement_node->children_[0]->get_offset();
+        end_idx = measurement_node->children_[1]->get_offset();
+        ti_buf = pa.alloc(end_idx - start_idx);
+        if (RET_FAIL(read_file_->read(start_idx, ti_buf, end_idx - start_idx,
+                                      ret_read_len))) {
+            return ret;
+        }
+        std::shared_ptr<MetaIndexNode> meta_index_node =
+            std::make_shared<MetaIndexNode>(&pa);
+        meta_index_node->deserialize_from(ti_buf, end_idx - start_idx);
+        return get_time_column_metadata(meta_index_node, ret_timeseries_index,
+                                        pa);
+    }
+    return ret;
 }
 
 int TsFileIOReader::do_load_timeseries_index(
     const std::string &measurement_name_str, int64_t start_offset,
     int64_t end_offset, PageArena &in_timeseries_index_pa,
-    ITimeseriesIndex *&ret_timeseries_index) {
-  ASSERT(end_offset > start_offset);
-  int ret = E_OK;
-  int32_t read_size = (int32_t) (end_offset - start_offset);
-  int32_t ret_read_len = 0;
-  char *ti_buf = (char *) mem_alloc(read_size, MOD_TSFILE_READER);
-  if (IS_NULL(ti_buf)) {
-    return E_OOM;
-  }
-  if (RET_FAIL(
-      read_file_->read(start_offset, ti_buf, read_size, ret_read_len))) {
-  } else {
-    ByteStream bs;
-    bs.wrap_from(ti_buf, read_size);
-    const String target_measurement_name(
-        (char *) measurement_name_str.c_str(),
-        strlen(measurement_name_str.c_str()));
-    bool found = false;
-#if DEBUG_SE
-    std::cout << "do_load_timeseries_index, reader file at " << start_offset
-              << " to " << end_offset << std::endl;
-#endif
-    bool is_aligned = false;
-    AlignedTimeseriesIndex *aligned_ts_idx = nullptr;
-    while (IS_SUCC(ret)) {
-      TimeseriesIndex cur_timeseries_index;
-      PageArena cur_timeseries_index_pa;
-      cur_timeseries_index_pa.init(512, MOD_TSFILE_READER); // TODO 512
-      if (RET_FAIL(cur_timeseries_index.deserialize_from(
-          bs, &cur_timeseries_index_pa))) {
-      } else if (is_aligned ||
-          cur_timeseries_index.get_data_type() == common::VECTOR) {
-        if (!is_aligned) {
-          is_aligned = true;
-          void *buf = in_timeseries_index_pa.alloc(
-              sizeof(AlignedTimeseriesIndex));
-          aligned_ts_idx = new(buf) AlignedTimeseriesIndex;
-          buf = in_timeseries_index_pa.alloc(sizeof(TimeseriesIndex));
-          aligned_ts_idx->time_ts_idx_ = new(buf) TimeseriesIndex;
-          aligned_ts_idx->time_ts_idx_->clone_from(
-              cur_timeseries_index, &in_timeseries_index_pa);
-        } else if (cur_timeseries_index.get_measurement_name().equal_to(
-            target_measurement_name)) {
-          void *buf =
-              in_timeseries_index_pa.alloc(sizeof(TimeseriesIndex));
-          aligned_ts_idx->value_ts_idx_ = new(buf) TimeseriesIndex;
-          aligned_ts_idx->value_ts_idx_->clone_from(
-              cur_timeseries_index, &in_timeseries_index_pa);
-          ret_timeseries_index = aligned_ts_idx;
-          found = true;
-          break;
-        }
-      } else if (!is_aligned &&
-          cur_timeseries_index.get_measurement_name().equal_to(
-              target_measurement_name)) {
-        void *buf =
-            in_timeseries_index_pa.alloc(sizeof(TimeseriesIndex));
-        auto ts_idx = new(buf) TimeseriesIndex;
-        ts_idx->clone_from(cur_timeseries_index,
-                           &in_timeseries_index_pa);
-        ret_timeseries_index = ts_idx;
-        found = true;
-        break;
-      }
-    } // end while
-    if (!found) {
-      ret = E_NOT_EXIST;
+    ITimeseriesIndex *&ret_timeseries_index, bool is_aligned) {
+    ASSERT(end_offset > start_offset);
+    int ret = E_OK;
+    int32_t read_size = (int32_t)(end_offset - start_offset);
+    int32_t ret_read_len = 0;
+    char *ti_buf = (char *)mem_alloc(read_size, MOD_TSFILE_READER);
+    if (IS_NULL(ti_buf)) {
+        return E_OOM;
     }
-  }
-  mem_free(ti_buf);
-  return ret;
+    if (RET_FAIL(
+            read_file_->read(start_offset, ti_buf, read_size, ret_read_len))) {
+    } else {
+        ByteStream bs;
+        bs.wrap_from(ti_buf, read_size);
+        const String target_measurement_name(
+            (char *)measurement_name_str.c_str(),
+            strlen(measurement_name_str.c_str()));
+        bool found = false;
+#if DEBUG_SE
+        std::cout << "do_load_timeseries_index, reader file at " << start_offset
+                  << " to " << end_offset << std::endl;
+#endif
+        while (IS_SUCC(ret)) {
+            TimeseriesIndex cur_timeseries_index;
+            PageArena cur_timeseries_index_pa;
+            cur_timeseries_index_pa.init(512, MOD_TSFILE_READER);  // TODO 512
+            if (RET_FAIL(cur_timeseries_index.deserialize_from(
+                    bs, &cur_timeseries_index_pa))) {
+            } else if (is_aligned &&
+                       cur_timeseries_index.get_measurement_name().equal_to(
+                           target_measurement_name)) {
+                void *buf = in_timeseries_index_pa.alloc(
+                    sizeof(AlignedTimeseriesIndex));
+                if (IS_NULL(buf)) {
+                    return E_OOM;
+                }
+                AlignedTimeseriesIndex *aligned_ts_idx =
+                    new (buf) AlignedTimeseriesIndex;
+                buf = in_timeseries_index_pa.alloc(sizeof(TimeseriesIndex));
+                if (IS_NULL(buf)) {
+                    return E_OOM;
+                }
+                aligned_ts_idx->value_ts_idx_ = new (buf) TimeseriesIndex;
+                aligned_ts_idx->value_ts_idx_->clone_from(
+                    cur_timeseries_index, &in_timeseries_index_pa);
+                ret_timeseries_index = aligned_ts_idx;
+                found = true;
+                break;
+            } else if (!is_aligned &&
+                       cur_timeseries_index.get_measurement_name().equal_to(
+                           target_measurement_name)) {
+                void *buf =
+                    in_timeseries_index_pa.alloc(sizeof(TimeseriesIndex));
+                auto ts_idx = new (buf) TimeseriesIndex;
+                ts_idx->clone_from(cur_timeseries_index,
+                                   &in_timeseries_index_pa);
+                ret_timeseries_index = ts_idx;
+                found = true;
+                break;
+            }
+        }  // end while
+        if (!found) {
+            ret = E_NOT_EXIST;
+        }
+    }
+    mem_free(ti_buf);
+    return ret;
 }
 
 int TsFileIOReader::do_load_all_timeseries_index(
-    std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> > &
-    index_node_entry_list,
+    std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
+        &index_node_entry_list,
     common::PageArena &in_timeseries_index_pa,
     std::vector<ITimeseriesIndex *> &ts_indexs) {
-  int ret = E_OK;
-  for (const auto &index_node_entry : index_node_entry_list) {
-    int64_t start_offset = index_node_entry.first->get_offset(),
-        end_offset = index_node_entry.second;
-    const std::string target_measurement_name(
-        index_node_entry.first->get_name().to_std_string());
-    ITimeseriesIndex *ts_idx;
-    ret = do_load_timeseries_index(target_measurement_name, start_offset,
-                                   end_offset, in_timeseries_index_pa,
-                                   ts_idx);
-    if (IS_SUCC(ret)) {
-      ts_indexs.push_back(ts_idx);
+    int ret = E_OK;
+    for (const auto &index_node_entry : index_node_entry_list) {
+        int64_t start_offset = index_node_entry.first->get_offset(),
+                end_offset = index_node_entry.second;
+        const std::string target_measurement_name(
+            index_node_entry.first->get_name().to_std_string());
+        ITimeseriesIndex *ts_idx;
+        ret = do_load_timeseries_index(target_measurement_name, start_offset,
+                                       end_offset, in_timeseries_index_pa,
+                                       ts_idx);
+        if (IS_SUCC(ret)) {
+            ts_indexs.push_back(ts_idx);
+        }
     }
-  }
-  return ret;
+    return ret;
 }
 
 int TsFileIOReader::get_all_leaf(
     std::shared_ptr<MetaIndexNode> index_node,
-    std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> > &
-    index_node_entry_list) {
-  int ret = E_OK;
-  if (index_node->node_type_ == LEAF_MEASUREMENT ||
-      index_node->node_type_ == LEAF_DEVICE) {
-    for (size_t i = 0; i < index_node->children_.size(); i++) {
-      if (i + 1 < index_node->children_.size()) {
-        index_node_entry_list.push_back(
-            std::make_pair(index_node->children_[i],
-                           index_node->children_[i + 1]->get_offset()));
-      } else {
-        index_node_entry_list.push_back(std::make_pair(
-            index_node->children_[i], index_node->end_offset_));
-      }
-    }
-  } else {
-    // read next level index node
-    for (size_t i = 0; i < index_node->children_.size(); i++) {
-      int64_t end_offset = index_node->end_offset_;
-      if (i + 1 < index_node->children_.size()) {
-        end_offset = index_node->children_[i + 1]->get_offset();
-      }
-      const int read_size =
-          end_offset - index_node->children_[i]->get_offset();
+    std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
+        &index_node_entry_list) {
+    int ret = E_OK;
+    if (index_node->node_type_ == LEAF_MEASUREMENT ||
+        index_node->node_type_ == LEAF_DEVICE) {
+        for (size_t i = 0; i < index_node->children_.size(); i++) {
+            if (i + 1 < index_node->children_.size()) {
+                index_node_entry_list.push_back(
+                    std::make_pair(index_node->children_[i],
+                                   index_node->children_[i + 1]->get_offset()));
+            } else {
+                index_node_entry_list.push_back(std::make_pair(
+                    index_node->children_[i], index_node->end_offset_));
+            }
+        }
+    } else {
+        // read next level index node
+        for (size_t i = 0; i < index_node->children_.size(); i++) {
+            int64_t end_offset = index_node->end_offset_;
+            if (i + 1 < index_node->children_.size()) {
+                end_offset = index_node->children_[i + 1]->get_offset();
+            }
+            const int read_size =
+                end_offset - index_node->children_[i]->get_offset();
 #if DEBUG_SE
-      std::cout << "search_from_internal_node, end_offset=" << end_offset
-                << ", index_entry.offset_=" << index_node->children_[i]->get_offset()
-                << std::endl;
+            std::cout << "search_from_internal_node, end_offset=" << end_offset
+                      << ", index_entry.offset_="
+                      << index_node->children_[i]->get_offset() << std::endl;
 #endif
-      ASSERT(read_size > 0 && read_size < (1 << 30));
-      PageArena cur_level_index_node_pa;
-      void *buf = cur_level_index_node_pa.alloc(sizeof(MetaIndexNode));
-      char *data_buf = (char *) cur_level_index_node_pa.alloc(read_size);
-      if (IS_NULL(buf) || IS_NULL(data_buf)) {
-        return E_OOM;
-      }
-      auto *cur_level_index_node_ptr = new(buf) MetaIndexNode(
-          &cur_level_index_node_pa);
-      auto cur_level_index_node = std::shared_ptr<MetaIndexNode>(
-          cur_level_index_node_ptr, MetaIndexNode::self_deleter);
+            ASSERT(read_size > 0 && read_size < (1 << 30));
+            PageArena cur_level_index_node_pa;
+            void *buf = cur_level_index_node_pa.alloc(sizeof(MetaIndexNode));
+            char *data_buf = (char *)cur_level_index_node_pa.alloc(read_size);
+            if (IS_NULL(buf) || IS_NULL(data_buf)) {
+                return E_OOM;
+            }
+            auto *cur_level_index_node_ptr =
+                new (buf) MetaIndexNode(&cur_level_index_node_pa);
+            auto cur_level_index_node = std::shared_ptr<MetaIndexNode>(
+                cur_level_index_node_ptr, MetaIndexNode::self_deleter);
 
-      int32_t ret_read_len = 0;
-      if (RET_FAIL(
-          read_file_->read(index_node->children_[i]->get_offset(),
-                           data_buf, read_size, ret_read_len))) {
-      } else if (read_size != ret_read_len) {
-        ret = E_TSFILE_CORRUPTED;
-      } else if (RET_FAIL(cur_level_index_node->deserialize_from(
-          data_buf, read_size))) {
-      } else {
-        ret = get_all_leaf(cur_level_index_node, index_node_entry_list);
-      }
+            int32_t ret_read_len = 0;
+            if (RET_FAIL(
+                    read_file_->read(index_node->children_[i]->get_offset(),
+                                     data_buf, read_size, ret_read_len))) {
+            } else if (read_size != ret_read_len) {
+                ret = E_TSFILE_CORRUPTED;
+            } else if (RET_FAIL(cur_level_index_node->deserialize_from(
+                           data_buf, read_size))) {
+            } else {
+                ret = get_all_leaf(cur_level_index_node, index_node_entry_list);
+            }
+        }
     }
-  }
-  return ret;
+    return ret;
 }
 #if 0
 int TsFileIOReader::get_next(const std::string &device_path,
@@ -767,4 +879,4 @@
   return ret;
 }
 #endif
-} // end namespace storage
+}  // end namespace storage
diff --git a/cpp/src/file/tsfile_io_reader.h b/cpp/src/file/tsfile_io_reader.h
index bd4a293..709dd35 100644
--- a/cpp/src/file/tsfile_io_reader.h
+++ b/cpp/src/file/tsfile_io_reader.h
@@ -77,8 +77,7 @@
                                 std::vector<ChunkMeta *> &chunk_meta_list);
     int read_device_meta_index(int32_t start_offset, int32_t end_offset,
                                common::PageArena &pa,
-                               MetaIndexNode *&device_meta_index,
-                               bool leaf);
+                               MetaIndexNode *&device_meta_index, bool leaf);
     int get_timeseries_indexes(
         std::shared_ptr<IDeviceID> device_id,
         const std::unordered_set<std::string> &measurement_names,
@@ -92,13 +91,15 @@
 
     int load_tsfile_meta_if_necessary();
 
-    int load_device_index_entry(std::shared_ptr<IComparable> target_name,
-                                std::shared_ptr<IMetaIndexEntry> &device_index_entry,
-                                int64_t &end_offset);
+    int load_device_index_entry(
+        std::shared_ptr<IComparable> target_name,
+        std::shared_ptr<IMetaIndexEntry> &device_index_entry,
+        int64_t &end_offset);
 
     int load_measurement_index_entry(
-        const std::string &measurement_name, int64_t start_offset,
-        int64_t end_offset, std::shared_ptr<IMetaIndexEntry> &ret_measurement_index_entry,
+        const std::string &measurement_name,
+        std::shared_ptr<MetaIndexNode> top_node,
+        std::shared_ptr<IMetaIndexEntry> &ret_measurement_index_entry,
         int64_t &ret_end_offset);
 
     int load_all_measurement_index_entry(
@@ -106,10 +107,17 @@
         std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
             &ret_measurement_index_entry);
 
+    bool is_aligned_device(std::shared_ptr<MetaIndexNode> measurement_node);
+
+    int get_time_column_metadata(
+        std::shared_ptr<MetaIndexNode> measurement_node,
+        TimeseriesIndex *&ret_timeseries_index, common::PageArena &pa);
+
     int do_load_timeseries_index(const std::string &measurement_name_str,
                                  int64_t start_offset, int64_t end_offset,
                                  common::PageArena &pa,
-                                 ITimeseriesIndex *&ts_index);
+                                 ITimeseriesIndex *&ts_index,
+                                 bool is_aligned = false);
 
     int do_load_all_timeseries_index(
         std::vector<std::pair<std::shared_ptr<IMetaIndexEntry>, int64_t> >
@@ -126,10 +134,11 @@
                               std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
                               int64_t &ret_end_offset);
 
-    int search_from_internal_node(std::shared_ptr<IComparable> target_name,
-                                  std::shared_ptr<MetaIndexNode> index_node,
-                                  std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
-                                  int64_t &ret_end_offset);
+    int search_from_internal_node(
+        std::shared_ptr<IComparable> target_name, bool is_device,
+        std::shared_ptr<MetaIndexNode> index_node,
+        std::shared_ptr<IMetaIndexEntry> &ret_index_entry,
+        int64_t &ret_end_offset);
 
     bool filter_stasify(ITimeseriesIndex *ts_index, Filter *time_filter);
 
diff --git a/cpp/src/file/tsfile_io_writer.cc b/cpp/src/file/tsfile_io_writer.cc
index af4c29d..f65aebc 100644
--- a/cpp/src/file/tsfile_io_writer.cc
+++ b/cpp/src/file/tsfile_io_writer.cc
@@ -409,7 +409,6 @@
             // Time column also need add to bloom filter.
             ret = filter.add_path_entry(tmp_device_name, measurement_name);
 
-
             if (RET_FAIL(ts_index.serialize_to(write_stream_))) {
             } else {
 #if DEBUG_SE
@@ -461,11 +460,11 @@
         tsfile_meta.table_metadata_index_node_map_ = table_nodes_map;
         tsfile_meta.table_schemas_ = schema_->table_schema_map_;
         tsfile_meta.tsfile_properties_.insert(
-            std::make_pair("encryptLevel", encrypt_level_));
+            std::make_pair("encryptLevel", new std::string(encrypt_level_)));
         tsfile_meta.tsfile_properties_.insert(
-            std::make_pair("encryptType", encrypt_type_));
+            std::make_pair("encryptType", new std::string(encrypt_type_)));
         tsfile_meta.tsfile_properties_.insert(
-            std::make_pair("encryptKey", encrypt_key_));
+            std::make_pair("encryptKey", nullptr));
 #if DEBUG_SE
         auto tsfile_meta_offset = write_stream_.total_size();
 #endif
@@ -730,6 +729,9 @@
             alloc_and_init_meta_index_node(wmm, cur_index_node, node_type))) {
     }
     while (IS_SUCC(ret)) {
+        for (auto iter = to->begin(); iter != to->end(); iter++) {
+            iter.get().reset();
+        }
         to->clear();
         SimpleList<std::shared_ptr<MetaIndexNode>>::Iterator from_iter;
         for (from_iter = from->begin();
diff --git a/cpp/src/file/tsfile_io_writer.h b/cpp/src/file/tsfile_io_writer.h
index 4de4fce..6debbf0 100644
--- a/cpp/src/file/tsfile_io_writer.h
+++ b/cpp/src/file/tsfile_io_writer.h
@@ -25,185 +25,197 @@
 
 #include "common/allocator/page_arena.h"
 #include "common/container/list.h"
+#include "common/global.h"
 #include "common/schema.h"
 #include "common/tsfile_common.h"
 #include "reader/bloom_filter.h"
 #include "write_file.h"
-#include "common/global.h"
 
 namespace storage {
 
 struct FileIndexWritingMemManager {
-  common::PageArena pa_;
-  std::vector<std::shared_ptr<MetaIndexNode>> all_index_nodes_;
+    common::PageArena pa_;
+    std::vector<std::shared_ptr<MetaIndexNode>> all_index_nodes_;
 
-  FileIndexWritingMemManager() {
-    pa_.init(512, common::MOD_WRITER_INDEX_NODE);
-  }
-  ~FileIndexWritingMemManager() {
-    for (size_t i = 0; i < all_index_nodes_.size(); i++) {
-      all_index_nodes_[i]->children_.clear();
+    FileIndexWritingMemManager() {
+        pa_.init(512, common::MOD_WRITER_INDEX_NODE);
     }
-    all_index_nodes_.clear();
-  }
+    ~FileIndexWritingMemManager() {
+        for (size_t i = 0; i < all_index_nodes_.size(); i++) {
+            all_index_nodes_[i]->children_.clear();
+        }
+        all_index_nodes_.clear();
+    }
 };
 
 class TsFileIOWriter {
-public:
-  typedef std::map<std::shared_ptr<IDeviceID>, std::shared_ptr<MetaIndexNode>, IDeviceIDComparator>
-      DeviceNodeMap;
-  typedef DeviceNodeMap::iterator DeviceNodeMapIterator;
+   public:
+    typedef std::map<std::shared_ptr<IDeviceID>, std::shared_ptr<MetaIndexNode>,
+                     IDeviceIDComparator>
+        DeviceNodeMap;
+    typedef DeviceNodeMap::iterator DeviceNodeMapIterator;
 
-public:
-  static const uint32_t WRITE_STREAM_PAGE_SIZE = 512;  // FIXME
-public:
-  TsFileIOWriter()
-      : meta_allocator_(),
-        write_stream_(WRITE_STREAM_PAGE_SIZE, common::MOD_TSFILE_WRITE_STREAM,
-            /*atomic*/ true),
-        write_stream_consumer_(write_stream_),
-        cur_chunk_meta_(nullptr),
-        cur_chunk_group_meta_(nullptr),
-        chunk_meta_count_(0),
-        chunk_group_meta_list_(&meta_allocator_),
-        use_prev_alloc_cgm_(false),
-        cur_device_name_(),
-        file_(nullptr),
-        ts_time_index_vector_(),
-        write_file_created_(false),
-        generate_table_schema_(false),
-        schema_(std::make_shared<Schema>()) {
-    if (common::g_config_value_.encrypt_flag_) {
-      // TODO: support encrypt
-      encrypt_level_ = "2";
-      encrypt_type_ = "";
-      encrypt_key_ = "";
-    } else {
-      encrypt_level_ = "0";
-      encrypt_type_ = "org.apache.tsfile.encrypt.UNENCRYPTED";
-      encrypt_key_ = "";
+   public:
+    static const uint32_t WRITE_STREAM_PAGE_SIZE = 512;  // FIXME
+   public:
+    TsFileIOWriter()
+        : meta_allocator_(),
+          write_stream_(WRITE_STREAM_PAGE_SIZE, common::MOD_TSFILE_WRITE_STREAM,
+                        /*atomic*/ true),
+          write_stream_consumer_(write_stream_),
+          cur_chunk_meta_(nullptr),
+          cur_chunk_group_meta_(nullptr),
+          chunk_meta_count_(0),
+          chunk_group_meta_list_(&meta_allocator_),
+          use_prev_alloc_cgm_(false),
+          cur_device_name_(),
+          file_(nullptr),
+          ts_time_index_vector_(),
+          write_file_created_(false),
+          generate_table_schema_(false),
+          schema_(std::make_shared<Schema>()) {
+        if (common::g_config_value_.encrypt_flag_) {
+            // TODO: support encrypt
+            encrypt_level_ = "2";
+            encrypt_type_ = "";
+            encrypt_key_ = "";
+        } else {
+            encrypt_level_ = "0";
+            encrypt_type_ = "org.apache.tsfile.encrypt.UNENCRYPTED";
+            encrypt_key_ = "";
+        }
     }
-  }
-  ~TsFileIOWriter() { destroy(); }
+    ~TsFileIOWriter() { destroy(); }
 
 #ifndef LIBTSFILE_SDK
-  int init();
-  FORCE_INLINE common::FileID get_file_id() { return file_->get_file_id(); }
+    int init();
+    FORCE_INLINE common::FileID get_file_id() { return file_->get_file_id(); }
 #endif
-  int init(WriteFile *write_file);
-  void destroy();
+    int init(WriteFile *write_file);
+    void destroy();
 
-  void set_generate_table_schema(bool generate_table_schema);
-  int start_file();
-  int start_flush_chunk_group(std::shared_ptr<IDeviceID> device_id,
-                              bool is_aligned = false);
-  int start_flush_chunk(common::ByteStream &chunk_data,
-                        common::ColumnSchema &col_schema, int32_t num_of_pages);
-  int start_flush_chunk(common::ByteStream &chunk_data,
-                        std::string &measurement_name,
-                        common::TSDataType data_type,
-                        common::TSEncoding encoding,
-                        common::CompressionType compression,
-                        int32_t num_of_pages);
-  int flush_chunk(common::ByteStream &chunk_data);
-  int end_flush_chunk(Statistic *chunk_statistic);
-  int end_flush_chunk_group(bool is_aligned = false);
-  int end_file();
+    void set_generate_table_schema(bool generate_table_schema);
+    int start_file();
+    int start_flush_chunk_group(std::shared_ptr<IDeviceID> device_id,
+                                bool is_aligned = false);
+    int start_flush_chunk(common::ByteStream &chunk_data,
+                          common::ColumnSchema &col_schema,
+                          int32_t num_of_pages);
+    int start_flush_chunk(common::ByteStream &chunk_data,
+                          std::string &measurement_name,
+                          common::TSDataType data_type,
+                          common::TSEncoding encoding,
+                          common::CompressionType compression,
+                          int32_t num_of_pages);
+    int flush_chunk(common::ByteStream &chunk_data);
+    int end_flush_chunk(Statistic *chunk_statistic);
+    int end_flush_chunk_group(bool is_aligned = false);
+    int end_file();
 
-  FORCE_INLINE std::vector<TimeseriesTimeIndexEntry> &
-  get_ts_time_index_vector() {
-    return ts_time_index_vector_;
-  }
-  FORCE_INLINE std::string get_file_path() { return file_->get_file_path(); }
-  FORCE_INLINE std::shared_ptr<Schema> get_schema() { return schema_; }
-private:
-  int write_log_index_range();
-  int write_file_index();
-  FORCE_INLINE int sync_file() { return file_->sync(); }
-  FORCE_INLINE int close_file() { return file_->close(); }
-  int flush_stream_to_file();
-  int write_chunk_data(common::ByteStream &chunk_data);
-  FORCE_INLINE int64_t cur_file_position() const {
-    return write_stream_.total_size();
-  }
-  FORCE_INLINE int write_buf(const char *buf, uint32_t len) {
-    return write_stream_.write_buf(buf, len);
-  }
-  FORCE_INLINE int write_byte(const char byte) {
-    return common::SerializationUtil::write_char(byte, write_stream_);
-  }
-  FORCE_INLINE int write_string(const std::string &str) {
-    int ret = common::E_OK;
-    if (RET_FAIL(common::SerializationUtil::write_var_int(str.size(),
-                                                          write_stream_))) {
-    } else if (RET_FAIL(write_stream_.write_buf(str.c_str(), str.size()))) {
+    FORCE_INLINE std::vector<TimeseriesTimeIndexEntry> &
+    get_ts_time_index_vector() {
+        return ts_time_index_vector_;
     }
-    return ret;
-  }
-  int write_file_footer();
-  int build_device_level(DeviceNodeMap &device_map, std::shared_ptr<MetaIndexNode> &ret_root,
-                         FileIndexWritingMemManager &wmm);
-  int alloc_and_init_meta_index_entry(FileIndexWritingMemManager &wmm,
-                                      std::shared_ptr<IMetaIndexEntry> &ret_entry,
-                                      common::String &name);
-  int alloc_and_init_meta_index_entry(
-      FileIndexWritingMemManager &wmm,
-      std::shared_ptr<IMetaIndexEntry> &ret_entry,
-      const std::shared_ptr<IDeviceID> &device_id);
-  int alloc_and_init_meta_index_node(FileIndexWritingMemManager &wmm,
-                                     std::shared_ptr<MetaIndexNode> &ret_node,
-                                     MetaIndexNodeType node_type);
-  int add_cur_index_node_to_queue(std::shared_ptr<MetaIndexNode> node,
-                                  common::SimpleList<std::shared_ptr<MetaIndexNode>> *queue) const;
-  int alloc_meta_index_node_queue(
-      FileIndexWritingMemManager &wmm, common::SimpleList<std::shared_ptr<MetaIndexNode>> *&queue);
-  int add_device_node(
-      DeviceNodeMap &device_map, std::shared_ptr<IDeviceID> device_id,
-      common::SimpleList<std::shared_ptr<MetaIndexNode>> *measurement_index_node_queue,
-      FileIndexWritingMemManager &wmm);
-  void destroy_node_list(common::SimpleList<std::shared_ptr<MetaIndexNode>> *list);
-  int clone_node_list(common::SimpleList<std::shared_ptr<MetaIndexNode>> *src,
-                      common::SimpleList<std::shared_ptr<MetaIndexNode>> *dest);
-  int generate_root(common::SimpleList<std::shared_ptr<MetaIndexNode>> *node_queue,
-                    std::shared_ptr<MetaIndexNode> &root_node, MetaIndexNodeType node_type,
-                    FileIndexWritingMemManager &wmm);
-  FORCE_INLINE void swap_list(common::SimpleList<std::shared_ptr<MetaIndexNode>> *&l1,
-                              common::SimpleList<std::shared_ptr<MetaIndexNode>> *&l2) {
-    auto tmp = l1;
-    l1 = l2;
-    l2 = tmp;
-  }
+    FORCE_INLINE std::string get_file_path() { return file_->get_file_path(); }
+    FORCE_INLINE std::shared_ptr<Schema> get_schema() { return schema_; }
 
-  std::shared_ptr<MetaIndexNode> check_and_build_level_index(DeviceNodeMap &device_metadata_index_map);
+   private:
+    int write_log_index_range();
+    int write_file_index();
+    FORCE_INLINE int sync_file() { return file_->sync(); }
+    FORCE_INLINE int close_file() { return file_->close(); }
+    int flush_stream_to_file();
+    int write_chunk_data(common::ByteStream &chunk_data);
+    FORCE_INLINE int64_t cur_file_position() const {
+        return write_stream_.total_size();
+    }
+    FORCE_INLINE int write_buf(const char *buf, uint32_t len) {
+        return write_stream_.write_buf(buf, len);
+    }
+    FORCE_INLINE int write_byte(const char byte) {
+        return common::SerializationUtil::write_char(byte, write_stream_);
+    }
+    FORCE_INLINE int write_string(const std::string &str) {
+        int ret = common::E_OK;
+        if (RET_FAIL(common::SerializationUtil::write_var_int(str.size(),
+                                                              write_stream_))) {
+        } else if (RET_FAIL(write_stream_.write_buf(str.c_str(), str.size()))) {
+        }
+        return ret;
+    }
+    int write_file_footer();
+    int build_device_level(DeviceNodeMap &device_map,
+                           std::shared_ptr<MetaIndexNode> &ret_root,
+                           FileIndexWritingMemManager &wmm);
+    int alloc_and_init_meta_index_entry(
+        FileIndexWritingMemManager &wmm,
+        std::shared_ptr<IMetaIndexEntry> &ret_entry, common::String &name);
+    int alloc_and_init_meta_index_entry(
+        FileIndexWritingMemManager &wmm,
+        std::shared_ptr<IMetaIndexEntry> &ret_entry,
+        const std::shared_ptr<IDeviceID> &device_id);
+    int alloc_and_init_meta_index_node(FileIndexWritingMemManager &wmm,
+                                       std::shared_ptr<MetaIndexNode> &ret_node,
+                                       MetaIndexNodeType node_type);
+    int add_cur_index_node_to_queue(
+        std::shared_ptr<MetaIndexNode> node,
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *queue) const;
+    int alloc_meta_index_node_queue(
+        FileIndexWritingMemManager &wmm,
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *&queue);
+    int add_device_node(DeviceNodeMap &device_map,
+                        std::shared_ptr<IDeviceID> device_id,
+                        common::SimpleList<std::shared_ptr<MetaIndexNode>>
+                            *measurement_index_node_queue,
+                        FileIndexWritingMemManager &wmm);
+    void destroy_node_list(
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *list);
+    int clone_node_list(
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *src,
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *dest);
+    int generate_root(
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *node_queue,
+        std::shared_ptr<MetaIndexNode> &root_node, MetaIndexNodeType node_type,
+        FileIndexWritingMemManager &wmm);
+    FORCE_INLINE void swap_list(
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *&l1,
+        common::SimpleList<std::shared_ptr<MetaIndexNode>> *&l2) {
+        auto tmp = l1;
+        l1 = l2;
+        l2 = tmp;
+    }
 
-  int write_separator_marker(int64_t &meta_offset);
+    std::shared_ptr<MetaIndexNode> check_and_build_level_index(
+        DeviceNodeMap &device_metadata_index_map);
 
-  // for bloom filter
-  int init_bloom_filter(BloomFilter &filter);
-  int32_t get_path_count(common::SimpleList<ChunkGroupMeta *> &cgm_list);
+    int write_separator_marker(int64_t &meta_offset);
 
-  // for open file
-  void add_ts_time_index_entry(TimeseriesIndex &ts_index);
+    // for bloom filter
+    int init_bloom_filter(BloomFilter &filter);
+    int32_t get_path_count(common::SimpleList<ChunkGroupMeta *> &cgm_list);
 
-private:
-  common::PageArena meta_allocator_;
-  common::ByteStream write_stream_;
-  common::ByteStream::Consumer write_stream_consumer_;
-  ChunkMeta *cur_chunk_meta_;
-  ChunkGroupMeta *cur_chunk_group_meta_;
-  int32_t chunk_meta_count_;  // for debug
-  common::SimpleList<ChunkGroupMeta *> chunk_group_meta_list_;
-  bool use_prev_alloc_cgm_;   // chunk group meta
-  std::shared_ptr<IDeviceID> cur_device_name_;
-  WriteFile *file_;
-  std::vector<TimeseriesTimeIndexEntry> ts_time_index_vector_;
-  bool write_file_created_;
-  bool generate_table_schema_;
-  std::shared_ptr<Schema> schema_;
-  std::string encrypt_level_;
-  std::string encrypt_type_;
-  std::string encrypt_key_;
-  bool is_aligned_;
+    // for open file
+    void add_ts_time_index_entry(TimeseriesIndex &ts_index);
+
+   private:
+    common::PageArena meta_allocator_;
+    common::ByteStream write_stream_;
+    common::ByteStream::Consumer write_stream_consumer_;
+    ChunkMeta *cur_chunk_meta_;
+    ChunkGroupMeta *cur_chunk_group_meta_;
+    int32_t chunk_meta_count_;  // for debug
+    common::SimpleList<ChunkGroupMeta *> chunk_group_meta_list_;
+    bool use_prev_alloc_cgm_;  // chunk group meta
+    std::shared_ptr<IDeviceID> cur_device_name_;
+    WriteFile *file_;
+    std::vector<TimeseriesTimeIndexEntry> ts_time_index_vector_;
+    bool write_file_created_;
+    bool generate_table_schema_;
+    std::shared_ptr<Schema> schema_;
+    std::string encrypt_level_;
+    std::string encrypt_type_;
+    std::string encrypt_key_;
+    bool is_aligned_;
 };
 
 }  // end namespace storage
diff --git a/cpp/src/parser/generated/PathLexer.cpp b/cpp/src/parser/generated/PathLexer.cpp
index bb4dcd2..b14856b 100644
--- a/cpp/src/parser/generated/PathLexer.cpp
+++ b/cpp/src/parser/generated/PathLexer.cpp
@@ -18,55 +18,42 @@
  */
 // Generated from PathLexer.g4 by ANTLR 4.9.3
 
-
 #include "PathLexer.h"
 
-
 using namespace antlr4;
 
-
-PathLexer::PathLexer(CharStream *input) : Lexer(input) {
-  _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache);
+PathLexer::PathLexer(CharStream* input) : Lexer(input) {
+    _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA,
+                                              _sharedContextCache);
 }
 
-PathLexer::~PathLexer() {
-  delete _interpreter;
-}
+PathLexer::~PathLexer() { delete _interpreter; }
 
-std::string PathLexer::getGrammarFileName() const {
-  return "PathLexer.g4";
-}
+std::string PathLexer::getGrammarFileName() const { return "PathLexer.g4"; }
 
 const std::vector<std::string>& PathLexer::getRuleNames() const {
-  return _ruleNames;
+    return _ruleNames;
 }
 
 const std::vector<std::string>& PathLexer::getChannelNames() const {
-  return _channelNames;
+    return _channelNames;
 }
 
 const std::vector<std::string>& PathLexer::getModeNames() const {
-  return _modeNames;
+    return _modeNames;
 }
 
 const std::vector<std::string>& PathLexer::getTokenNames() const {
-  return _tokenNames;
+    return _tokenNames;
 }
 
-dfa::Vocabulary& PathLexer::getVocabulary() const {
-  return _vocabulary;
-}
+dfa::Vocabulary& PathLexer::getVocabulary() const { return _vocabulary; }
 
 const std::vector<uint16_t> PathLexer::getSerializedATN() const {
-  return _serializedATN;
+    return _serializedATN;
 }
 
-const atn::ATN& PathLexer::getATN() const {
-  return _atn;
-}
-
-
-
+const atn::ATN& PathLexer::getATN() const { return _atn; }
 
 // Static vars and initialization.
 std::vector<dfa::DFA> PathLexer::_decisionToDFA;
@@ -76,395 +63,591 @@
 atn::ATN PathLexer::_atn;
 std::vector<uint16_t> PathLexer::_serializedATN;
 
-std::vector<std::string> PathLexer::_ruleNames = {
-  "ROOT", "WS", "TIME", "TIMESTAMP", "MINUS", "PLUS", "DIV", "MOD", "OPERATOR_DEQ", 
-  "OPERATOR_SEQ", "OPERATOR_GT", "OPERATOR_GTE", "OPERATOR_LT", "OPERATOR_LTE", 
-  "OPERATOR_NEQ", "OPERATOR_BITWISE_AND", "OPERATOR_LOGICAL_AND", "OPERATOR_BITWISE_OR", 
-  "OPERATOR_LOGICAL_OR", "OPERATOR_NOT", "DOT", "COMMA", "SEMI", "STAR", 
-  "DOUBLE_STAR", "LR_BRACKET", "RR_BRACKET", "LS_BRACKET", "RS_BRACKET", 
-  "DOUBLE_COLON", "STRING_LITERAL", "DURATION_LITERAL", "DATETIME_LITERAL", 
-  "DATE_LITERAL", "TIME_LITERAL", "INTEGER_LITERAL", "EXPONENT_NUM_PART", 
-  "DEC_DIGIT", "ID", "QUOTED_ID", "NAME_CHAR", "CN_CHAR", "DQUOTA_STRING", 
-  "SQUOTA_STRING", "BQUOTA_STRING", "A", "B", "C", "D", "E", "F", "G", "H", 
-  "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", 
-  "W", "X", "Y", "Z"
-};
+std::vector<std::string> PathLexer::_ruleNames = {"ROOT",
+                                                  "WS",
+                                                  "TIME",
+                                                  "TIMESTAMP",
+                                                  "MINUS",
+                                                  "PLUS",
+                                                  "DIV",
+                                                  "MOD",
+                                                  "OPERATOR_DEQ",
+                                                  "OPERATOR_SEQ",
+                                                  "OPERATOR_GT",
+                                                  "OPERATOR_GTE",
+                                                  "OPERATOR_LT",
+                                                  "OPERATOR_LTE",
+                                                  "OPERATOR_NEQ",
+                                                  "OPERATOR_BITWISE_AND",
+                                                  "OPERATOR_LOGICAL_AND",
+                                                  "OPERATOR_BITWISE_OR",
+                                                  "OPERATOR_LOGICAL_OR",
+                                                  "OPERATOR_NOT",
+                                                  "DOT",
+                                                  "COMMA",
+                                                  "SEMI",
+                                                  "STAR",
+                                                  "DOUBLE_STAR",
+                                                  "LR_BRACKET",
+                                                  "RR_BRACKET",
+                                                  "LS_BRACKET",
+                                                  "RS_BRACKET",
+                                                  "DOUBLE_COLON",
+                                                  "STRING_LITERAL",
+                                                  "DURATION_LITERAL",
+                                                  "DATETIME_LITERAL",
+                                                  "DATE_LITERAL",
+                                                  "TIME_LITERAL",
+                                                  "INTEGER_LITERAL",
+                                                  "EXPONENT_NUM_PART",
+                                                  "DEC_DIGIT",
+                                                  "ID",
+                                                  "QUOTED_ID",
+                                                  "NAME_CHAR",
+                                                  "CN_CHAR",
+                                                  "DQUOTA_STRING",
+                                                  "SQUOTA_STRING",
+                                                  "BQUOTA_STRING",
+                                                  "A",
+                                                  "B",
+                                                  "C",
+                                                  "D",
+                                                  "E",
+                                                  "F",
+                                                  "G",
+                                                  "H",
+                                                  "I",
+                                                  "J",
+                                                  "K",
+                                                  "L",
+                                                  "M",
+                                                  "N",
+                                                  "O",
+                                                  "P",
+                                                  "Q",
+                                                  "R",
+                                                  "S",
+                                                  "T",
+                                                  "U",
+                                                  "V",
+                                                  "W",
+                                                  "X",
+                                                  "Y",
+                                                  "Z"};
 
-std::vector<std::string> PathLexer::_channelNames = {
-  "DEFAULT_TOKEN_CHANNEL", "HIDDEN"
-};
+std::vector<std::string> PathLexer::_channelNames = {"DEFAULT_TOKEN_CHANNEL",
+                                                     "HIDDEN"};
 
-std::vector<std::string> PathLexer::_modeNames = {
-  "DEFAULT_MODE"
-};
+std::vector<std::string> PathLexer::_modeNames = {"DEFAULT_MODE"};
 
 std::vector<std::string> PathLexer::_literalNames = {
-  "", "", "", "", "", "'-'", "'+'", "'/'", "'%'", "'=='", "'='", "'>'", 
-  "'>='", "'<'", "'<='", "", "'&'", "'&&'", "'|'", "'||'", "'!'", "'.'", 
-  "','", "';'", "'*'", "'**'", "'('", "')'", "'['", "']'", "'::'"
-};
+    "",    "",     "",    "",     "",     "'-'", "'+'",  "'/'",
+    "'%'", "'=='", "'='", "'>'",  "'>='", "'<'", "'<='", "",
+    "'&'", "'&&'", "'|'", "'||'", "'!'",  "'.'", "','",  "';'",
+    "'*'", "'**'", "'('", "')'",  "'['",  "']'", "'::'"};
 
-std::vector<std::string> PathLexer::_symbolicNames = {
-  "", "ROOT", "WS", "TIME", "TIMESTAMP", "MINUS", "PLUS", "DIV", "MOD", 
-  "OPERATOR_DEQ", "OPERATOR_SEQ", "OPERATOR_GT", "OPERATOR_GTE", "OPERATOR_LT", 
-  "OPERATOR_LTE", "OPERATOR_NEQ", "OPERATOR_BITWISE_AND", "OPERATOR_LOGICAL_AND", 
-  "OPERATOR_BITWISE_OR", "OPERATOR_LOGICAL_OR", "OPERATOR_NOT", "DOT", "COMMA", 
-  "SEMI", "STAR", "DOUBLE_STAR", "LR_BRACKET", "RR_BRACKET", "LS_BRACKET", 
-  "RS_BRACKET", "DOUBLE_COLON", "STRING_LITERAL", "DURATION_LITERAL", "DATETIME_LITERAL", 
-  "INTEGER_LITERAL", "EXPONENT_NUM_PART", "ID", "QUOTED_ID"
-};
+std::vector<std::string> PathLexer::_symbolicNames = {"",
+                                                      "ROOT",
+                                                      "WS",
+                                                      "TIME",
+                                                      "TIMESTAMP",
+                                                      "MINUS",
+                                                      "PLUS",
+                                                      "DIV",
+                                                      "MOD",
+                                                      "OPERATOR_DEQ",
+                                                      "OPERATOR_SEQ",
+                                                      "OPERATOR_GT",
+                                                      "OPERATOR_GTE",
+                                                      "OPERATOR_LT",
+                                                      "OPERATOR_LTE",
+                                                      "OPERATOR_NEQ",
+                                                      "OPERATOR_BITWISE_AND",
+                                                      "OPERATOR_LOGICAL_AND",
+                                                      "OPERATOR_BITWISE_OR",
+                                                      "OPERATOR_LOGICAL_OR",
+                                                      "OPERATOR_NOT",
+                                                      "DOT",
+                                                      "COMMA",
+                                                      "SEMI",
+                                                      "STAR",
+                                                      "DOUBLE_STAR",
+                                                      "LR_BRACKET",
+                                                      "RR_BRACKET",
+                                                      "LS_BRACKET",
+                                                      "RS_BRACKET",
+                                                      "DOUBLE_COLON",
+                                                      "STRING_LITERAL",
+                                                      "DURATION_LITERAL",
+                                                      "DATETIME_LITERAL",
+                                                      "INTEGER_LITERAL",
+                                                      "EXPONENT_NUM_PART",
+                                                      "ID",
+                                                      "QUOTED_ID"};
 
 dfa::Vocabulary PathLexer::_vocabulary(_literalNames, _symbolicNames);
 
 std::vector<std::string> PathLexer::_tokenNames;
 
 PathLexer::Initializer::Initializer() {
-  // This code could be in a static initializer lambda, but VS doesn't allow access to private class members from there.
-	for (size_t i = 0; i < _symbolicNames.size(); ++i) {
-		std::string name = _vocabulary.getLiteralName(i);
-		if (name.empty()) {
-			name = _vocabulary.getSymbolicName(i);
-		}
+    // This code could be in a static initializer lambda, but VS doesn't allow
+    // access to private class members from there.
+    for (size_t i = 0; i < _symbolicNames.size(); ++i) {
+        std::string name = _vocabulary.getLiteralName(i);
+        if (name.empty()) {
+            name = _vocabulary.getSymbolicName(i);
+        }
 
-		if (name.empty()) {
-			_tokenNames.push_back("<INVALID>");
-		} else {
-      _tokenNames.push_back(name);
+        if (name.empty()) {
+            _tokenNames.push_back("<INVALID>");
+        } else {
+            _tokenNames.push_back(name);
+        }
     }
-	}
 
-  static const uint16_t serializedATNSegment0[] = {
-    0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964, 
-       0x2, 0x27, 0x1b0, 0x8, 0x1, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3, 0x9, 0x3, 
-       0x4, 0x4, 0x9, 0x4, 0x4, 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 
-       0x7, 0x9, 0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x4, 0xa, 
-       0x9, 0xa, 0x4, 0xb, 0x9, 0xb, 0x4, 0xc, 0x9, 0xc, 0x4, 0xd, 0x9, 
-       0xd, 0x4, 0xe, 0x9, 0xe, 0x4, 0xf, 0x9, 0xf, 0x4, 0x10, 0x9, 0x10, 
-       0x4, 0x11, 0x9, 0x11, 0x4, 0x12, 0x9, 0x12, 0x4, 0x13, 0x9, 0x13, 
-       0x4, 0x14, 0x9, 0x14, 0x4, 0x15, 0x9, 0x15, 0x4, 0x16, 0x9, 0x16, 
-       0x4, 0x17, 0x9, 0x17, 0x4, 0x18, 0x9, 0x18, 0x4, 0x19, 0x9, 0x19, 
-       0x4, 0x1a, 0x9, 0x1a, 0x4, 0x1b, 0x9, 0x1b, 0x4, 0x1c, 0x9, 0x1c, 
-       0x4, 0x1d, 0x9, 0x1d, 0x4, 0x1e, 0x9, 0x1e, 0x4, 0x1f, 0x9, 0x1f, 
-       0x4, 0x20, 0x9, 0x20, 0x4, 0x21, 0x9, 0x21, 0x4, 0x22, 0x9, 0x22, 
-       0x4, 0x23, 0x9, 0x23, 0x4, 0x24, 0x9, 0x24, 0x4, 0x25, 0x9, 0x25, 
-       0x4, 0x26, 0x9, 0x26, 0x4, 0x27, 0x9, 0x27, 0x4, 0x28, 0x9, 0x28, 
-       0x4, 0x29, 0x9, 0x29, 0x4, 0x2a, 0x9, 0x2a, 0x4, 0x2b, 0x9, 0x2b, 
-       0x4, 0x2c, 0x9, 0x2c, 0x4, 0x2d, 0x9, 0x2d, 0x4, 0x2e, 0x9, 0x2e, 
-       0x4, 0x2f, 0x9, 0x2f, 0x4, 0x30, 0x9, 0x30, 0x4, 0x31, 0x9, 0x31, 
-       0x4, 0x32, 0x9, 0x32, 0x4, 0x33, 0x9, 0x33, 0x4, 0x34, 0x9, 0x34, 
-       0x4, 0x35, 0x9, 0x35, 0x4, 0x36, 0x9, 0x36, 0x4, 0x37, 0x9, 0x37, 
-       0x4, 0x38, 0x9, 0x38, 0x4, 0x39, 0x9, 0x39, 0x4, 0x3a, 0x9, 0x3a, 
-       0x4, 0x3b, 0x9, 0x3b, 0x4, 0x3c, 0x9, 0x3c, 0x4, 0x3d, 0x9, 0x3d, 
-       0x4, 0x3e, 0x9, 0x3e, 0x4, 0x3f, 0x9, 0x3f, 0x4, 0x40, 0x9, 0x40, 
-       0x4, 0x41, 0x9, 0x41, 0x4, 0x42, 0x9, 0x42, 0x4, 0x43, 0x9, 0x43, 
-       0x4, 0x44, 0x9, 0x44, 0x4, 0x45, 0x9, 0x45, 0x4, 0x46, 0x9, 0x46, 
-       0x4, 0x47, 0x9, 0x47, 0x4, 0x48, 0x9, 0x48, 0x3, 0x2, 0x3, 0x2, 0x3, 
-       0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x3, 0x6, 0x3, 0x98, 0xa, 0x3, 0xd, 
-       0x3, 0xe, 0x3, 0x99, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 
-       0x4, 0x3, 0x4, 0x3, 0x4, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 
-       0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 0x5, 0x3, 
-       0x6, 0x3, 0x6, 0x3, 0x7, 0x3, 0x7, 0x3, 0x8, 0x3, 0x8, 0x3, 0x9, 
-       0x3, 0x9, 0x3, 0xa, 0x3, 0xa, 0x3, 0xa, 0x3, 0xb, 0x3, 0xb, 0x3, 
-       0xc, 0x3, 0xc, 0x3, 0xd, 0x3, 0xd, 0x3, 0xd, 0x3, 0xe, 0x3, 0xe, 
-       0x3, 0xf, 0x3, 0xf, 0x3, 0xf, 0x3, 0x10, 0x3, 0x10, 0x3, 0x10, 0x3, 
-       0x10, 0x5, 0x10, 0xc8, 0xa, 0x10, 0x3, 0x11, 0x3, 0x11, 0x3, 0x12, 
-       0x3, 0x12, 0x3, 0x12, 0x3, 0x13, 0x3, 0x13, 0x3, 0x14, 0x3, 0x14, 
-       0x3, 0x14, 0x3, 0x15, 0x3, 0x15, 0x3, 0x16, 0x3, 0x16, 0x3, 0x17, 
-       0x3, 0x17, 0x3, 0x18, 0x3, 0x18, 0x3, 0x19, 0x3, 0x19, 0x3, 0x1a, 
-       0x3, 0x1a, 0x3, 0x1a, 0x3, 0x1b, 0x3, 0x1b, 0x3, 0x1c, 0x3, 0x1c, 
-       0x3, 0x1d, 0x3, 0x1d, 0x3, 0x1e, 0x3, 0x1e, 0x3, 0x1f, 0x3, 0x1f, 
-       0x3, 0x1f, 0x3, 0x20, 0x3, 0x20, 0x5, 0x20, 0xee, 0xa, 0x20, 0x3, 
-       0x21, 0x6, 0x21, 0xf1, 0xa, 0x21, 0xd, 0x21, 0xe, 0x21, 0xf2, 0x3, 
-       0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 
-       0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 
-       0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x3, 0x21, 0x5, 
-       0x21, 0x107, 0xa, 0x21, 0x6, 0x21, 0x109, 0xa, 0x21, 0xd, 0x21, 0xe, 
-       0x21, 0x10a, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x5, 0x22, 0x110, 0xa, 
-       0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 0x22, 0x3, 
-       0x22, 0x5, 0x22, 0x118, 0xa, 0x22, 0x5, 0x22, 0x11a, 0xa, 0x22, 0x3, 
-       0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 
-       0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 
-       0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x3, 0x23, 0x5, 
-       0x23, 0x12e, 0xa, 0x23, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 
-       0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x3, 0x24, 0x5, 0x24, 0x138, 0xa, 
-       0x24, 0x3, 0x25, 0x6, 0x25, 0x13b, 0xa, 0x25, 0xd, 0x25, 0xe, 0x25, 
-       0x13c, 0x3, 0x26, 0x6, 0x26, 0x140, 0xa, 0x26, 0xd, 0x26, 0xe, 0x26, 
-       0x141, 0x3, 0x26, 0x3, 0x26, 0x5, 0x26, 0x146, 0xa, 0x26, 0x3, 0x26, 
-       0x6, 0x26, 0x149, 0xa, 0x26, 0xd, 0x26, 0xe, 0x26, 0x14a, 0x3, 0x27, 
-       0x3, 0x27, 0x3, 0x28, 0x6, 0x28, 0x150, 0xa, 0x28, 0xd, 0x28, 0xe, 
-       0x28, 0x151, 0x3, 0x29, 0x3, 0x29, 0x3, 0x2a, 0x3, 0x2a, 0x5, 0x2a, 
-       0x158, 0xa, 0x2a, 0x3, 0x2b, 0x3, 0x2b, 0x3, 0x2c, 0x3, 0x2c, 0x3, 
-       0x2c, 0x3, 0x2c, 0x7, 0x2c, 0x160, 0xa, 0x2c, 0xc, 0x2c, 0xe, 0x2c, 
-       0x163, 0xb, 0x2c, 0x3, 0x2c, 0x3, 0x2c, 0x3, 0x2d, 0x3, 0x2d, 0x3, 
-       0x2d, 0x3, 0x2d, 0x7, 0x2d, 0x16b, 0xa, 0x2d, 0xc, 0x2d, 0xe, 0x2d, 
-       0x16e, 0xb, 0x2d, 0x3, 0x2d, 0x3, 0x2d, 0x3, 0x2e, 0x3, 0x2e, 0x3, 
-       0x2e, 0x3, 0x2e, 0x7, 0x2e, 0x176, 0xa, 0x2e, 0xc, 0x2e, 0xe, 0x2e, 
-       0x179, 0xb, 0x2e, 0x3, 0x2e, 0x3, 0x2e, 0x3, 0x2f, 0x3, 0x2f, 0x3, 
-       0x30, 0x3, 0x30, 0x3, 0x31, 0x3, 0x31, 0x3, 0x32, 0x3, 0x32, 0x3, 
-       0x33, 0x3, 0x33, 0x3, 0x34, 0x3, 0x34, 0x3, 0x35, 0x3, 0x35, 0x3, 
-       0x36, 0x3, 0x36, 0x3, 0x37, 0x3, 0x37, 0x3, 0x38, 0x3, 0x38, 0x3, 
-       0x39, 0x3, 0x39, 0x3, 0x3a, 0x3, 0x3a, 0x3, 0x3b, 0x3, 0x3b, 0x3, 
-       0x3c, 0x3, 0x3c, 0x3, 0x3d, 0x3, 0x3d, 0x3, 0x3e, 0x3, 0x3e, 0x3, 
-       0x3f, 0x3, 0x3f, 0x3, 0x40, 0x3, 0x40, 0x3, 0x41, 0x3, 0x41, 0x3, 
-       0x42, 0x3, 0x42, 0x3, 0x43, 0x3, 0x43, 0x3, 0x44, 0x3, 0x44, 0x3, 
-       0x45, 0x3, 0x45, 0x3, 0x46, 0x3, 0x46, 0x3, 0x47, 0x3, 0x47, 0x3, 
-       0x48, 0x3, 0x48, 0x2, 0x2, 0x49, 0x3, 0x3, 0x5, 0x4, 0x7, 0x5, 0x9, 
-       0x6, 0xb, 0x7, 0xd, 0x8, 0xf, 0x9, 0x11, 0xa, 0x13, 0xb, 0x15, 0xc, 
-       0x17, 0xd, 0x19, 0xe, 0x1b, 0xf, 0x1d, 0x10, 0x1f, 0x11, 0x21, 0x12, 
-       0x23, 0x13, 0x25, 0x14, 0x27, 0x15, 0x29, 0x16, 0x2b, 0x17, 0x2d, 
-       0x18, 0x2f, 0x19, 0x31, 0x1a, 0x33, 0x1b, 0x35, 0x1c, 0x37, 0x1d, 
-       0x39, 0x1e, 0x3b, 0x1f, 0x3d, 0x20, 0x3f, 0x21, 0x41, 0x22, 0x43, 
-       0x23, 0x45, 0x2, 0x47, 0x2, 0x49, 0x24, 0x4b, 0x25, 0x4d, 0x2, 0x4f, 
-       0x26, 0x51, 0x27, 0x53, 0x2, 0x55, 0x2, 0x57, 0x2, 0x59, 0x2, 0x5b, 
-       0x2, 0x5d, 0x2, 0x5f, 0x2, 0x61, 0x2, 0x63, 0x2, 0x65, 0x2, 0x67, 
-       0x2, 0x69, 0x2, 0x6b, 0x2, 0x6d, 0x2, 0x6f, 0x2, 0x71, 0x2, 0x73, 
-       0x2, 0x75, 0x2, 0x77, 0x2, 0x79, 0x2, 0x7b, 0x2, 0x7d, 0x2, 0x7f, 
-       0x2, 0x81, 0x2, 0x83, 0x2, 0x85, 0x2, 0x87, 0x2, 0x89, 0x2, 0x8b, 
-       0x2, 0x8d, 0x2, 0x8f, 0x2, 0x3, 0x2, 0x23, 0x5, 0x2, 0xb, 0xd, 0xf, 
-       0xf, 0x22, 0x22, 0x4, 0x2, 0x2d, 0x2d, 0x2f, 0x2f, 0x4, 0x2, 0x47, 
-       0x47, 0x67, 0x67, 0x3, 0x2, 0x32, 0x3b, 0x8, 0x2, 0x25, 0x26, 0x32, 
-       0x3c, 0x42, 0x5c, 0x61, 0x61, 0x63, 0x7d, 0x7f, 0x7f, 0x3, 0x2, 0x24, 
-       0x24, 0x3, 0x2, 0x29, 0x29, 0x3, 0x2, 0x62, 0x62, 0x4, 0x2, 0x43, 
-       0x43, 0x63, 0x63, 0x4, 0x2, 0x44, 0x44, 0x64, 0x64, 0x4, 0x2, 0x45, 
-       0x45, 0x65, 0x65, 0x4, 0x2, 0x46, 0x46, 0x66, 0x66, 0x4, 0x2, 0x48, 
-       0x48, 0x68, 0x68, 0x4, 0x2, 0x49, 0x49, 0x69, 0x69, 0x4, 0x2, 0x4a, 
-       0x4a, 0x6a, 0x6a, 0x4, 0x2, 0x4b, 0x4b, 0x6b, 0x6b, 0x4, 0x2, 0x4c, 
-       0x4c, 0x6c, 0x6c, 0x4, 0x2, 0x4d, 0x4d, 0x6d, 0x6d, 0x4, 0x2, 0x4e, 
-       0x4e, 0x6e, 0x6e, 0x4, 0x2, 0x4f, 0x4f, 0x6f, 0x6f, 0x4, 0x2, 0x50, 
-       0x50, 0x70, 0x70, 0x4, 0x2, 0x51, 0x51, 0x71, 0x71, 0x4, 0x2, 0x52, 
-       0x52, 0x72, 0x72, 0x4, 0x2, 0x53, 0x53, 0x73, 0x73, 0x4, 0x2, 0x54, 
-       0x54, 0x74, 0x74, 0x4, 0x2, 0x55, 0x55, 0x75, 0x75, 0x4, 0x2, 0x56, 
-       0x56, 0x76, 0x76, 0x4, 0x2, 0x57, 0x57, 0x77, 0x77, 0x4, 0x2, 0x58, 
-       0x58, 0x78, 0x78, 0x4, 0x2, 0x59, 0x59, 0x79, 0x79, 0x4, 0x2, 0x5a, 
-       0x5a, 0x7a, 0x7a, 0x4, 0x2, 0x5b, 0x5b, 0x7b, 0x7b, 0x4, 0x2, 0x5c, 
-       0x5c, 0x7c, 0x7c, 0x2, 0x1ad, 0x2, 0x3, 0x3, 0x2, 0x2, 0x2, 0x2, 
-       0x5, 0x3, 0x2, 0x2, 0x2, 0x2, 0x7, 0x3, 0x2, 0x2, 0x2, 0x2, 0x9, 
-       0x3, 0x2, 0x2, 0x2, 0x2, 0xb, 0x3, 0x2, 0x2, 0x2, 0x2, 0xd, 0x3, 
-       0x2, 0x2, 0x2, 0x2, 0xf, 0x3, 0x2, 0x2, 0x2, 0x2, 0x11, 0x3, 0x2, 
-       0x2, 0x2, 0x2, 0x13, 0x3, 0x2, 0x2, 0x2, 0x2, 0x15, 0x3, 0x2, 0x2, 
-       0x2, 0x2, 0x17, 0x3, 0x2, 0x2, 0x2, 0x2, 0x19, 0x3, 0x2, 0x2, 0x2, 
-       0x2, 0x1b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x1d, 0x3, 0x2, 0x2, 0x2, 0x2, 
-       0x1f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x21, 0x3, 0x2, 0x2, 0x2, 0x2, 0x23, 
-       0x3, 0x2, 0x2, 0x2, 0x2, 0x25, 0x3, 0x2, 0x2, 0x2, 0x2, 0x27, 0x3, 
-       0x2, 0x2, 0x2, 0x2, 0x29, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2b, 0x3, 0x2, 
-       0x2, 0x2, 0x2, 0x2d, 0x3, 0x2, 0x2, 0x2, 0x2, 0x2f, 0x3, 0x2, 0x2, 
-       0x2, 0x2, 0x31, 0x3, 0x2, 0x2, 0x2, 0x2, 0x33, 0x3, 0x2, 0x2, 0x2, 
-       0x2, 0x35, 0x3, 0x2, 0x2, 0x2, 0x2, 0x37, 0x3, 0x2, 0x2, 0x2, 0x2, 
-       0x39, 0x3, 0x2, 0x2, 0x2, 0x2, 0x3b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x3d, 
-       0x3, 0x2, 0x2, 0x2, 0x2, 0x3f, 0x3, 0x2, 0x2, 0x2, 0x2, 0x41, 0x3, 
-       0x2, 0x2, 0x2, 0x2, 0x43, 0x3, 0x2, 0x2, 0x2, 0x2, 0x49, 0x3, 0x2, 
-       0x2, 0x2, 0x2, 0x4b, 0x3, 0x2, 0x2, 0x2, 0x2, 0x4f, 0x3, 0x2, 0x2, 
-       0x2, 0x2, 0x51, 0x3, 0x2, 0x2, 0x2, 0x3, 0x91, 0x3, 0x2, 0x2, 0x2, 
-       0x5, 0x97, 0x3, 0x2, 0x2, 0x2, 0x7, 0x9d, 0x3, 0x2, 0x2, 0x2, 0x9, 
-       0xa2, 0x3, 0x2, 0x2, 0x2, 0xb, 0xac, 0x3, 0x2, 0x2, 0x2, 0xd, 0xae, 
-       0x3, 0x2, 0x2, 0x2, 0xf, 0xb0, 0x3, 0x2, 0x2, 0x2, 0x11, 0xb2, 0x3, 
-       0x2, 0x2, 0x2, 0x13, 0xb4, 0x3, 0x2, 0x2, 0x2, 0x15, 0xb7, 0x3, 0x2, 
-       0x2, 0x2, 0x17, 0xb9, 0x3, 0x2, 0x2, 0x2, 0x19, 0xbb, 0x3, 0x2, 0x2, 
-       0x2, 0x1b, 0xbe, 0x3, 0x2, 0x2, 0x2, 0x1d, 0xc0, 0x3, 0x2, 0x2, 0x2, 
-       0x1f, 0xc7, 0x3, 0x2, 0x2, 0x2, 0x21, 0xc9, 0x3, 0x2, 0x2, 0x2, 0x23, 
-       0xcb, 0x3, 0x2, 0x2, 0x2, 0x25, 0xce, 0x3, 0x2, 0x2, 0x2, 0x27, 0xd0, 
-       0x3, 0x2, 0x2, 0x2, 0x29, 0xd3, 0x3, 0x2, 0x2, 0x2, 0x2b, 0xd5, 0x3, 
-       0x2, 0x2, 0x2, 0x2d, 0xd7, 0x3, 0x2, 0x2, 0x2, 0x2f, 0xd9, 0x3, 0x2, 
-       0x2, 0x2, 0x31, 0xdb, 0x3, 0x2, 0x2, 0x2, 0x33, 0xdd, 0x3, 0x2, 0x2, 
-       0x2, 0x35, 0xe0, 0x3, 0x2, 0x2, 0x2, 0x37, 0xe2, 0x3, 0x2, 0x2, 0x2, 
-       0x39, 0xe4, 0x3, 0x2, 0x2, 0x2, 0x3b, 0xe6, 0x3, 0x2, 0x2, 0x2, 0x3d, 
-       0xe8, 0x3, 0x2, 0x2, 0x2, 0x3f, 0xed, 0x3, 0x2, 0x2, 0x2, 0x41, 0x108, 
-       0x3, 0x2, 0x2, 0x2, 0x43, 0x10c, 0x3, 0x2, 0x2, 0x2, 0x45, 0x12d, 
-       0x3, 0x2, 0x2, 0x2, 0x47, 0x12f, 0x3, 0x2, 0x2, 0x2, 0x49, 0x13a, 
-       0x3, 0x2, 0x2, 0x2, 0x4b, 0x13f, 0x3, 0x2, 0x2, 0x2, 0x4d, 0x14c, 
-       0x3, 0x2, 0x2, 0x2, 0x4f, 0x14f, 0x3, 0x2, 0x2, 0x2, 0x51, 0x153, 
-       0x3, 0x2, 0x2, 0x2, 0x53, 0x157, 0x3, 0x2, 0x2, 0x2, 0x55, 0x159, 
-       0x3, 0x2, 0x2, 0x2, 0x57, 0x15b, 0x3, 0x2, 0x2, 0x2, 0x59, 0x166, 
-       0x3, 0x2, 0x2, 0x2, 0x5b, 0x171, 0x3, 0x2, 0x2, 0x2, 0x5d, 0x17c, 
-       0x3, 0x2, 0x2, 0x2, 0x5f, 0x17e, 0x3, 0x2, 0x2, 0x2, 0x61, 0x180, 
-       0x3, 0x2, 0x2, 0x2, 0x63, 0x182, 0x3, 0x2, 0x2, 0x2, 0x65, 0x184, 
-       0x3, 0x2, 0x2, 0x2, 0x67, 0x186, 0x3, 0x2, 0x2, 0x2, 0x69, 0x188, 
-       0x3, 0x2, 0x2, 0x2, 0x6b, 0x18a, 0x3, 0x2, 0x2, 0x2, 0x6d, 0x18c, 
-       0x3, 0x2, 0x2, 0x2, 0x6f, 0x18e, 0x3, 0x2, 0x2, 0x2, 0x71, 0x190, 
-       0x3, 0x2, 0x2, 0x2, 0x73, 0x192, 0x3, 0x2, 0x2, 0x2, 0x75, 0x194, 
-       0x3, 0x2, 0x2, 0x2, 0x77, 0x196, 0x3, 0x2, 0x2, 0x2, 0x79, 0x198, 
-       0x3, 0x2, 0x2, 0x2, 0x7b, 0x19a, 0x3, 0x2, 0x2, 0x2, 0x7d, 0x19c, 
-       0x3, 0x2, 0x2, 0x2, 0x7f, 0x19e, 0x3, 0x2, 0x2, 0x2, 0x81, 0x1a0, 
-       0x3, 0x2, 0x2, 0x2, 0x83, 0x1a2, 0x3, 0x2, 0x2, 0x2, 0x85, 0x1a4, 
-       0x3, 0x2, 0x2, 0x2, 0x87, 0x1a6, 0x3, 0x2, 0x2, 0x2, 0x89, 0x1a8, 
-       0x3, 0x2, 0x2, 0x2, 0x8b, 0x1aa, 0x3, 0x2, 0x2, 0x2, 0x8d, 0x1ac, 
-       0x3, 0x2, 0x2, 0x2, 0x8f, 0x1ae, 0x3, 0x2, 0x2, 0x2, 0x91, 0x92, 
-       0x5, 0x7f, 0x40, 0x2, 0x92, 0x93, 0x5, 0x79, 0x3d, 0x2, 0x93, 0x94, 
-       0x5, 0x79, 0x3d, 0x2, 0x94, 0x95, 0x5, 0x83, 0x42, 0x2, 0x95, 0x4, 
-       0x3, 0x2, 0x2, 0x2, 0x96, 0x98, 0x9, 0x2, 0x2, 0x2, 0x97, 0x96, 0x3, 
-       0x2, 0x2, 0x2, 0x98, 0x99, 0x3, 0x2, 0x2, 0x2, 0x99, 0x97, 0x3, 0x2, 
-       0x2, 0x2, 0x99, 0x9a, 0x3, 0x2, 0x2, 0x2, 0x9a, 0x9b, 0x3, 0x2, 0x2, 
-       0x2, 0x9b, 0x9c, 0x8, 0x3, 0x2, 0x2, 0x9c, 0x6, 0x3, 0x2, 0x2, 0x2, 
-       0x9d, 0x9e, 0x5, 0x83, 0x42, 0x2, 0x9e, 0x9f, 0x5, 0x6d, 0x37, 0x2, 
-       0x9f, 0xa0, 0x5, 0x75, 0x3b, 0x2, 0xa0, 0xa1, 0x5, 0x65, 0x33, 0x2, 
-       0xa1, 0x8, 0x3, 0x2, 0x2, 0x2, 0xa2, 0xa3, 0x5, 0x83, 0x42, 0x2, 
-       0xa3, 0xa4, 0x5, 0x6d, 0x37, 0x2, 0xa4, 0xa5, 0x5, 0x75, 0x3b, 0x2, 
-       0xa5, 0xa6, 0x5, 0x65, 0x33, 0x2, 0xa6, 0xa7, 0x5, 0x81, 0x41, 0x2, 
-       0xa7, 0xa8, 0x5, 0x83, 0x42, 0x2, 0xa8, 0xa9, 0x5, 0x5d, 0x2f, 0x2, 
-       0xa9, 0xaa, 0x5, 0x75, 0x3b, 0x2, 0xaa, 0xab, 0x5, 0x7b, 0x3e, 0x2, 
-       0xab, 0xa, 0x3, 0x2, 0x2, 0x2, 0xac, 0xad, 0x7, 0x2f, 0x2, 0x2, 0xad, 
-       0xc, 0x3, 0x2, 0x2, 0x2, 0xae, 0xaf, 0x7, 0x2d, 0x2, 0x2, 0xaf, 0xe, 
-       0x3, 0x2, 0x2, 0x2, 0xb0, 0xb1, 0x7, 0x31, 0x2, 0x2, 0xb1, 0x10, 
-       0x3, 0x2, 0x2, 0x2, 0xb2, 0xb3, 0x7, 0x27, 0x2, 0x2, 0xb3, 0x12, 
-       0x3, 0x2, 0x2, 0x2, 0xb4, 0xb5, 0x7, 0x3f, 0x2, 0x2, 0xb5, 0xb6, 
-       0x7, 0x3f, 0x2, 0x2, 0xb6, 0x14, 0x3, 0x2, 0x2, 0x2, 0xb7, 0xb8, 
-       0x7, 0x3f, 0x2, 0x2, 0xb8, 0x16, 0x3, 0x2, 0x2, 0x2, 0xb9, 0xba, 
-       0x7, 0x40, 0x2, 0x2, 0xba, 0x18, 0x3, 0x2, 0x2, 0x2, 0xbb, 0xbc, 
-       0x7, 0x40, 0x2, 0x2, 0xbc, 0xbd, 0x7, 0x3f, 0x2, 0x2, 0xbd, 0x1a, 
-       0x3, 0x2, 0x2, 0x2, 0xbe, 0xbf, 0x7, 0x3e, 0x2, 0x2, 0xbf, 0x1c, 
-       0x3, 0x2, 0x2, 0x2, 0xc0, 0xc1, 0x7, 0x3e, 0x2, 0x2, 0xc1, 0xc2, 
-       0x7, 0x3f, 0x2, 0x2, 0xc2, 0x1e, 0x3, 0x2, 0x2, 0x2, 0xc3, 0xc4, 
-       0x7, 0x23, 0x2, 0x2, 0xc4, 0xc8, 0x7, 0x3f, 0x2, 0x2, 0xc5, 0xc6, 
-       0x7, 0x3e, 0x2, 0x2, 0xc6, 0xc8, 0x7, 0x40, 0x2, 0x2, 0xc7, 0xc3, 
-       0x3, 0x2, 0x2, 0x2, 0xc7, 0xc5, 0x3, 0x2, 0x2, 0x2, 0xc8, 0x20, 0x3, 
-       0x2, 0x2, 0x2, 0xc9, 0xca, 0x7, 0x28, 0x2, 0x2, 0xca, 0x22, 0x3, 
-       0x2, 0x2, 0x2, 0xcb, 0xcc, 0x7, 0x28, 0x2, 0x2, 0xcc, 0xcd, 0x7, 
-       0x28, 0x2, 0x2, 0xcd, 0x24, 0x3, 0x2, 0x2, 0x2, 0xce, 0xcf, 0x7, 
-       0x7e, 0x2, 0x2, 0xcf, 0x26, 0x3, 0x2, 0x2, 0x2, 0xd0, 0xd1, 0x7, 
-       0x7e, 0x2, 0x2, 0xd1, 0xd2, 0x7, 0x7e, 0x2, 0x2, 0xd2, 0x28, 0x3, 
-       0x2, 0x2, 0x2, 0xd3, 0xd4, 0x7, 0x23, 0x2, 0x2, 0xd4, 0x2a, 0x3, 
-       0x2, 0x2, 0x2, 0xd5, 0xd6, 0x7, 0x30, 0x2, 0x2, 0xd6, 0x2c, 0x3, 
-       0x2, 0x2, 0x2, 0xd7, 0xd8, 0x7, 0x2e, 0x2, 0x2, 0xd8, 0x2e, 0x3, 
-       0x2, 0x2, 0x2, 0xd9, 0xda, 0x7, 0x3d, 0x2, 0x2, 0xda, 0x30, 0x3, 
-       0x2, 0x2, 0x2, 0xdb, 0xdc, 0x7, 0x2c, 0x2, 0x2, 0xdc, 0x32, 0x3, 
-       0x2, 0x2, 0x2, 0xdd, 0xde, 0x7, 0x2c, 0x2, 0x2, 0xde, 0xdf, 0x7, 
-       0x2c, 0x2, 0x2, 0xdf, 0x34, 0x3, 0x2, 0x2, 0x2, 0xe0, 0xe1, 0x7, 
-       0x2a, 0x2, 0x2, 0xe1, 0x36, 0x3, 0x2, 0x2, 0x2, 0xe2, 0xe3, 0x7, 
-       0x2b, 0x2, 0x2, 0xe3, 0x38, 0x3, 0x2, 0x2, 0x2, 0xe4, 0xe5, 0x7, 
-       0x5d, 0x2, 0x2, 0xe5, 0x3a, 0x3, 0x2, 0x2, 0x2, 0xe6, 0xe7, 0x7, 
-       0x5f, 0x2, 0x2, 0xe7, 0x3c, 0x3, 0x2, 0x2, 0x2, 0xe8, 0xe9, 0x7, 
-       0x3c, 0x2, 0x2, 0xe9, 0xea, 0x7, 0x3c, 0x2, 0x2, 0xea, 0x3e, 0x3, 
-       0x2, 0x2, 0x2, 0xeb, 0xee, 0x5, 0x57, 0x2c, 0x2, 0xec, 0xee, 0x5, 
-       0x59, 0x2d, 0x2, 0xed, 0xeb, 0x3, 0x2, 0x2, 0x2, 0xed, 0xec, 0x3, 
-       0x2, 0x2, 0x2, 0xee, 0x40, 0x3, 0x2, 0x2, 0x2, 0xef, 0xf1, 0x5, 0x49, 
-       0x25, 0x2, 0xf0, 0xef, 0x3, 0x2, 0x2, 0x2, 0xf1, 0xf2, 0x3, 0x2, 
-       0x2, 0x2, 0xf2, 0xf0, 0x3, 0x2, 0x2, 0x2, 0xf2, 0xf3, 0x3, 0x2, 0x2, 
-       0x2, 0xf3, 0x106, 0x3, 0x2, 0x2, 0x2, 0xf4, 0x107, 0x5, 0x8d, 0x47, 
-       0x2, 0xf5, 0xf6, 0x5, 0x75, 0x3b, 0x2, 0xf6, 0xf7, 0x5, 0x79, 0x3d, 
-       0x2, 0xf7, 0x107, 0x3, 0x2, 0x2, 0x2, 0xf8, 0x107, 0x5, 0x89, 0x45, 
-       0x2, 0xf9, 0x107, 0x5, 0x63, 0x32, 0x2, 0xfa, 0x107, 0x5, 0x6b, 0x36, 
-       0x2, 0xfb, 0x107, 0x5, 0x75, 0x3b, 0x2, 0xfc, 0x107, 0x5, 0x81, 0x41, 
-       0x2, 0xfd, 0xfe, 0x5, 0x75, 0x3b, 0x2, 0xfe, 0xff, 0x5, 0x81, 0x41, 
-       0x2, 0xff, 0x107, 0x3, 0x2, 0x2, 0x2, 0x100, 0x101, 0x5, 0x85, 0x43, 
-       0x2, 0x101, 0x102, 0x5, 0x81, 0x41, 0x2, 0x102, 0x107, 0x3, 0x2, 
-       0x2, 0x2, 0x103, 0x104, 0x5, 0x77, 0x3c, 0x2, 0x104, 0x105, 0x5, 
-       0x81, 0x41, 0x2, 0x105, 0x107, 0x3, 0x2, 0x2, 0x2, 0x106, 0xf4, 0x3, 
-       0x2, 0x2, 0x2, 0x106, 0xf5, 0x3, 0x2, 0x2, 0x2, 0x106, 0xf8, 0x3, 
-       0x2, 0x2, 0x2, 0x106, 0xf9, 0x3, 0x2, 0x2, 0x2, 0x106, 0xfa, 0x3, 
-       0x2, 0x2, 0x2, 0x106, 0xfb, 0x3, 0x2, 0x2, 0x2, 0x106, 0xfc, 0x3, 
-       0x2, 0x2, 0x2, 0x106, 0xfd, 0x3, 0x2, 0x2, 0x2, 0x106, 0x100, 0x3, 
-       0x2, 0x2, 0x2, 0x106, 0x103, 0x3, 0x2, 0x2, 0x2, 0x107, 0x109, 0x3, 
-       0x2, 0x2, 0x2, 0x108, 0xf0, 0x3, 0x2, 0x2, 0x2, 0x109, 0x10a, 0x3, 
-       0x2, 0x2, 0x2, 0x10a, 0x108, 0x3, 0x2, 0x2, 0x2, 0x10a, 0x10b, 0x3, 
-       0x2, 0x2, 0x2, 0x10b, 0x42, 0x3, 0x2, 0x2, 0x2, 0x10c, 0x119, 0x5, 
-       0x45, 0x23, 0x2, 0x10d, 0x110, 0x5, 0x83, 0x42, 0x2, 0x10e, 0x110, 
-       0x5, 0x5, 0x3, 0x2, 0x10f, 0x10d, 0x3, 0x2, 0x2, 0x2, 0x10f, 0x10e, 
-       0x3, 0x2, 0x2, 0x2, 0x110, 0x111, 0x3, 0x2, 0x2, 0x2, 0x111, 0x117, 
-       0x5, 0x47, 0x24, 0x2, 0x112, 0x113, 0x9, 0x3, 0x2, 0x2, 0x113, 0x114, 
-       0x5, 0x49, 0x25, 0x2, 0x114, 0x115, 0x7, 0x3c, 0x2, 0x2, 0x115, 0x116, 
-       0x5, 0x49, 0x25, 0x2, 0x116, 0x118, 0x3, 0x2, 0x2, 0x2, 0x117, 0x112, 
-       0x3, 0x2, 0x2, 0x2, 0x117, 0x118, 0x3, 0x2, 0x2, 0x2, 0x118, 0x11a, 
-       0x3, 0x2, 0x2, 0x2, 0x119, 0x10f, 0x3, 0x2, 0x2, 0x2, 0x119, 0x11a, 
-       0x3, 0x2, 0x2, 0x2, 0x11a, 0x44, 0x3, 0x2, 0x2, 0x2, 0x11b, 0x11c, 
-       0x5, 0x49, 0x25, 0x2, 0x11c, 0x11d, 0x7, 0x2f, 0x2, 0x2, 0x11d, 0x11e, 
-       0x5, 0x49, 0x25, 0x2, 0x11e, 0x11f, 0x7, 0x2f, 0x2, 0x2, 0x11f, 0x120, 
-       0x5, 0x49, 0x25, 0x2, 0x120, 0x12e, 0x3, 0x2, 0x2, 0x2, 0x121, 0x122, 
-       0x5, 0x49, 0x25, 0x2, 0x122, 0x123, 0x7, 0x31, 0x2, 0x2, 0x123, 0x124, 
-       0x5, 0x49, 0x25, 0x2, 0x124, 0x125, 0x7, 0x31, 0x2, 0x2, 0x125, 0x126, 
-       0x5, 0x49, 0x25, 0x2, 0x126, 0x12e, 0x3, 0x2, 0x2, 0x2, 0x127, 0x128, 
-       0x5, 0x49, 0x25, 0x2, 0x128, 0x129, 0x7, 0x30, 0x2, 0x2, 0x129, 0x12a, 
-       0x5, 0x49, 0x25, 0x2, 0x12a, 0x12b, 0x7, 0x30, 0x2, 0x2, 0x12b, 0x12c, 
-       0x5, 0x49, 0x25, 0x2, 0x12c, 0x12e, 0x3, 0x2, 0x2, 0x2, 0x12d, 0x11b, 
-       0x3, 0x2, 0x2, 0x2, 0x12d, 0x121, 0x3, 0x2, 0x2, 0x2, 0x12d, 0x127, 
-       0x3, 0x2, 0x2, 0x2, 0x12e, 0x46, 0x3, 0x2, 0x2, 0x2, 0x12f, 0x130, 
-       0x5, 0x49, 0x25, 0x2, 0x130, 0x131, 0x7, 0x3c, 0x2, 0x2, 0x131, 0x132, 
-       0x5, 0x49, 0x25, 0x2, 0x132, 0x133, 0x7, 0x3c, 0x2, 0x2, 0x133, 0x137, 
-       0x5, 0x49, 0x25, 0x2, 0x134, 0x135, 0x5, 0x2b, 0x16, 0x2, 0x135, 
-       0x136, 0x5, 0x49, 0x25, 0x2, 0x136, 0x138, 0x3, 0x2, 0x2, 0x2, 0x137, 
-       0x134, 0x3, 0x2, 0x2, 0x2, 0x137, 0x138, 0x3, 0x2, 0x2, 0x2, 0x138, 
-       0x48, 0x3, 0x2, 0x2, 0x2, 0x139, 0x13b, 0x5, 0x4d, 0x27, 0x2, 0x13a, 
-       0x139, 0x3, 0x2, 0x2, 0x2, 0x13b, 0x13c, 0x3, 0x2, 0x2, 0x2, 0x13c, 
-       0x13a, 0x3, 0x2, 0x2, 0x2, 0x13c, 0x13d, 0x3, 0x2, 0x2, 0x2, 0x13d, 
-       0x4a, 0x3, 0x2, 0x2, 0x2, 0x13e, 0x140, 0x5, 0x4d, 0x27, 0x2, 0x13f, 
-       0x13e, 0x3, 0x2, 0x2, 0x2, 0x140, 0x141, 0x3, 0x2, 0x2, 0x2, 0x141, 
-       0x13f, 0x3, 0x2, 0x2, 0x2, 0x141, 0x142, 0x3, 0x2, 0x2, 0x2, 0x142, 
-       0x143, 0x3, 0x2, 0x2, 0x2, 0x143, 0x145, 0x9, 0x4, 0x2, 0x2, 0x144, 
-       0x146, 0x9, 0x3, 0x2, 0x2, 0x145, 0x144, 0x3, 0x2, 0x2, 0x2, 0x145, 
-       0x146, 0x3, 0x2, 0x2, 0x2, 0x146, 0x148, 0x3, 0x2, 0x2, 0x2, 0x147, 
-       0x149, 0x5, 0x4d, 0x27, 0x2, 0x148, 0x147, 0x3, 0x2, 0x2, 0x2, 0x149, 
-       0x14a, 0x3, 0x2, 0x2, 0x2, 0x14a, 0x148, 0x3, 0x2, 0x2, 0x2, 0x14a, 
-       0x14b, 0x3, 0x2, 0x2, 0x2, 0x14b, 0x4c, 0x3, 0x2, 0x2, 0x2, 0x14c, 
-       0x14d, 0x9, 0x5, 0x2, 0x2, 0x14d, 0x4e, 0x3, 0x2, 0x2, 0x2, 0x14e, 
-       0x150, 0x5, 0x53, 0x2a, 0x2, 0x14f, 0x14e, 0x3, 0x2, 0x2, 0x2, 0x150, 
-       0x151, 0x3, 0x2, 0x2, 0x2, 0x151, 0x14f, 0x3, 0x2, 0x2, 0x2, 0x151, 
-       0x152, 0x3, 0x2, 0x2, 0x2, 0x152, 0x50, 0x3, 0x2, 0x2, 0x2, 0x153, 
-       0x154, 0x5, 0x5b, 0x2e, 0x2, 0x154, 0x52, 0x3, 0x2, 0x2, 0x2, 0x155, 
-       0x158, 0x9, 0x6, 0x2, 0x2, 0x156, 0x158, 0x5, 0x55, 0x2b, 0x2, 0x157, 
-       0x155, 0x3, 0x2, 0x2, 0x2, 0x157, 0x156, 0x3, 0x2, 0x2, 0x2, 0x158, 
-       0x54, 0x3, 0x2, 0x2, 0x2, 0x159, 0x15a, 0x4, 0x2e82, 0xa001, 0x2, 
-       0x15a, 0x56, 0x3, 0x2, 0x2, 0x2, 0x15b, 0x161, 0x7, 0x24, 0x2, 0x2, 
-       0x15c, 0x15d, 0x7, 0x24, 0x2, 0x2, 0x15d, 0x160, 0x7, 0x24, 0x2, 
-       0x2, 0x15e, 0x160, 0xa, 0x7, 0x2, 0x2, 0x15f, 0x15c, 0x3, 0x2, 0x2, 
-       0x2, 0x15f, 0x15e, 0x3, 0x2, 0x2, 0x2, 0x160, 0x163, 0x3, 0x2, 0x2, 
-       0x2, 0x161, 0x15f, 0x3, 0x2, 0x2, 0x2, 0x161, 0x162, 0x3, 0x2, 0x2, 
-       0x2, 0x162, 0x164, 0x3, 0x2, 0x2, 0x2, 0x163, 0x161, 0x3, 0x2, 0x2, 
-       0x2, 0x164, 0x165, 0x7, 0x24, 0x2, 0x2, 0x165, 0x58, 0x3, 0x2, 0x2, 
-       0x2, 0x166, 0x16c, 0x7, 0x29, 0x2, 0x2, 0x167, 0x168, 0x7, 0x29, 
-       0x2, 0x2, 0x168, 0x16b, 0x7, 0x29, 0x2, 0x2, 0x169, 0x16b, 0xa, 0x8, 
-       0x2, 0x2, 0x16a, 0x167, 0x3, 0x2, 0x2, 0x2, 0x16a, 0x169, 0x3, 0x2, 
-       0x2, 0x2, 0x16b, 0x16e, 0x3, 0x2, 0x2, 0x2, 0x16c, 0x16a, 0x3, 0x2, 
-       0x2, 0x2, 0x16c, 0x16d, 0x3, 0x2, 0x2, 0x2, 0x16d, 0x16f, 0x3, 0x2, 
-       0x2, 0x2, 0x16e, 0x16c, 0x3, 0x2, 0x2, 0x2, 0x16f, 0x170, 0x7, 0x29, 
-       0x2, 0x2, 0x170, 0x5a, 0x3, 0x2, 0x2, 0x2, 0x171, 0x177, 0x7, 0x62, 
-       0x2, 0x2, 0x172, 0x173, 0x7, 0x62, 0x2, 0x2, 0x173, 0x176, 0x7, 0x62, 
-       0x2, 0x2, 0x174, 0x176, 0xa, 0x9, 0x2, 0x2, 0x175, 0x172, 0x3, 0x2, 
-       0x2, 0x2, 0x175, 0x174, 0x3, 0x2, 0x2, 0x2, 0x176, 0x179, 0x3, 0x2, 
-       0x2, 0x2, 0x177, 0x175, 0x3, 0x2, 0x2, 0x2, 0x177, 0x178, 0x3, 0x2, 
-       0x2, 0x2, 0x178, 0x17a, 0x3, 0x2, 0x2, 0x2, 0x179, 0x177, 0x3, 0x2, 
-       0x2, 0x2, 0x17a, 0x17b, 0x7, 0x62, 0x2, 0x2, 0x17b, 0x5c, 0x3, 0x2, 
-       0x2, 0x2, 0x17c, 0x17d, 0x9, 0xa, 0x2, 0x2, 0x17d, 0x5e, 0x3, 0x2, 
-       0x2, 0x2, 0x17e, 0x17f, 0x9, 0xb, 0x2, 0x2, 0x17f, 0x60, 0x3, 0x2, 
-       0x2, 0x2, 0x180, 0x181, 0x9, 0xc, 0x2, 0x2, 0x181, 0x62, 0x3, 0x2, 
-       0x2, 0x2, 0x182, 0x183, 0x9, 0xd, 0x2, 0x2, 0x183, 0x64, 0x3, 0x2, 
-       0x2, 0x2, 0x184, 0x185, 0x9, 0x4, 0x2, 0x2, 0x185, 0x66, 0x3, 0x2, 
-       0x2, 0x2, 0x186, 0x187, 0x9, 0xe, 0x2, 0x2, 0x187, 0x68, 0x3, 0x2, 
-       0x2, 0x2, 0x188, 0x189, 0x9, 0xf, 0x2, 0x2, 0x189, 0x6a, 0x3, 0x2, 
-       0x2, 0x2, 0x18a, 0x18b, 0x9, 0x10, 0x2, 0x2, 0x18b, 0x6c, 0x3, 0x2, 
-       0x2, 0x2, 0x18c, 0x18d, 0x9, 0x11, 0x2, 0x2, 0x18d, 0x6e, 0x3, 0x2, 
-       0x2, 0x2, 0x18e, 0x18f, 0x9, 0x12, 0x2, 0x2, 0x18f, 0x70, 0x3, 0x2, 
-       0x2, 0x2, 0x190, 0x191, 0x9, 0x13, 0x2, 0x2, 0x191, 0x72, 0x3, 0x2, 
-       0x2, 0x2, 0x192, 0x193, 0x9, 0x14, 0x2, 0x2, 0x193, 0x74, 0x3, 0x2, 
-       0x2, 0x2, 0x194, 0x195, 0x9, 0x15, 0x2, 0x2, 0x195, 0x76, 0x3, 0x2, 
-       0x2, 0x2, 0x196, 0x197, 0x9, 0x16, 0x2, 0x2, 0x197, 0x78, 0x3, 0x2, 
-       0x2, 0x2, 0x198, 0x199, 0x9, 0x17, 0x2, 0x2, 0x199, 0x7a, 0x3, 0x2, 
-       0x2, 0x2, 0x19a, 0x19b, 0x9, 0x18, 0x2, 0x2, 0x19b, 0x7c, 0x3, 0x2, 
-       0x2, 0x2, 0x19c, 0x19d, 0x9, 0x19, 0x2, 0x2, 0x19d, 0x7e, 0x3, 0x2, 
-       0x2, 0x2, 0x19e, 0x19f, 0x9, 0x1a, 0x2, 0x2, 0x19f, 0x80, 0x3, 0x2, 
-       0x2, 0x2, 0x1a0, 0x1a1, 0x9, 0x1b, 0x2, 0x2, 0x1a1, 0x82, 0x3, 0x2, 
-       0x2, 0x2, 0x1a2, 0x1a3, 0x9, 0x1c, 0x2, 0x2, 0x1a3, 0x84, 0x3, 0x2, 
-       0x2, 0x2, 0x1a4, 0x1a5, 0x9, 0x1d, 0x2, 0x2, 0x1a5, 0x86, 0x3, 0x2, 
-       0x2, 0x2, 0x1a6, 0x1a7, 0x9, 0x1e, 0x2, 0x2, 0x1a7, 0x88, 0x3, 0x2, 
-       0x2, 0x2, 0x1a8, 0x1a9, 0x9, 0x1f, 0x2, 0x2, 0x1a9, 0x8a, 0x3, 0x2, 
-       0x2, 0x2, 0x1aa, 0x1ab, 0x9, 0x20, 0x2, 0x2, 0x1ab, 0x8c, 0x3, 0x2, 
-       0x2, 0x2, 0x1ac, 0x1ad, 0x9, 0x21, 0x2, 0x2, 0x1ad, 0x8e, 0x3, 0x2, 
-       0x2, 0x2, 0x1ae, 0x1af, 0x9, 0x22, 0x2, 0x2, 0x1af, 0x90, 0x3, 0x2, 
-       0x2, 0x2, 0x1a, 0x2, 0x99, 0xc7, 0xed, 0xf2, 0x106, 0x10a, 0x10f, 
-       0x117, 0x119, 0x12d, 0x137, 0x13c, 0x141, 0x145, 0x14a, 0x151, 0x157, 
-       0x15f, 0x161, 0x16a, 0x16c, 0x175, 0x177, 0x3, 0x2, 0x3, 0x2, 
-  };
+    static const uint16_t serializedATNSegment0[] = {
+        0x3,   0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964,
+        0x2,   0x27,   0x1b0,  0x8,    0x1,    0x4,    0x2,    0x9,    0x2,
+        0x4,   0x3,    0x9,    0x3,    0x4,    0x4,    0x9,    0x4,    0x4,
+        0x5,   0x9,    0x5,    0x4,    0x6,    0x9,    0x6,    0x4,    0x7,
+        0x9,   0x7,    0x4,    0x8,    0x9,    0x8,    0x4,    0x9,    0x9,
+        0x9,   0x4,    0xa,    0x9,    0xa,    0x4,    0xb,    0x9,    0xb,
+        0x4,   0xc,    0x9,    0xc,    0x4,    0xd,    0x9,    0xd,    0x4,
+        0xe,   0x9,    0xe,    0x4,    0xf,    0x9,    0xf,    0x4,    0x10,
+        0x9,   0x10,   0x4,    0x11,   0x9,    0x11,   0x4,    0x12,   0x9,
+        0x12,  0x4,    0x13,   0x9,    0x13,   0x4,    0x14,   0x9,    0x14,
+        0x4,   0x15,   0x9,    0x15,   0x4,    0x16,   0x9,    0x16,   0x4,
+        0x17,  0x9,    0x17,   0x4,    0x18,   0x9,    0x18,   0x4,    0x19,
+        0x9,   0x19,   0x4,    0x1a,   0x9,    0x1a,   0x4,    0x1b,   0x9,
+        0x1b,  0x4,    0x1c,   0x9,    0x1c,   0x4,    0x1d,   0x9,    0x1d,
+        0x4,   0x1e,   0x9,    0x1e,   0x4,    0x1f,   0x9,    0x1f,   0x4,
+        0x20,  0x9,    0x20,   0x4,    0x21,   0x9,    0x21,   0x4,    0x22,
+        0x9,   0x22,   0x4,    0x23,   0x9,    0x23,   0x4,    0x24,   0x9,
+        0x24,  0x4,    0x25,   0x9,    0x25,   0x4,    0x26,   0x9,    0x26,
+        0x4,   0x27,   0x9,    0x27,   0x4,    0x28,   0x9,    0x28,   0x4,
+        0x29,  0x9,    0x29,   0x4,    0x2a,   0x9,    0x2a,   0x4,    0x2b,
+        0x9,   0x2b,   0x4,    0x2c,   0x9,    0x2c,   0x4,    0x2d,   0x9,
+        0x2d,  0x4,    0x2e,   0x9,    0x2e,   0x4,    0x2f,   0x9,    0x2f,
+        0x4,   0x30,   0x9,    0x30,   0x4,    0x31,   0x9,    0x31,   0x4,
+        0x32,  0x9,    0x32,   0x4,    0x33,   0x9,    0x33,   0x4,    0x34,
+        0x9,   0x34,   0x4,    0x35,   0x9,    0x35,   0x4,    0x36,   0x9,
+        0x36,  0x4,    0x37,   0x9,    0x37,   0x4,    0x38,   0x9,    0x38,
+        0x4,   0x39,   0x9,    0x39,   0x4,    0x3a,   0x9,    0x3a,   0x4,
+        0x3b,  0x9,    0x3b,   0x4,    0x3c,   0x9,    0x3c,   0x4,    0x3d,
+        0x9,   0x3d,   0x4,    0x3e,   0x9,    0x3e,   0x4,    0x3f,   0x9,
+        0x3f,  0x4,    0x40,   0x9,    0x40,   0x4,    0x41,   0x9,    0x41,
+        0x4,   0x42,   0x9,    0x42,   0x4,    0x43,   0x9,    0x43,   0x4,
+        0x44,  0x9,    0x44,   0x4,    0x45,   0x9,    0x45,   0x4,    0x46,
+        0x9,   0x46,   0x4,    0x47,   0x9,    0x47,   0x4,    0x48,   0x9,
+        0x48,  0x3,    0x2,    0x3,    0x2,    0x3,    0x2,    0x3,    0x2,
+        0x3,   0x2,    0x3,    0x3,    0x6,    0x3,    0x98,   0xa,    0x3,
+        0xd,   0x3,    0xe,    0x3,    0x99,   0x3,    0x3,    0x3,    0x3,
+        0x3,   0x4,    0x3,    0x4,    0x3,    0x4,    0x3,    0x4,    0x3,
+        0x4,   0x3,    0x5,    0x3,    0x5,    0x3,    0x5,    0x3,    0x5,
+        0x3,   0x5,    0x3,    0x5,    0x3,    0x5,    0x3,    0x5,    0x3,
+        0x5,   0x3,    0x5,    0x3,    0x6,    0x3,    0x6,    0x3,    0x7,
+        0x3,   0x7,    0x3,    0x8,    0x3,    0x8,    0x3,    0x9,    0x3,
+        0x9,   0x3,    0xa,    0x3,    0xa,    0x3,    0xa,    0x3,    0xb,
+        0x3,   0xb,    0x3,    0xc,    0x3,    0xc,    0x3,    0xd,    0x3,
+        0xd,   0x3,    0xd,    0x3,    0xe,    0x3,    0xe,    0x3,    0xf,
+        0x3,   0xf,    0x3,    0xf,    0x3,    0x10,   0x3,    0x10,   0x3,
+        0x10,  0x3,    0x10,   0x5,    0x10,   0xc8,   0xa,    0x10,   0x3,
+        0x11,  0x3,    0x11,   0x3,    0x12,   0x3,    0x12,   0x3,    0x12,
+        0x3,   0x13,   0x3,    0x13,   0x3,    0x14,   0x3,    0x14,   0x3,
+        0x14,  0x3,    0x15,   0x3,    0x15,   0x3,    0x16,   0x3,    0x16,
+        0x3,   0x17,   0x3,    0x17,   0x3,    0x18,   0x3,    0x18,   0x3,
+        0x19,  0x3,    0x19,   0x3,    0x1a,   0x3,    0x1a,   0x3,    0x1a,
+        0x3,   0x1b,   0x3,    0x1b,   0x3,    0x1c,   0x3,    0x1c,   0x3,
+        0x1d,  0x3,    0x1d,   0x3,    0x1e,   0x3,    0x1e,   0x3,    0x1f,
+        0x3,   0x1f,   0x3,    0x1f,   0x3,    0x20,   0x3,    0x20,   0x5,
+        0x20,  0xee,   0xa,    0x20,   0x3,    0x21,   0x6,    0x21,   0xf1,
+        0xa,   0x21,   0xd,    0x21,   0xe,    0x21,   0xf2,   0x3,    0x21,
+        0x3,   0x21,   0x3,    0x21,   0x3,    0x21,   0x3,    0x21,   0x3,
+        0x21,  0x3,    0x21,   0x3,    0x21,   0x3,    0x21,   0x3,    0x21,
+        0x3,   0x21,   0x3,    0x21,   0x3,    0x21,   0x3,    0x21,   0x3,
+        0x21,  0x3,    0x21,   0x3,    0x21,   0x3,    0x21,   0x5,    0x21,
+        0x107, 0xa,    0x21,   0x6,    0x21,   0x109,  0xa,    0x21,   0xd,
+        0x21,  0xe,    0x21,   0x10a,  0x3,    0x22,   0x3,    0x22,   0x3,
+        0x22,  0x5,    0x22,   0x110,  0xa,    0x22,   0x3,    0x22,   0x3,
+        0x22,  0x3,    0x22,   0x3,    0x22,   0x3,    0x22,   0x3,    0x22,
+        0x5,   0x22,   0x118,  0xa,    0x22,   0x5,    0x22,   0x11a,  0xa,
+        0x22,  0x3,    0x23,   0x3,    0x23,   0x3,    0x23,   0x3,    0x23,
+        0x3,   0x23,   0x3,    0x23,   0x3,    0x23,   0x3,    0x23,   0x3,
+        0x23,  0x3,    0x23,   0x3,    0x23,   0x3,    0x23,   0x3,    0x23,
+        0x3,   0x23,   0x3,    0x23,   0x3,    0x23,   0x3,    0x23,   0x3,
+        0x23,  0x5,    0x23,   0x12e,  0xa,    0x23,   0x3,    0x24,   0x3,
+        0x24,  0x3,    0x24,   0x3,    0x24,   0x3,    0x24,   0x3,    0x24,
+        0x3,   0x24,   0x3,    0x24,   0x5,    0x24,   0x138,  0xa,    0x24,
+        0x3,   0x25,   0x6,    0x25,   0x13b,  0xa,    0x25,   0xd,    0x25,
+        0xe,   0x25,   0x13c,  0x3,    0x26,   0x6,    0x26,   0x140,  0xa,
+        0x26,  0xd,    0x26,   0xe,    0x26,   0x141,  0x3,    0x26,   0x3,
+        0x26,  0x5,    0x26,   0x146,  0xa,    0x26,   0x3,    0x26,   0x6,
+        0x26,  0x149,  0xa,    0x26,   0xd,    0x26,   0xe,    0x26,   0x14a,
+        0x3,   0x27,   0x3,    0x27,   0x3,    0x28,   0x6,    0x28,   0x150,
+        0xa,   0x28,   0xd,    0x28,   0xe,    0x28,   0x151,  0x3,    0x29,
+        0x3,   0x29,   0x3,    0x2a,   0x3,    0x2a,   0x5,    0x2a,   0x158,
+        0xa,   0x2a,   0x3,    0x2b,   0x3,    0x2b,   0x3,    0x2c,   0x3,
+        0x2c,  0x3,    0x2c,   0x3,    0x2c,   0x7,    0x2c,   0x160,  0xa,
+        0x2c,  0xc,    0x2c,   0xe,    0x2c,   0x163,  0xb,    0x2c,   0x3,
+        0x2c,  0x3,    0x2c,   0x3,    0x2d,   0x3,    0x2d,   0x3,    0x2d,
+        0x3,   0x2d,   0x7,    0x2d,   0x16b,  0xa,    0x2d,   0xc,    0x2d,
+        0xe,   0x2d,   0x16e,  0xb,    0x2d,   0x3,    0x2d,   0x3,    0x2d,
+        0x3,   0x2e,   0x3,    0x2e,   0x3,    0x2e,   0x3,    0x2e,   0x7,
+        0x2e,  0x176,  0xa,    0x2e,   0xc,    0x2e,   0xe,    0x2e,   0x179,
+        0xb,   0x2e,   0x3,    0x2e,   0x3,    0x2e,   0x3,    0x2f,   0x3,
+        0x2f,  0x3,    0x30,   0x3,    0x30,   0x3,    0x31,   0x3,    0x31,
+        0x3,   0x32,   0x3,    0x32,   0x3,    0x33,   0x3,    0x33,   0x3,
+        0x34,  0x3,    0x34,   0x3,    0x35,   0x3,    0x35,   0x3,    0x36,
+        0x3,   0x36,   0x3,    0x37,   0x3,    0x37,   0x3,    0x38,   0x3,
+        0x38,  0x3,    0x39,   0x3,    0x39,   0x3,    0x3a,   0x3,    0x3a,
+        0x3,   0x3b,   0x3,    0x3b,   0x3,    0x3c,   0x3,    0x3c,   0x3,
+        0x3d,  0x3,    0x3d,   0x3,    0x3e,   0x3,    0x3e,   0x3,    0x3f,
+        0x3,   0x3f,   0x3,    0x40,   0x3,    0x40,   0x3,    0x41,   0x3,
+        0x41,  0x3,    0x42,   0x3,    0x42,   0x3,    0x43,   0x3,    0x43,
+        0x3,   0x44,   0x3,    0x44,   0x3,    0x45,   0x3,    0x45,   0x3,
+        0x46,  0x3,    0x46,   0x3,    0x47,   0x3,    0x47,   0x3,    0x48,
+        0x3,   0x48,   0x2,    0x2,    0x49,   0x3,    0x3,    0x5,    0x4,
+        0x7,   0x5,    0x9,    0x6,    0xb,    0x7,    0xd,    0x8,    0xf,
+        0x9,   0x11,   0xa,    0x13,   0xb,    0x15,   0xc,    0x17,   0xd,
+        0x19,  0xe,    0x1b,   0xf,    0x1d,   0x10,   0x1f,   0x11,   0x21,
+        0x12,  0x23,   0x13,   0x25,   0x14,   0x27,   0x15,   0x29,   0x16,
+        0x2b,  0x17,   0x2d,   0x18,   0x2f,   0x19,   0x31,   0x1a,   0x33,
+        0x1b,  0x35,   0x1c,   0x37,   0x1d,   0x39,   0x1e,   0x3b,   0x1f,
+        0x3d,  0x20,   0x3f,   0x21,   0x41,   0x22,   0x43,   0x23,   0x45,
+        0x2,   0x47,   0x2,    0x49,   0x24,   0x4b,   0x25,   0x4d,   0x2,
+        0x4f,  0x26,   0x51,   0x27,   0x53,   0x2,    0x55,   0x2,    0x57,
+        0x2,   0x59,   0x2,    0x5b,   0x2,    0x5d,   0x2,    0x5f,   0x2,
+        0x61,  0x2,    0x63,   0x2,    0x65,   0x2,    0x67,   0x2,    0x69,
+        0x2,   0x6b,   0x2,    0x6d,   0x2,    0x6f,   0x2,    0x71,   0x2,
+        0x73,  0x2,    0x75,   0x2,    0x77,   0x2,    0x79,   0x2,    0x7b,
+        0x2,   0x7d,   0x2,    0x7f,   0x2,    0x81,   0x2,    0x83,   0x2,
+        0x85,  0x2,    0x87,   0x2,    0x89,   0x2,    0x8b,   0x2,    0x8d,
+        0x2,   0x8f,   0x2,    0x3,    0x2,    0x23,   0x5,    0x2,    0xb,
+        0xd,   0xf,    0xf,    0x22,   0x22,   0x4,    0x2,    0x2d,   0x2d,
+        0x2f,  0x2f,   0x4,    0x2,    0x47,   0x47,   0x67,   0x67,   0x3,
+        0x2,   0x32,   0x3b,   0x8,    0x2,    0x25,   0x26,   0x32,   0x3c,
+        0x42,  0x5c,   0x61,   0x61,   0x63,   0x7d,   0x7f,   0x7f,   0x3,
+        0x2,   0x24,   0x24,   0x3,    0x2,    0x29,   0x29,   0x3,    0x2,
+        0x62,  0x62,   0x4,    0x2,    0x43,   0x43,   0x63,   0x63,   0x4,
+        0x2,   0x44,   0x44,   0x64,   0x64,   0x4,    0x2,    0x45,   0x45,
+        0x65,  0x65,   0x4,    0x2,    0x46,   0x46,   0x66,   0x66,   0x4,
+        0x2,   0x48,   0x48,   0x68,   0x68,   0x4,    0x2,    0x49,   0x49,
+        0x69,  0x69,   0x4,    0x2,    0x4a,   0x4a,   0x6a,   0x6a,   0x4,
+        0x2,   0x4b,   0x4b,   0x6b,   0x6b,   0x4,    0x2,    0x4c,   0x4c,
+        0x6c,  0x6c,   0x4,    0x2,    0x4d,   0x4d,   0x6d,   0x6d,   0x4,
+        0x2,   0x4e,   0x4e,   0x6e,   0x6e,   0x4,    0x2,    0x4f,   0x4f,
+        0x6f,  0x6f,   0x4,    0x2,    0x50,   0x50,   0x70,   0x70,   0x4,
+        0x2,   0x51,   0x51,   0x71,   0x71,   0x4,    0x2,    0x52,   0x52,
+        0x72,  0x72,   0x4,    0x2,    0x53,   0x53,   0x73,   0x73,   0x4,
+        0x2,   0x54,   0x54,   0x74,   0x74,   0x4,    0x2,    0x55,   0x55,
+        0x75,  0x75,   0x4,    0x2,    0x56,   0x56,   0x76,   0x76,   0x4,
+        0x2,   0x57,   0x57,   0x77,   0x77,   0x4,    0x2,    0x58,   0x58,
+        0x78,  0x78,   0x4,    0x2,    0x59,   0x59,   0x79,   0x79,   0x4,
+        0x2,   0x5a,   0x5a,   0x7a,   0x7a,   0x4,    0x2,    0x5b,   0x5b,
+        0x7b,  0x7b,   0x4,    0x2,    0x5c,   0x5c,   0x7c,   0x7c,   0x2,
+        0x1ad, 0x2,    0x3,    0x3,    0x2,    0x2,    0x2,    0x2,    0x5,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x7,    0x3,    0x2,    0x2,
+        0x2,   0x2,    0x9,    0x3,    0x2,    0x2,    0x2,    0x2,    0xb,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0xd,    0x3,    0x2,    0x2,
+        0x2,   0x2,    0xf,    0x3,    0x2,    0x2,    0x2,    0x2,    0x11,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x13,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x15,   0x3,    0x2,    0x2,    0x2,    0x2,    0x17,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x19,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x1b,   0x3,    0x2,    0x2,    0x2,    0x2,    0x1d,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x1f,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x21,   0x3,    0x2,    0x2,    0x2,    0x2,    0x23,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x25,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x27,   0x3,    0x2,    0x2,    0x2,    0x2,    0x29,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x2b,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x2d,   0x3,    0x2,    0x2,    0x2,    0x2,    0x2f,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x31,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x33,   0x3,    0x2,    0x2,    0x2,    0x2,    0x35,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x37,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x39,   0x3,    0x2,    0x2,    0x2,    0x2,    0x3b,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x3d,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x3f,   0x3,    0x2,    0x2,    0x2,    0x2,    0x41,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x43,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x49,   0x3,    0x2,    0x2,    0x2,    0x2,    0x4b,
+        0x3,   0x2,    0x2,    0x2,    0x2,    0x4f,   0x3,    0x2,    0x2,
+        0x2,   0x2,    0x51,   0x3,    0x2,    0x2,    0x2,    0x3,    0x91,
+        0x3,   0x2,    0x2,    0x2,    0x5,    0x97,   0x3,    0x2,    0x2,
+        0x2,   0x7,    0x9d,   0x3,    0x2,    0x2,    0x2,    0x9,    0xa2,
+        0x3,   0x2,    0x2,    0x2,    0xb,    0xac,   0x3,    0x2,    0x2,
+        0x2,   0xd,    0xae,   0x3,    0x2,    0x2,    0x2,    0xf,    0xb0,
+        0x3,   0x2,    0x2,    0x2,    0x11,   0xb2,   0x3,    0x2,    0x2,
+        0x2,   0x13,   0xb4,   0x3,    0x2,    0x2,    0x2,    0x15,   0xb7,
+        0x3,   0x2,    0x2,    0x2,    0x17,   0xb9,   0x3,    0x2,    0x2,
+        0x2,   0x19,   0xbb,   0x3,    0x2,    0x2,    0x2,    0x1b,   0xbe,
+        0x3,   0x2,    0x2,    0x2,    0x1d,   0xc0,   0x3,    0x2,    0x2,
+        0x2,   0x1f,   0xc7,   0x3,    0x2,    0x2,    0x2,    0x21,   0xc9,
+        0x3,   0x2,    0x2,    0x2,    0x23,   0xcb,   0x3,    0x2,    0x2,
+        0x2,   0x25,   0xce,   0x3,    0x2,    0x2,    0x2,    0x27,   0xd0,
+        0x3,   0x2,    0x2,    0x2,    0x29,   0xd3,   0x3,    0x2,    0x2,
+        0x2,   0x2b,   0xd5,   0x3,    0x2,    0x2,    0x2,    0x2d,   0xd7,
+        0x3,   0x2,    0x2,    0x2,    0x2f,   0xd9,   0x3,    0x2,    0x2,
+        0x2,   0x31,   0xdb,   0x3,    0x2,    0x2,    0x2,    0x33,   0xdd,
+        0x3,   0x2,    0x2,    0x2,    0x35,   0xe0,   0x3,    0x2,    0x2,
+        0x2,   0x37,   0xe2,   0x3,    0x2,    0x2,    0x2,    0x39,   0xe4,
+        0x3,   0x2,    0x2,    0x2,    0x3b,   0xe6,   0x3,    0x2,    0x2,
+        0x2,   0x3d,   0xe8,   0x3,    0x2,    0x2,    0x2,    0x3f,   0xed,
+        0x3,   0x2,    0x2,    0x2,    0x41,   0x108,  0x3,    0x2,    0x2,
+        0x2,   0x43,   0x10c,  0x3,    0x2,    0x2,    0x2,    0x45,   0x12d,
+        0x3,   0x2,    0x2,    0x2,    0x47,   0x12f,  0x3,    0x2,    0x2,
+        0x2,   0x49,   0x13a,  0x3,    0x2,    0x2,    0x2,    0x4b,   0x13f,
+        0x3,   0x2,    0x2,    0x2,    0x4d,   0x14c,  0x3,    0x2,    0x2,
+        0x2,   0x4f,   0x14f,  0x3,    0x2,    0x2,    0x2,    0x51,   0x153,
+        0x3,   0x2,    0x2,    0x2,    0x53,   0x157,  0x3,    0x2,    0x2,
+        0x2,   0x55,   0x159,  0x3,    0x2,    0x2,    0x2,    0x57,   0x15b,
+        0x3,   0x2,    0x2,    0x2,    0x59,   0x166,  0x3,    0x2,    0x2,
+        0x2,   0x5b,   0x171,  0x3,    0x2,    0x2,    0x2,    0x5d,   0x17c,
+        0x3,   0x2,    0x2,    0x2,    0x5f,   0x17e,  0x3,    0x2,    0x2,
+        0x2,   0x61,   0x180,  0x3,    0x2,    0x2,    0x2,    0x63,   0x182,
+        0x3,   0x2,    0x2,    0x2,    0x65,   0x184,  0x3,    0x2,    0x2,
+        0x2,   0x67,   0x186,  0x3,    0x2,    0x2,    0x2,    0x69,   0x188,
+        0x3,   0x2,    0x2,    0x2,    0x6b,   0x18a,  0x3,    0x2,    0x2,
+        0x2,   0x6d,   0x18c,  0x3,    0x2,    0x2,    0x2,    0x6f,   0x18e,
+        0x3,   0x2,    0x2,    0x2,    0x71,   0x190,  0x3,    0x2,    0x2,
+        0x2,   0x73,   0x192,  0x3,    0x2,    0x2,    0x2,    0x75,   0x194,
+        0x3,   0x2,    0x2,    0x2,    0x77,   0x196,  0x3,    0x2,    0x2,
+        0x2,   0x79,   0x198,  0x3,    0x2,    0x2,    0x2,    0x7b,   0x19a,
+        0x3,   0x2,    0x2,    0x2,    0x7d,   0x19c,  0x3,    0x2,    0x2,
+        0x2,   0x7f,   0x19e,  0x3,    0x2,    0x2,    0x2,    0x81,   0x1a0,
+        0x3,   0x2,    0x2,    0x2,    0x83,   0x1a2,  0x3,    0x2,    0x2,
+        0x2,   0x85,   0x1a4,  0x3,    0x2,    0x2,    0x2,    0x87,   0x1a6,
+        0x3,   0x2,    0x2,    0x2,    0x89,   0x1a8,  0x3,    0x2,    0x2,
+        0x2,   0x8b,   0x1aa,  0x3,    0x2,    0x2,    0x2,    0x8d,   0x1ac,
+        0x3,   0x2,    0x2,    0x2,    0x8f,   0x1ae,  0x3,    0x2,    0x2,
+        0x2,   0x91,   0x92,   0x5,    0x7f,   0x40,   0x2,    0x92,   0x93,
+        0x5,   0x79,   0x3d,   0x2,    0x93,   0x94,   0x5,    0x79,   0x3d,
+        0x2,   0x94,   0x95,   0x5,    0x83,   0x42,   0x2,    0x95,   0x4,
+        0x3,   0x2,    0x2,    0x2,    0x96,   0x98,   0x9,    0x2,    0x2,
+        0x2,   0x97,   0x96,   0x3,    0x2,    0x2,    0x2,    0x98,   0x99,
+        0x3,   0x2,    0x2,    0x2,    0x99,   0x97,   0x3,    0x2,    0x2,
+        0x2,   0x99,   0x9a,   0x3,    0x2,    0x2,    0x2,    0x9a,   0x9b,
+        0x3,   0x2,    0x2,    0x2,    0x9b,   0x9c,   0x8,    0x3,    0x2,
+        0x2,   0x9c,   0x6,    0x3,    0x2,    0x2,    0x2,    0x9d,   0x9e,
+        0x5,   0x83,   0x42,   0x2,    0x9e,   0x9f,   0x5,    0x6d,   0x37,
+        0x2,   0x9f,   0xa0,   0x5,    0x75,   0x3b,   0x2,    0xa0,   0xa1,
+        0x5,   0x65,   0x33,   0x2,    0xa1,   0x8,    0x3,    0x2,    0x2,
+        0x2,   0xa2,   0xa3,   0x5,    0x83,   0x42,   0x2,    0xa3,   0xa4,
+        0x5,   0x6d,   0x37,   0x2,    0xa4,   0xa5,   0x5,    0x75,   0x3b,
+        0x2,   0xa5,   0xa6,   0x5,    0x65,   0x33,   0x2,    0xa6,   0xa7,
+        0x5,   0x81,   0x41,   0x2,    0xa7,   0xa8,   0x5,    0x83,   0x42,
+        0x2,   0xa8,   0xa9,   0x5,    0x5d,   0x2f,   0x2,    0xa9,   0xaa,
+        0x5,   0x75,   0x3b,   0x2,    0xaa,   0xab,   0x5,    0x7b,   0x3e,
+        0x2,   0xab,   0xa,    0x3,    0x2,    0x2,    0x2,    0xac,   0xad,
+        0x7,   0x2f,   0x2,    0x2,    0xad,   0xc,    0x3,    0x2,    0x2,
+        0x2,   0xae,   0xaf,   0x7,    0x2d,   0x2,    0x2,    0xaf,   0xe,
+        0x3,   0x2,    0x2,    0x2,    0xb0,   0xb1,   0x7,    0x31,   0x2,
+        0x2,   0xb1,   0x10,   0x3,    0x2,    0x2,    0x2,    0xb2,   0xb3,
+        0x7,   0x27,   0x2,    0x2,    0xb3,   0x12,   0x3,    0x2,    0x2,
+        0x2,   0xb4,   0xb5,   0x7,    0x3f,   0x2,    0x2,    0xb5,   0xb6,
+        0x7,   0x3f,   0x2,    0x2,    0xb6,   0x14,   0x3,    0x2,    0x2,
+        0x2,   0xb7,   0xb8,   0x7,    0x3f,   0x2,    0x2,    0xb8,   0x16,
+        0x3,   0x2,    0x2,    0x2,    0xb9,   0xba,   0x7,    0x40,   0x2,
+        0x2,   0xba,   0x18,   0x3,    0x2,    0x2,    0x2,    0xbb,   0xbc,
+        0x7,   0x40,   0x2,    0x2,    0xbc,   0xbd,   0x7,    0x3f,   0x2,
+        0x2,   0xbd,   0x1a,   0x3,    0x2,    0x2,    0x2,    0xbe,   0xbf,
+        0x7,   0x3e,   0x2,    0x2,    0xbf,   0x1c,   0x3,    0x2,    0x2,
+        0x2,   0xc0,   0xc1,   0x7,    0x3e,   0x2,    0x2,    0xc1,   0xc2,
+        0x7,   0x3f,   0x2,    0x2,    0xc2,   0x1e,   0x3,    0x2,    0x2,
+        0x2,   0xc3,   0xc4,   0x7,    0x23,   0x2,    0x2,    0xc4,   0xc8,
+        0x7,   0x3f,   0x2,    0x2,    0xc5,   0xc6,   0x7,    0x3e,   0x2,
+        0x2,   0xc6,   0xc8,   0x7,    0x40,   0x2,    0x2,    0xc7,   0xc3,
+        0x3,   0x2,    0x2,    0x2,    0xc7,   0xc5,   0x3,    0x2,    0x2,
+        0x2,   0xc8,   0x20,   0x3,    0x2,    0x2,    0x2,    0xc9,   0xca,
+        0x7,   0x28,   0x2,    0x2,    0xca,   0x22,   0x3,    0x2,    0x2,
+        0x2,   0xcb,   0xcc,   0x7,    0x28,   0x2,    0x2,    0xcc,   0xcd,
+        0x7,   0x28,   0x2,    0x2,    0xcd,   0x24,   0x3,    0x2,    0x2,
+        0x2,   0xce,   0xcf,   0x7,    0x7e,   0x2,    0x2,    0xcf,   0x26,
+        0x3,   0x2,    0x2,    0x2,    0xd0,   0xd1,   0x7,    0x7e,   0x2,
+        0x2,   0xd1,   0xd2,   0x7,    0x7e,   0x2,    0x2,    0xd2,   0x28,
+        0x3,   0x2,    0x2,    0x2,    0xd3,   0xd4,   0x7,    0x23,   0x2,
+        0x2,   0xd4,   0x2a,   0x3,    0x2,    0x2,    0x2,    0xd5,   0xd6,
+        0x7,   0x30,   0x2,    0x2,    0xd6,   0x2c,   0x3,    0x2,    0x2,
+        0x2,   0xd7,   0xd8,   0x7,    0x2e,   0x2,    0x2,    0xd8,   0x2e,
+        0x3,   0x2,    0x2,    0x2,    0xd9,   0xda,   0x7,    0x3d,   0x2,
+        0x2,   0xda,   0x30,   0x3,    0x2,    0x2,    0x2,    0xdb,   0xdc,
+        0x7,   0x2c,   0x2,    0x2,    0xdc,   0x32,   0x3,    0x2,    0x2,
+        0x2,   0xdd,   0xde,   0x7,    0x2c,   0x2,    0x2,    0xde,   0xdf,
+        0x7,   0x2c,   0x2,    0x2,    0xdf,   0x34,   0x3,    0x2,    0x2,
+        0x2,   0xe0,   0xe1,   0x7,    0x2a,   0x2,    0x2,    0xe1,   0x36,
+        0x3,   0x2,    0x2,    0x2,    0xe2,   0xe3,   0x7,    0x2b,   0x2,
+        0x2,   0xe3,   0x38,   0x3,    0x2,    0x2,    0x2,    0xe4,   0xe5,
+        0x7,   0x5d,   0x2,    0x2,    0xe5,   0x3a,   0x3,    0x2,    0x2,
+        0x2,   0xe6,   0xe7,   0x7,    0x5f,   0x2,    0x2,    0xe7,   0x3c,
+        0x3,   0x2,    0x2,    0x2,    0xe8,   0xe9,   0x7,    0x3c,   0x2,
+        0x2,   0xe9,   0xea,   0x7,    0x3c,   0x2,    0x2,    0xea,   0x3e,
+        0x3,   0x2,    0x2,    0x2,    0xeb,   0xee,   0x5,    0x57,   0x2c,
+        0x2,   0xec,   0xee,   0x5,    0x59,   0x2d,   0x2,    0xed,   0xeb,
+        0x3,   0x2,    0x2,    0x2,    0xed,   0xec,   0x3,    0x2,    0x2,
+        0x2,   0xee,   0x40,   0x3,    0x2,    0x2,    0x2,    0xef,   0xf1,
+        0x5,   0x49,   0x25,   0x2,    0xf0,   0xef,   0x3,    0x2,    0x2,
+        0x2,   0xf1,   0xf2,   0x3,    0x2,    0x2,    0x2,    0xf2,   0xf0,
+        0x3,   0x2,    0x2,    0x2,    0xf2,   0xf3,   0x3,    0x2,    0x2,
+        0x2,   0xf3,   0x106,  0x3,    0x2,    0x2,    0x2,    0xf4,   0x107,
+        0x5,   0x8d,   0x47,   0x2,    0xf5,   0xf6,   0x5,    0x75,   0x3b,
+        0x2,   0xf6,   0xf7,   0x5,    0x79,   0x3d,   0x2,    0xf7,   0x107,
+        0x3,   0x2,    0x2,    0x2,    0xf8,   0x107,  0x5,    0x89,   0x45,
+        0x2,   0xf9,   0x107,  0x5,    0x63,   0x32,   0x2,    0xfa,   0x107,
+        0x5,   0x6b,   0x36,   0x2,    0xfb,   0x107,  0x5,    0x75,   0x3b,
+        0x2,   0xfc,   0x107,  0x5,    0x81,   0x41,   0x2,    0xfd,   0xfe,
+        0x5,   0x75,   0x3b,   0x2,    0xfe,   0xff,   0x5,    0x81,   0x41,
+        0x2,   0xff,   0x107,  0x3,    0x2,    0x2,    0x2,    0x100,  0x101,
+        0x5,   0x85,   0x43,   0x2,    0x101,  0x102,  0x5,    0x81,   0x41,
+        0x2,   0x102,  0x107,  0x3,    0x2,    0x2,    0x2,    0x103,  0x104,
+        0x5,   0x77,   0x3c,   0x2,    0x104,  0x105,  0x5,    0x81,   0x41,
+        0x2,   0x105,  0x107,  0x3,    0x2,    0x2,    0x2,    0x106,  0xf4,
+        0x3,   0x2,    0x2,    0x2,    0x106,  0xf5,   0x3,    0x2,    0x2,
+        0x2,   0x106,  0xf8,   0x3,    0x2,    0x2,    0x2,    0x106,  0xf9,
+        0x3,   0x2,    0x2,    0x2,    0x106,  0xfa,   0x3,    0x2,    0x2,
+        0x2,   0x106,  0xfb,   0x3,    0x2,    0x2,    0x2,    0x106,  0xfc,
+        0x3,   0x2,    0x2,    0x2,    0x106,  0xfd,   0x3,    0x2,    0x2,
+        0x2,   0x106,  0x100,  0x3,    0x2,    0x2,    0x2,    0x106,  0x103,
+        0x3,   0x2,    0x2,    0x2,    0x107,  0x109,  0x3,    0x2,    0x2,
+        0x2,   0x108,  0xf0,   0x3,    0x2,    0x2,    0x2,    0x109,  0x10a,
+        0x3,   0x2,    0x2,    0x2,    0x10a,  0x108,  0x3,    0x2,    0x2,
+        0x2,   0x10a,  0x10b,  0x3,    0x2,    0x2,    0x2,    0x10b,  0x42,
+        0x3,   0x2,    0x2,    0x2,    0x10c,  0x119,  0x5,    0x45,   0x23,
+        0x2,   0x10d,  0x110,  0x5,    0x83,   0x42,   0x2,    0x10e,  0x110,
+        0x5,   0x5,    0x3,    0x2,    0x10f,  0x10d,  0x3,    0x2,    0x2,
+        0x2,   0x10f,  0x10e,  0x3,    0x2,    0x2,    0x2,    0x110,  0x111,
+        0x3,   0x2,    0x2,    0x2,    0x111,  0x117,  0x5,    0x47,   0x24,
+        0x2,   0x112,  0x113,  0x9,    0x3,    0x2,    0x2,    0x113,  0x114,
+        0x5,   0x49,   0x25,   0x2,    0x114,  0x115,  0x7,    0x3c,   0x2,
+        0x2,   0x115,  0x116,  0x5,    0x49,   0x25,   0x2,    0x116,  0x118,
+        0x3,   0x2,    0x2,    0x2,    0x117,  0x112,  0x3,    0x2,    0x2,
+        0x2,   0x117,  0x118,  0x3,    0x2,    0x2,    0x2,    0x118,  0x11a,
+        0x3,   0x2,    0x2,    0x2,    0x119,  0x10f,  0x3,    0x2,    0x2,
+        0x2,   0x119,  0x11a,  0x3,    0x2,    0x2,    0x2,    0x11a,  0x44,
+        0x3,   0x2,    0x2,    0x2,    0x11b,  0x11c,  0x5,    0x49,   0x25,
+        0x2,   0x11c,  0x11d,  0x7,    0x2f,   0x2,    0x2,    0x11d,  0x11e,
+        0x5,   0x49,   0x25,   0x2,    0x11e,  0x11f,  0x7,    0x2f,   0x2,
+        0x2,   0x11f,  0x120,  0x5,    0x49,   0x25,   0x2,    0x120,  0x12e,
+        0x3,   0x2,    0x2,    0x2,    0x121,  0x122,  0x5,    0x49,   0x25,
+        0x2,   0x122,  0x123,  0x7,    0x31,   0x2,    0x2,    0x123,  0x124,
+        0x5,   0x49,   0x25,   0x2,    0x124,  0x125,  0x7,    0x31,   0x2,
+        0x2,   0x125,  0x126,  0x5,    0x49,   0x25,   0x2,    0x126,  0x12e,
+        0x3,   0x2,    0x2,    0x2,    0x127,  0x128,  0x5,    0x49,   0x25,
+        0x2,   0x128,  0x129,  0x7,    0x30,   0x2,    0x2,    0x129,  0x12a,
+        0x5,   0x49,   0x25,   0x2,    0x12a,  0x12b,  0x7,    0x30,   0x2,
+        0x2,   0x12b,  0x12c,  0x5,    0x49,   0x25,   0x2,    0x12c,  0x12e,
+        0x3,   0x2,    0x2,    0x2,    0x12d,  0x11b,  0x3,    0x2,    0x2,
+        0x2,   0x12d,  0x121,  0x3,    0x2,    0x2,    0x2,    0x12d,  0x127,
+        0x3,   0x2,    0x2,    0x2,    0x12e,  0x46,   0x3,    0x2,    0x2,
+        0x2,   0x12f,  0x130,  0x5,    0x49,   0x25,   0x2,    0x130,  0x131,
+        0x7,   0x3c,   0x2,    0x2,    0x131,  0x132,  0x5,    0x49,   0x25,
+        0x2,   0x132,  0x133,  0x7,    0x3c,   0x2,    0x2,    0x133,  0x137,
+        0x5,   0x49,   0x25,   0x2,    0x134,  0x135,  0x5,    0x2b,   0x16,
+        0x2,   0x135,  0x136,  0x5,    0x49,   0x25,   0x2,    0x136,  0x138,
+        0x3,   0x2,    0x2,    0x2,    0x137,  0x134,  0x3,    0x2,    0x2,
+        0x2,   0x137,  0x138,  0x3,    0x2,    0x2,    0x2,    0x138,  0x48,
+        0x3,   0x2,    0x2,    0x2,    0x139,  0x13b,  0x5,    0x4d,   0x27,
+        0x2,   0x13a,  0x139,  0x3,    0x2,    0x2,    0x2,    0x13b,  0x13c,
+        0x3,   0x2,    0x2,    0x2,    0x13c,  0x13a,  0x3,    0x2,    0x2,
+        0x2,   0x13c,  0x13d,  0x3,    0x2,    0x2,    0x2,    0x13d,  0x4a,
+        0x3,   0x2,    0x2,    0x2,    0x13e,  0x140,  0x5,    0x4d,   0x27,
+        0x2,   0x13f,  0x13e,  0x3,    0x2,    0x2,    0x2,    0x140,  0x141,
+        0x3,   0x2,    0x2,    0x2,    0x141,  0x13f,  0x3,    0x2,    0x2,
+        0x2,   0x141,  0x142,  0x3,    0x2,    0x2,    0x2,    0x142,  0x143,
+        0x3,   0x2,    0x2,    0x2,    0x143,  0x145,  0x9,    0x4,    0x2,
+        0x2,   0x144,  0x146,  0x9,    0x3,    0x2,    0x2,    0x145,  0x144,
+        0x3,   0x2,    0x2,    0x2,    0x145,  0x146,  0x3,    0x2,    0x2,
+        0x2,   0x146,  0x148,  0x3,    0x2,    0x2,    0x2,    0x147,  0x149,
+        0x5,   0x4d,   0x27,   0x2,    0x148,  0x147,  0x3,    0x2,    0x2,
+        0x2,   0x149,  0x14a,  0x3,    0x2,    0x2,    0x2,    0x14a,  0x148,
+        0x3,   0x2,    0x2,    0x2,    0x14a,  0x14b,  0x3,    0x2,    0x2,
+        0x2,   0x14b,  0x4c,   0x3,    0x2,    0x2,    0x2,    0x14c,  0x14d,
+        0x9,   0x5,    0x2,    0x2,    0x14d,  0x4e,   0x3,    0x2,    0x2,
+        0x2,   0x14e,  0x150,  0x5,    0x53,   0x2a,   0x2,    0x14f,  0x14e,
+        0x3,   0x2,    0x2,    0x2,    0x150,  0x151,  0x3,    0x2,    0x2,
+        0x2,   0x151,  0x14f,  0x3,    0x2,    0x2,    0x2,    0x151,  0x152,
+        0x3,   0x2,    0x2,    0x2,    0x152,  0x50,   0x3,    0x2,    0x2,
+        0x2,   0x153,  0x154,  0x5,    0x5b,   0x2e,   0x2,    0x154,  0x52,
+        0x3,   0x2,    0x2,    0x2,    0x155,  0x158,  0x9,    0x6,    0x2,
+        0x2,   0x156,  0x158,  0x5,    0x55,   0x2b,   0x2,    0x157,  0x155,
+        0x3,   0x2,    0x2,    0x2,    0x157,  0x156,  0x3,    0x2,    0x2,
+        0x2,   0x158,  0x54,   0x3,    0x2,    0x2,    0x2,    0x159,  0x15a,
+        0x4,   0x2e82, 0xa001, 0x2,    0x15a,  0x56,   0x3,    0x2,    0x2,
+        0x2,   0x15b,  0x161,  0x7,    0x24,   0x2,    0x2,    0x15c,  0x15d,
+        0x7,   0x24,   0x2,    0x2,    0x15d,  0x160,  0x7,    0x24,   0x2,
+        0x2,   0x15e,  0x160,  0xa,    0x7,    0x2,    0x2,    0x15f,  0x15c,
+        0x3,   0x2,    0x2,    0x2,    0x15f,  0x15e,  0x3,    0x2,    0x2,
+        0x2,   0x160,  0x163,  0x3,    0x2,    0x2,    0x2,    0x161,  0x15f,
+        0x3,   0x2,    0x2,    0x2,    0x161,  0x162,  0x3,    0x2,    0x2,
+        0x2,   0x162,  0x164,  0x3,    0x2,    0x2,    0x2,    0x163,  0x161,
+        0x3,   0x2,    0x2,    0x2,    0x164,  0x165,  0x7,    0x24,   0x2,
+        0x2,   0x165,  0x58,   0x3,    0x2,    0x2,    0x2,    0x166,  0x16c,
+        0x7,   0x29,   0x2,    0x2,    0x167,  0x168,  0x7,    0x29,   0x2,
+        0x2,   0x168,  0x16b,  0x7,    0x29,   0x2,    0x2,    0x169,  0x16b,
+        0xa,   0x8,    0x2,    0x2,    0x16a,  0x167,  0x3,    0x2,    0x2,
+        0x2,   0x16a,  0x169,  0x3,    0x2,    0x2,    0x2,    0x16b,  0x16e,
+        0x3,   0x2,    0x2,    0x2,    0x16c,  0x16a,  0x3,    0x2,    0x2,
+        0x2,   0x16c,  0x16d,  0x3,    0x2,    0x2,    0x2,    0x16d,  0x16f,
+        0x3,   0x2,    0x2,    0x2,    0x16e,  0x16c,  0x3,    0x2,    0x2,
+        0x2,   0x16f,  0x170,  0x7,    0x29,   0x2,    0x2,    0x170,  0x5a,
+        0x3,   0x2,    0x2,    0x2,    0x171,  0x177,  0x7,    0x62,   0x2,
+        0x2,   0x172,  0x173,  0x7,    0x62,   0x2,    0x2,    0x173,  0x176,
+        0x7,   0x62,   0x2,    0x2,    0x174,  0x176,  0xa,    0x9,    0x2,
+        0x2,   0x175,  0x172,  0x3,    0x2,    0x2,    0x2,    0x175,  0x174,
+        0x3,   0x2,    0x2,    0x2,    0x176,  0x179,  0x3,    0x2,    0x2,
+        0x2,   0x177,  0x175,  0x3,    0x2,    0x2,    0x2,    0x177,  0x178,
+        0x3,   0x2,    0x2,    0x2,    0x178,  0x17a,  0x3,    0x2,    0x2,
+        0x2,   0x179,  0x177,  0x3,    0x2,    0x2,    0x2,    0x17a,  0x17b,
+        0x7,   0x62,   0x2,    0x2,    0x17b,  0x5c,   0x3,    0x2,    0x2,
+        0x2,   0x17c,  0x17d,  0x9,    0xa,    0x2,    0x2,    0x17d,  0x5e,
+        0x3,   0x2,    0x2,    0x2,    0x17e,  0x17f,  0x9,    0xb,    0x2,
+        0x2,   0x17f,  0x60,   0x3,    0x2,    0x2,    0x2,    0x180,  0x181,
+        0x9,   0xc,    0x2,    0x2,    0x181,  0x62,   0x3,    0x2,    0x2,
+        0x2,   0x182,  0x183,  0x9,    0xd,    0x2,    0x2,    0x183,  0x64,
+        0x3,   0x2,    0x2,    0x2,    0x184,  0x185,  0x9,    0x4,    0x2,
+        0x2,   0x185,  0x66,   0x3,    0x2,    0x2,    0x2,    0x186,  0x187,
+        0x9,   0xe,    0x2,    0x2,    0x187,  0x68,   0x3,    0x2,    0x2,
+        0x2,   0x188,  0x189,  0x9,    0xf,    0x2,    0x2,    0x189,  0x6a,
+        0x3,   0x2,    0x2,    0x2,    0x18a,  0x18b,  0x9,    0x10,   0x2,
+        0x2,   0x18b,  0x6c,   0x3,    0x2,    0x2,    0x2,    0x18c,  0x18d,
+        0x9,   0x11,   0x2,    0x2,    0x18d,  0x6e,   0x3,    0x2,    0x2,
+        0x2,   0x18e,  0x18f,  0x9,    0x12,   0x2,    0x2,    0x18f,  0x70,
+        0x3,   0x2,    0x2,    0x2,    0x190,  0x191,  0x9,    0x13,   0x2,
+        0x2,   0x191,  0x72,   0x3,    0x2,    0x2,    0x2,    0x192,  0x193,
+        0x9,   0x14,   0x2,    0x2,    0x193,  0x74,   0x3,    0x2,    0x2,
+        0x2,   0x194,  0x195,  0x9,    0x15,   0x2,    0x2,    0x195,  0x76,
+        0x3,   0x2,    0x2,    0x2,    0x196,  0x197,  0x9,    0x16,   0x2,
+        0x2,   0x197,  0x78,   0x3,    0x2,    0x2,    0x2,    0x198,  0x199,
+        0x9,   0x17,   0x2,    0x2,    0x199,  0x7a,   0x3,    0x2,    0x2,
+        0x2,   0x19a,  0x19b,  0x9,    0x18,   0x2,    0x2,    0x19b,  0x7c,
+        0x3,   0x2,    0x2,    0x2,    0x19c,  0x19d,  0x9,    0x19,   0x2,
+        0x2,   0x19d,  0x7e,   0x3,    0x2,    0x2,    0x2,    0x19e,  0x19f,
+        0x9,   0x1a,   0x2,    0x2,    0x19f,  0x80,   0x3,    0x2,    0x2,
+        0x2,   0x1a0,  0x1a1,  0x9,    0x1b,   0x2,    0x2,    0x1a1,  0x82,
+        0x3,   0x2,    0x2,    0x2,    0x1a2,  0x1a3,  0x9,    0x1c,   0x2,
+        0x2,   0x1a3,  0x84,   0x3,    0x2,    0x2,    0x2,    0x1a4,  0x1a5,
+        0x9,   0x1d,   0x2,    0x2,    0x1a5,  0x86,   0x3,    0x2,    0x2,
+        0x2,   0x1a6,  0x1a7,  0x9,    0x1e,   0x2,    0x2,    0x1a7,  0x88,
+        0x3,   0x2,    0x2,    0x2,    0x1a8,  0x1a9,  0x9,    0x1f,   0x2,
+        0x2,   0x1a9,  0x8a,   0x3,    0x2,    0x2,    0x2,    0x1aa,  0x1ab,
+        0x9,   0x20,   0x2,    0x2,    0x1ab,  0x8c,   0x3,    0x2,    0x2,
+        0x2,   0x1ac,  0x1ad,  0x9,    0x21,   0x2,    0x2,    0x1ad,  0x8e,
+        0x3,   0x2,    0x2,    0x2,    0x1ae,  0x1af,  0x9,    0x22,   0x2,
+        0x2,   0x1af,  0x90,   0x3,    0x2,    0x2,    0x2,    0x1a,   0x2,
+        0x99,  0xc7,   0xed,   0xf2,   0x106,  0x10a,  0x10f,  0x117,  0x119,
+        0x12d, 0x137,  0x13c,  0x141,  0x145,  0x14a,  0x151,  0x157,  0x15f,
+        0x161, 0x16a,  0x16c,  0x175,  0x177,  0x3,    0x2,    0x3,    0x2,
+    };
 
-  _serializedATN.insert(_serializedATN.end(), serializedATNSegment0,
-    serializedATNSegment0 + sizeof(serializedATNSegment0) / sizeof(serializedATNSegment0[0]));
+    _serializedATN.insert(
+        _serializedATN.end(), serializedATNSegment0,
+        serializedATNSegment0 +
+            sizeof(serializedATNSegment0) / sizeof(serializedATNSegment0[0]));
 
+    atn::ATNDeserializer deserializer;
+    _atn = deserializer.deserialize(_serializedATN);
 
-  atn::ATNDeserializer deserializer;
-  _atn = deserializer.deserialize(_serializedATN);
-
-  size_t count = _atn.getNumberOfDecisions();
-  _decisionToDFA.reserve(count);
-  for (size_t i = 0; i < count; i++) { 
-    _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
-  }
+    size_t count = _atn.getNumberOfDecisions();
+    _decisionToDFA.reserve(count);
+    for (size_t i = 0; i < count; i++) {
+        _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
+    }
 }
 
 PathLexer::Initializer PathLexer::_init;
diff --git a/cpp/src/parser/generated/PathLexer.h b/cpp/src/parser/generated/PathLexer.h
index 058ac66..e5c41cf 100644
--- a/cpp/src/parser/generated/PathLexer.h
+++ b/cpp/src/parser/generated/PathLexer.h
@@ -20,61 +20,85 @@
 
 #pragma once
 
-
 #include "antlr4-runtime.h"
 
+class PathLexer : public antlr4::Lexer {
+   public:
+    enum {
+        ROOT = 1,
+        WS = 2,
+        TIME = 3,
+        TIMESTAMP = 4,
+        MINUS = 5,
+        PLUS = 6,
+        DIV = 7,
+        MOD = 8,
+        OPERATOR_DEQ = 9,
+        OPERATOR_SEQ = 10,
+        OPERATOR_GT = 11,
+        OPERATOR_GTE = 12,
+        OPERATOR_LT = 13,
+        OPERATOR_LTE = 14,
+        OPERATOR_NEQ = 15,
+        OPERATOR_BITWISE_AND = 16,
+        OPERATOR_LOGICAL_AND = 17,
+        OPERATOR_BITWISE_OR = 18,
+        OPERATOR_LOGICAL_OR = 19,
+        OPERATOR_NOT = 20,
+        DOT = 21,
+        COMMA = 22,
+        SEMI = 23,
+        STAR = 24,
+        DOUBLE_STAR = 25,
+        LR_BRACKET = 26,
+        RR_BRACKET = 27,
+        LS_BRACKET = 28,
+        RS_BRACKET = 29,
+        DOUBLE_COLON = 30,
+        STRING_LITERAL = 31,
+        DURATION_LITERAL = 32,
+        DATETIME_LITERAL = 33,
+        INTEGER_LITERAL = 34,
+        EXPONENT_NUM_PART = 35,
+        ID = 36,
+        QUOTED_ID = 37
+    };
 
+    explicit PathLexer(antlr4::CharStream* input);
+    ~PathLexer();
 
+    virtual std::string getGrammarFileName() const override;
+    virtual const std::vector<std::string>& getRuleNames() const override;
 
-class  PathLexer : public antlr4::Lexer {
-public:
-  enum {
-    ROOT = 1, WS = 2, TIME = 3, TIMESTAMP = 4, MINUS = 5, PLUS = 6, DIV = 7, 
-    MOD = 8, OPERATOR_DEQ = 9, OPERATOR_SEQ = 10, OPERATOR_GT = 11, OPERATOR_GTE = 12, 
-    OPERATOR_LT = 13, OPERATOR_LTE = 14, OPERATOR_NEQ = 15, OPERATOR_BITWISE_AND = 16, 
-    OPERATOR_LOGICAL_AND = 17, OPERATOR_BITWISE_OR = 18, OPERATOR_LOGICAL_OR = 19, 
-    OPERATOR_NOT = 20, DOT = 21, COMMA = 22, SEMI = 23, STAR = 24, DOUBLE_STAR = 25, 
-    LR_BRACKET = 26, RR_BRACKET = 27, LS_BRACKET = 28, RS_BRACKET = 29, 
-    DOUBLE_COLON = 30, STRING_LITERAL = 31, DURATION_LITERAL = 32, DATETIME_LITERAL = 33, 
-    INTEGER_LITERAL = 34, EXPONENT_NUM_PART = 35, ID = 36, QUOTED_ID = 37
-  };
+    virtual const std::vector<std::string>& getChannelNames() const override;
+    virtual const std::vector<std::string>& getModeNames() const override;
+    virtual const std::vector<std::string>& getTokenNames()
+        const override;  // deprecated, use vocabulary instead
+    virtual antlr4::dfa::Vocabulary& getVocabulary() const override;
 
-  explicit PathLexer(antlr4::CharStream *input);
-  ~PathLexer();
+    virtual const std::vector<uint16_t> getSerializedATN() const override;
+    virtual const antlr4::atn::ATN& getATN() const override;
 
-  virtual std::string getGrammarFileName() const override;
-  virtual const std::vector<std::string>& getRuleNames() const override;
+   private:
+    static std::vector<antlr4::dfa::DFA> _decisionToDFA;
+    static antlr4::atn::PredictionContextCache _sharedContextCache;
+    static std::vector<std::string> _ruleNames;
+    static std::vector<std::string> _tokenNames;
+    static std::vector<std::string> _channelNames;
+    static std::vector<std::string> _modeNames;
 
-  virtual const std::vector<std::string>& getChannelNames() const override;
-  virtual const std::vector<std::string>& getModeNames() const override;
-  virtual const std::vector<std::string>& getTokenNames() const override; // deprecated, use vocabulary instead
-  virtual antlr4::dfa::Vocabulary& getVocabulary() const override;
+    static std::vector<std::string> _literalNames;
+    static std::vector<std::string> _symbolicNames;
+    static antlr4::dfa::Vocabulary _vocabulary;
+    static antlr4::atn::ATN _atn;
+    static std::vector<uint16_t> _serializedATN;
 
-  virtual const std::vector<uint16_t> getSerializedATN() const override;
-  virtual const antlr4::atn::ATN& getATN() const override;
+    // Individual action functions triggered by action() above.
 
-private:
-  static std::vector<antlr4::dfa::DFA> _decisionToDFA;
-  static antlr4::atn::PredictionContextCache _sharedContextCache;
-  static std::vector<std::string> _ruleNames;
-  static std::vector<std::string> _tokenNames;
-  static std::vector<std::string> _channelNames;
-  static std::vector<std::string> _modeNames;
+    // Individual semantic predicate functions triggered by sempred() above.
 
-  static std::vector<std::string> _literalNames;
-  static std::vector<std::string> _symbolicNames;
-  static antlr4::dfa::Vocabulary _vocabulary;
-  static antlr4::atn::ATN _atn;
-  static std::vector<uint16_t> _serializedATN;
-
-
-  // Individual action functions triggered by action() above.
-
-  // Individual semantic predicate functions triggered by sempred() above.
-
-  struct Initializer {
-    Initializer();
-  };
-  static Initializer _init;
+    struct Initializer {
+        Initializer();
+    };
+    static Initializer _init;
 };
-
diff --git a/cpp/src/parser/generated/PathParser.cpp b/cpp/src/parser/generated/PathParser.cpp
index 8b6ea11..6dc7a4b 100644
--- a/cpp/src/parser/generated/PathParser.cpp
+++ b/cpp/src/parser/generated/PathParser.cpp
@@ -18,725 +18,718 @@
  */
 // Generated from PathParser.g4 by ANTLR 4.9.3
 
+#include "PathParser.h"
 
 #include "PathParserListener.h"
 #include "PathParserVisitor.h"
 
-#include "PathParser.h"
-
-
 using namespace antlrcpp;
 using namespace antlr4;
 
 PathParser::PathParser(TokenStream *input) : Parser(input) {
-  _interpreter = new atn::ParserATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache);
+    _interpreter = new atn::ParserATNSimulator(this, _atn, _decisionToDFA,
+                                               _sharedContextCache);
 }
 
-PathParser::~PathParser() {
-  delete _interpreter;
+PathParser::~PathParser() { delete _interpreter; }
+
+std::string PathParser::getGrammarFileName() const { return "PathParser.g4"; }
+
+const std::vector<std::string> &PathParser::getRuleNames() const {
+    return _ruleNames;
 }
 
-std::string PathParser::getGrammarFileName() const {
-  return "PathParser.g4";
+dfa::Vocabulary &PathParser::getVocabulary() const { return _vocabulary; }
+
+//----------------- PathContext
+//------------------------------------------------------------------
+
+PathParser::PathContext::PathContext(ParserRuleContext *parent,
+                                     size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+PathParser::PrefixPathContext *PathParser::PathContext::prefixPath() {
+    return getRuleContext<PathParser::PrefixPathContext>(0);
 }
 
-const std::vector<std::string>& PathParser::getRuleNames() const {
-  return _ruleNames;
+tree::TerminalNode *PathParser::PathContext::EOF() {
+    return getToken(PathParser::EOF, 0);
 }
 
-dfa::Vocabulary& PathParser::getVocabulary() const {
-  return _vocabulary;
+PathParser::SuffixPathContext *PathParser::PathContext::suffixPath() {
+    return getRuleContext<PathParser::SuffixPathContext>(0);
 }
 
-
-//----------------- PathContext ------------------------------------------------------------------
-
-PathParser::PathContext::PathContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
-
-PathParser::PrefixPathContext* PathParser::PathContext::prefixPath() {
-  return getRuleContext<PathParser::PrefixPathContext>(0);
-}
-
-tree::TerminalNode* PathParser::PathContext::EOF() {
-  return getToken(PathParser::EOF, 0);
-}
-
-PathParser::SuffixPathContext* PathParser::PathContext::suffixPath() {
-  return getRuleContext<PathParser::SuffixPathContext>(0);
-}
-
-
 size_t PathParser::PathContext::getRuleIndex() const {
-  return PathParser::RulePath;
+    return PathParser::RulePath;
 }
 
 void PathParser::PathContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterPath(this);
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterPath(this);
 }
 
 void PathParser::PathContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitPath(this);
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitPath(this);
 }
 
-
 antlrcpp::Any PathParser::PathContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitPath(this);
-  else
-    return visitor->visitChildren(this);
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitPath(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::PathContext* PathParser::path() {
-  PathContext *_localctx = _tracker.createInstance<PathContext>(_ctx, getState());
-  enterRule(_localctx, 0, PathParser::RulePath);
+PathParser::PathContext *PathParser::path() {
+    PathContext *_localctx =
+        _tracker.createInstance<PathContext>(_ctx, getState());
+    enterRule(_localctx, 0, PathParser::RulePath);
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    setState(22);
-    _errHandler->sync(this);
-    switch (_input->LA(1)) {
-      case PathParser::ROOT: {
-        enterOuterAlt(_localctx, 1);
-        setState(16);
-        prefixPath();
-        setState(17);
-        match(PathParser::EOF);
-        break;
-      }
+        exitRule();
+    });
+    try {
+        setState(22);
+        _errHandler->sync(this);
+        switch (_input->LA(1)) {
+            case PathParser::ROOT: {
+                enterOuterAlt(_localctx, 1);
+                setState(16);
+                prefixPath();
+                setState(17);
+                match(PathParser::EOF);
+                break;
+            }
 
-      case PathParser::STAR:
-      case PathParser::DOUBLE_STAR:
-      case PathParser::DURATION_LITERAL:
-      case PathParser::INTEGER_LITERAL:
-      case PathParser::ID:
-      case PathParser::QUOTED_ID: {
-        enterOuterAlt(_localctx, 2);
-        setState(19);
-        suffixPath();
-        setState(20);
-        match(PathParser::EOF);
-        break;
-      }
+            case PathParser::STAR:
+            case PathParser::DOUBLE_STAR:
+            case PathParser::DURATION_LITERAL:
+            case PathParser::INTEGER_LITERAL:
+            case PathParser::ID:
+            case PathParser::QUOTED_ID: {
+                enterOuterAlt(_localctx, 2);
+                setState(19);
+                suffixPath();
+                setState(20);
+                match(PathParser::EOF);
+                break;
+            }
 
-    default:
-      throw NoViableAltException(this);
+            default:
+                throw NoViableAltException(this);
+        }
+
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
     }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
 
-  return _localctx;
+    return _localctx;
 }
 
-//----------------- PrefixPathContext ------------------------------------------------------------------
+//----------------- PrefixPathContext
+//------------------------------------------------------------------
 
-PathParser::PrefixPathContext::PrefixPathContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
-}
+PathParser::PrefixPathContext::PrefixPathContext(ParserRuleContext *parent,
+                                                 size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
 
-tree::TerminalNode* PathParser::PrefixPathContext::ROOT() {
-  return getToken(PathParser::ROOT, 0);
+tree::TerminalNode *PathParser::PrefixPathContext::ROOT() {
+    return getToken(PathParser::ROOT, 0);
 }
 
 std::vector<tree::TerminalNode *> PathParser::PrefixPathContext::DOT() {
-  return getTokens(PathParser::DOT);
+    return getTokens(PathParser::DOT);
 }
 
-tree::TerminalNode* PathParser::PrefixPathContext::DOT(size_t i) {
-  return getToken(PathParser::DOT, i);
+tree::TerminalNode *PathParser::PrefixPathContext::DOT(size_t i) {
+    return getToken(PathParser::DOT, i);
 }
 
-std::vector<PathParser::NodeNameContext *> PathParser::PrefixPathContext::nodeName() {
-  return getRuleContexts<PathParser::NodeNameContext>();
+std::vector<PathParser::NodeNameContext *>
+PathParser::PrefixPathContext::nodeName() {
+    return getRuleContexts<PathParser::NodeNameContext>();
 }
 
-PathParser::NodeNameContext* PathParser::PrefixPathContext::nodeName(size_t i) {
-  return getRuleContext<PathParser::NodeNameContext>(i);
+PathParser::NodeNameContext *PathParser::PrefixPathContext::nodeName(size_t i) {
+    return getRuleContext<PathParser::NodeNameContext>(i);
 }
 
-
 size_t PathParser::PrefixPathContext::getRuleIndex() const {
-  return PathParser::RulePrefixPath;
+    return PathParser::RulePrefixPath;
 }
 
-void PathParser::PrefixPathContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterPrefixPath(this);
+void PathParser::PrefixPathContext::enterRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterPrefixPath(this);
 }
 
-void PathParser::PrefixPathContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitPrefixPath(this);
+void PathParser::PrefixPathContext::exitRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitPrefixPath(this);
 }
 
-
-antlrcpp::Any PathParser::PrefixPathContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitPrefixPath(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::PrefixPathContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitPrefixPath(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::PrefixPathContext* PathParser::prefixPath() {
-  PrefixPathContext *_localctx = _tracker.createInstance<PrefixPathContext>(_ctx, getState());
-  enterRule(_localctx, 2, PathParser::RulePrefixPath);
-  size_t _la = 0;
+PathParser::PrefixPathContext *PathParser::prefixPath() {
+    PrefixPathContext *_localctx =
+        _tracker.createInstance<PrefixPathContext>(_ctx, getState());
+    enterRule(_localctx, 2, PathParser::RulePrefixPath);
+    size_t _la = 0;
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(24);
-    match(PathParser::ROOT);
-    setState(29);
-    _errHandler->sync(this);
-    _la = _input->LA(1);
-    while (_la == PathParser::DOT) {
-      setState(25);
-      match(PathParser::DOT);
-      setState(26);
-      nodeName();
-      setState(31);
-      _errHandler->sync(this);
-      _la = _input->LA(1);
+        exitRule();
+    });
+    try {
+        enterOuterAlt(_localctx, 1);
+        setState(24);
+        match(PathParser::ROOT);
+        setState(29);
+        _errHandler->sync(this);
+        _la = _input->LA(1);
+        while (_la == PathParser::DOT) {
+            setState(25);
+            match(PathParser::DOT);
+            setState(26);
+            nodeName();
+            setState(31);
+            _errHandler->sync(this);
+            _la = _input->LA(1);
+        }
+
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
     }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
 
-  return _localctx;
+    return _localctx;
 }
 
-//----------------- SuffixPathContext ------------------------------------------------------------------
+//----------------- SuffixPathContext
+//------------------------------------------------------------------
 
-PathParser::SuffixPathContext::SuffixPathContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
+PathParser::SuffixPathContext::SuffixPathContext(ParserRuleContext *parent,
+                                                 size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+std::vector<PathParser::NodeNameContext *>
+PathParser::SuffixPathContext::nodeName() {
+    return getRuleContexts<PathParser::NodeNameContext>();
 }
 
-std::vector<PathParser::NodeNameContext *> PathParser::SuffixPathContext::nodeName() {
-  return getRuleContexts<PathParser::NodeNameContext>();
-}
-
-PathParser::NodeNameContext* PathParser::SuffixPathContext::nodeName(size_t i) {
-  return getRuleContext<PathParser::NodeNameContext>(i);
+PathParser::NodeNameContext *PathParser::SuffixPathContext::nodeName(size_t i) {
+    return getRuleContext<PathParser::NodeNameContext>(i);
 }
 
 std::vector<tree::TerminalNode *> PathParser::SuffixPathContext::DOT() {
-  return getTokens(PathParser::DOT);
+    return getTokens(PathParser::DOT);
 }
 
-tree::TerminalNode* PathParser::SuffixPathContext::DOT(size_t i) {
-  return getToken(PathParser::DOT, i);
+tree::TerminalNode *PathParser::SuffixPathContext::DOT(size_t i) {
+    return getToken(PathParser::DOT, i);
 }
 
-
 size_t PathParser::SuffixPathContext::getRuleIndex() const {
-  return PathParser::RuleSuffixPath;
+    return PathParser::RuleSuffixPath;
 }
 
-void PathParser::SuffixPathContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterSuffixPath(this);
+void PathParser::SuffixPathContext::enterRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterSuffixPath(this);
 }
 
-void PathParser::SuffixPathContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitSuffixPath(this);
+void PathParser::SuffixPathContext::exitRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitSuffixPath(this);
 }
 
-
-antlrcpp::Any PathParser::SuffixPathContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitSuffixPath(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::SuffixPathContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitSuffixPath(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::SuffixPathContext* PathParser::suffixPath() {
-  SuffixPathContext *_localctx = _tracker.createInstance<SuffixPathContext>(_ctx, getState());
-  enterRule(_localctx, 4, PathParser::RuleSuffixPath);
-  size_t _la = 0;
+PathParser::SuffixPathContext *PathParser::suffixPath() {
+    SuffixPathContext *_localctx =
+        _tracker.createInstance<SuffixPathContext>(_ctx, getState());
+    enterRule(_localctx, 4, PathParser::RuleSuffixPath);
+    size_t _la = 0;
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(32);
-    nodeName();
-    setState(37);
-    _errHandler->sync(this);
-    _la = _input->LA(1);
-    while (_la == PathParser::DOT) {
-      setState(33);
-      match(PathParser::DOT);
-      setState(34);
-      nodeName();
-      setState(39);
-      _errHandler->sync(this);
-      _la = _input->LA(1);
+        exitRule();
+    });
+    try {
+        enterOuterAlt(_localctx, 1);
+        setState(32);
+        nodeName();
+        setState(37);
+        _errHandler->sync(this);
+        _la = _input->LA(1);
+        while (_la == PathParser::DOT) {
+            setState(33);
+            match(PathParser::DOT);
+            setState(34);
+            nodeName();
+            setState(39);
+            _errHandler->sync(this);
+            _la = _input->LA(1);
+        }
+
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
     }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
 
-  return _localctx;
+    return _localctx;
 }
 
-//----------------- NodeNameContext ------------------------------------------------------------------
+//----------------- NodeNameContext
+//------------------------------------------------------------------
 
-PathParser::NodeNameContext::NodeNameContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
+PathParser::NodeNameContext::NodeNameContext(ParserRuleContext *parent,
+                                             size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+std::vector<PathParser::WildcardContext *>
+PathParser::NodeNameContext::wildcard() {
+    return getRuleContexts<PathParser::WildcardContext>();
 }
 
-std::vector<PathParser::WildcardContext *> PathParser::NodeNameContext::wildcard() {
-  return getRuleContexts<PathParser::WildcardContext>();
+PathParser::WildcardContext *PathParser::NodeNameContext::wildcard(size_t i) {
+    return getRuleContext<PathParser::WildcardContext>(i);
 }
 
-PathParser::WildcardContext* PathParser::NodeNameContext::wildcard(size_t i) {
-  return getRuleContext<PathParser::WildcardContext>(i);
+PathParser::NodeNameSliceContext *PathParser::NodeNameContext::nodeNameSlice() {
+    return getRuleContext<PathParser::NodeNameSliceContext>(0);
 }
 
-PathParser::NodeNameSliceContext* PathParser::NodeNameContext::nodeNameSlice() {
-  return getRuleContext<PathParser::NodeNameSliceContext>(0);
+PathParser::NodeNameWithoutWildcardContext *
+PathParser::NodeNameContext::nodeNameWithoutWildcard() {
+    return getRuleContext<PathParser::NodeNameWithoutWildcardContext>(0);
 }
 
-PathParser::NodeNameWithoutWildcardContext* PathParser::NodeNameContext::nodeNameWithoutWildcard() {
-  return getRuleContext<PathParser::NodeNameWithoutWildcardContext>(0);
-}
-
-
 size_t PathParser::NodeNameContext::getRuleIndex() const {
-  return PathParser::RuleNodeName;
+    return PathParser::RuleNodeName;
 }
 
 void PathParser::NodeNameContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterNodeName(this);
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterNodeName(this);
 }
 
 void PathParser::NodeNameContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitNodeName(this);
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitNodeName(this);
 }
 
-
-antlrcpp::Any PathParser::NodeNameContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitNodeName(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::NodeNameContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitNodeName(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::NodeNameContext* PathParser::nodeName() {
-  NodeNameContext *_localctx = _tracker.createInstance<NodeNameContext>(_ctx, getState());
-  enterRule(_localctx, 6, PathParser::RuleNodeName);
-  size_t _la = 0;
+PathParser::NodeNameContext *PathParser::nodeName() {
+    NodeNameContext *_localctx =
+        _tracker.createInstance<NodeNameContext>(_ctx, getState());
+    enterRule(_localctx, 6, PathParser::RuleNodeName);
+    size_t _la = 0;
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    setState(50);
-    _errHandler->sync(this);
-    switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(_input, 4, _ctx)) {
-    case 1: {
-      enterOuterAlt(_localctx, 1);
-      setState(40);
-      wildcard();
-      break;
+        exitRule();
+    });
+    try {
+        setState(50);
+        _errHandler->sync(this);
+        switch (getInterpreter<atn::ParserATNSimulator>()->adaptivePredict(
+            _input, 4, _ctx)) {
+            case 1: {
+                enterOuterAlt(_localctx, 1);
+                setState(40);
+                wildcard();
+                break;
+            }
+
+            case 2: {
+                enterOuterAlt(_localctx, 2);
+                setState(41);
+                wildcard();
+                setState(42);
+                nodeNameSlice();
+                setState(44);
+                _errHandler->sync(this);
+
+                _la = _input->LA(1);
+                if (_la == PathParser::STAR
+
+                    || _la == PathParser::DOUBLE_STAR) {
+                    setState(43);
+                    wildcard();
+                }
+                break;
+            }
+
+            case 3: {
+                enterOuterAlt(_localctx, 3);
+                setState(46);
+                nodeNameSlice();
+                setState(47);
+                wildcard();
+                break;
+            }
+
+            case 4: {
+                enterOuterAlt(_localctx, 4);
+                setState(49);
+                nodeNameWithoutWildcard();
+                break;
+            }
+
+            default:
+                break;
+        }
+
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
     }
 
-    case 2: {
-      enterOuterAlt(_localctx, 2);
-      setState(41);
-      wildcard();
-      setState(42);
-      nodeNameSlice();
-      setState(44);
-      _errHandler->sync(this);
-
-      _la = _input->LA(1);
-      if (_la == PathParser::STAR
-
-      || _la == PathParser::DOUBLE_STAR) {
-        setState(43);
-        wildcard();
-      }
-      break;
-    }
-
-    case 3: {
-      enterOuterAlt(_localctx, 3);
-      setState(46);
-      nodeNameSlice();
-      setState(47);
-      wildcard();
-      break;
-    }
-
-    case 4: {
-      enterOuterAlt(_localctx, 4);
-      setState(49);
-      nodeNameWithoutWildcard();
-      break;
-    }
-
-    default:
-      break;
-    }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
-
-  return _localctx;
+    return _localctx;
 }
 
-//----------------- NodeNameWithoutWildcardContext ------------------------------------------------------------------
+//----------------- NodeNameWithoutWildcardContext
+//------------------------------------------------------------------
 
-PathParser::NodeNameWithoutWildcardContext::NodeNameWithoutWildcardContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
+PathParser::NodeNameWithoutWildcardContext::NodeNameWithoutWildcardContext(
+    ParserRuleContext *parent, size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+PathParser::IdentifierContext *
+PathParser::NodeNameWithoutWildcardContext::identifier() {
+    return getRuleContext<PathParser::IdentifierContext>(0);
 }
 
-PathParser::IdentifierContext* PathParser::NodeNameWithoutWildcardContext::identifier() {
-  return getRuleContext<PathParser::IdentifierContext>(0);
-}
-
-
 size_t PathParser::NodeNameWithoutWildcardContext::getRuleIndex() const {
-  return PathParser::RuleNodeNameWithoutWildcard;
+    return PathParser::RuleNodeNameWithoutWildcard;
 }
 
-void PathParser::NodeNameWithoutWildcardContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterNodeNameWithoutWildcard(this);
+void PathParser::NodeNameWithoutWildcardContext::enterRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr)
+        parserListener->enterNodeNameWithoutWildcard(this);
 }
 
-void PathParser::NodeNameWithoutWildcardContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitNodeNameWithoutWildcard(this);
+void PathParser::NodeNameWithoutWildcardContext::exitRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr)
+        parserListener->exitNodeNameWithoutWildcard(this);
 }
 
-
-antlrcpp::Any PathParser::NodeNameWithoutWildcardContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitNodeNameWithoutWildcard(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::NodeNameWithoutWildcardContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitNodeNameWithoutWildcard(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::NodeNameWithoutWildcardContext* PathParser::nodeNameWithoutWildcard() {
-  NodeNameWithoutWildcardContext *_localctx = _tracker.createInstance<NodeNameWithoutWildcardContext>(_ctx, getState());
-  enterRule(_localctx, 8, PathParser::RuleNodeNameWithoutWildcard);
+PathParser::NodeNameWithoutWildcardContext *
+PathParser::nodeNameWithoutWildcard() {
+    NodeNameWithoutWildcardContext *_localctx =
+        _tracker.createInstance<NodeNameWithoutWildcardContext>(_ctx,
+                                                                getState());
+    enterRule(_localctx, 8, PathParser::RuleNodeNameWithoutWildcard);
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(52);
-    identifier();
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
+        exitRule();
+    });
+    try {
+        enterOuterAlt(_localctx, 1);
+        setState(52);
+        identifier();
 
-  return _localctx;
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
+    }
+
+    return _localctx;
 }
 
-//----------------- NodeNameSliceContext ------------------------------------------------------------------
+//----------------- NodeNameSliceContext
+//------------------------------------------------------------------
 
-PathParser::NodeNameSliceContext::NodeNameSliceContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
+PathParser::NodeNameSliceContext::NodeNameSliceContext(
+    ParserRuleContext *parent, size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+PathParser::IdentifierContext *PathParser::NodeNameSliceContext::identifier() {
+    return getRuleContext<PathParser::IdentifierContext>(0);
 }
 
-PathParser::IdentifierContext* PathParser::NodeNameSliceContext::identifier() {
-  return getRuleContext<PathParser::IdentifierContext>(0);
+tree::TerminalNode *PathParser::NodeNameSliceContext::INTEGER_LITERAL() {
+    return getToken(PathParser::INTEGER_LITERAL, 0);
 }
 
-tree::TerminalNode* PathParser::NodeNameSliceContext::INTEGER_LITERAL() {
-  return getToken(PathParser::INTEGER_LITERAL, 0);
-}
-
-
 size_t PathParser::NodeNameSliceContext::getRuleIndex() const {
-  return PathParser::RuleNodeNameSlice;
+    return PathParser::RuleNodeNameSlice;
 }
 
-void PathParser::NodeNameSliceContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterNodeNameSlice(this);
+void PathParser::NodeNameSliceContext::enterRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterNodeNameSlice(this);
 }
 
-void PathParser::NodeNameSliceContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitNodeNameSlice(this);
+void PathParser::NodeNameSliceContext::exitRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitNodeNameSlice(this);
 }
 
-
-antlrcpp::Any PathParser::NodeNameSliceContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitNodeNameSlice(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::NodeNameSliceContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitNodeNameSlice(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::NodeNameSliceContext* PathParser::nodeNameSlice() {
-  NodeNameSliceContext *_localctx = _tracker.createInstance<NodeNameSliceContext>(_ctx, getState());
-  enterRule(_localctx, 10, PathParser::RuleNodeNameSlice);
+PathParser::NodeNameSliceContext *PathParser::nodeNameSlice() {
+    NodeNameSliceContext *_localctx =
+        _tracker.createInstance<NodeNameSliceContext>(_ctx, getState());
+    enterRule(_localctx, 10, PathParser::RuleNodeNameSlice);
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    setState(56);
-    _errHandler->sync(this);
-    switch (_input->LA(1)) {
-      case PathParser::DURATION_LITERAL:
-      case PathParser::ID:
-      case PathParser::QUOTED_ID: {
-        enterOuterAlt(_localctx, 1);
-        setState(54);
-        identifier();
-        break;
-      }
+        exitRule();
+    });
+    try {
+        setState(56);
+        _errHandler->sync(this);
+        switch (_input->LA(1)) {
+            case PathParser::DURATION_LITERAL:
+            case PathParser::ID:
+            case PathParser::QUOTED_ID: {
+                enterOuterAlt(_localctx, 1);
+                setState(54);
+                identifier();
+                break;
+            }
 
-      case PathParser::INTEGER_LITERAL: {
-        enterOuterAlt(_localctx, 2);
-        setState(55);
-        match(PathParser::INTEGER_LITERAL);
-        break;
-      }
+            case PathParser::INTEGER_LITERAL: {
+                enterOuterAlt(_localctx, 2);
+                setState(55);
+                match(PathParser::INTEGER_LITERAL);
+                break;
+            }
 
-    default:
-      throw NoViableAltException(this);
+            default:
+                throw NoViableAltException(this);
+        }
+
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
     }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
 
-  return _localctx;
+    return _localctx;
 }
 
-//----------------- IdentifierContext ------------------------------------------------------------------
+//----------------- IdentifierContext
+//------------------------------------------------------------------
 
-PathParser::IdentifierContext::IdentifierContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
+PathParser::IdentifierContext::IdentifierContext(ParserRuleContext *parent,
+                                                 size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode *PathParser::IdentifierContext::DURATION_LITERAL() {
+    return getToken(PathParser::DURATION_LITERAL, 0);
 }
 
-tree::TerminalNode* PathParser::IdentifierContext::DURATION_LITERAL() {
-  return getToken(PathParser::DURATION_LITERAL, 0);
+tree::TerminalNode *PathParser::IdentifierContext::ID() {
+    return getToken(PathParser::ID, 0);
 }
 
-tree::TerminalNode* PathParser::IdentifierContext::ID() {
-  return getToken(PathParser::ID, 0);
+tree::TerminalNode *PathParser::IdentifierContext::QUOTED_ID() {
+    return getToken(PathParser::QUOTED_ID, 0);
 }
 
-tree::TerminalNode* PathParser::IdentifierContext::QUOTED_ID() {
-  return getToken(PathParser::QUOTED_ID, 0);
-}
-
-
 size_t PathParser::IdentifierContext::getRuleIndex() const {
-  return PathParser::RuleIdentifier;
+    return PathParser::RuleIdentifier;
 }
 
-void PathParser::IdentifierContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterIdentifier(this);
+void PathParser::IdentifierContext::enterRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterIdentifier(this);
 }
 
-void PathParser::IdentifierContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitIdentifier(this);
+void PathParser::IdentifierContext::exitRule(
+    tree::ParseTreeListener *listener) {
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitIdentifier(this);
 }
 
-
-antlrcpp::Any PathParser::IdentifierContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitIdentifier(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::IdentifierContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitIdentifier(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::IdentifierContext* PathParser::identifier() {
-  IdentifierContext *_localctx = _tracker.createInstance<IdentifierContext>(_ctx, getState());
-  enterRule(_localctx, 12, PathParser::RuleIdentifier);
-  size_t _la = 0;
+PathParser::IdentifierContext *PathParser::identifier() {
+    IdentifierContext *_localctx =
+        _tracker.createInstance<IdentifierContext>(_ctx, getState());
+    enterRule(_localctx, 12, PathParser::RuleIdentifier);
+    size_t _la = 0;
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(58);
-    _la = _input->LA(1);
-    if (!((((_la & ~ 0x3fULL) == 0) &&
-      ((1ULL << _la) & ((1ULL << PathParser::DURATION_LITERAL)
-      | (1ULL << PathParser::ID)
-      | (1ULL << PathParser::QUOTED_ID))) != 0))) {
-    _errHandler->recoverInline(this);
+        exitRule();
+    });
+    try {
+        enterOuterAlt(_localctx, 1);
+        setState(58);
+        _la = _input->LA(1);
+        if (!((((_la & ~0x3fULL) == 0) &&
+               ((1ULL << _la) & ((1ULL << PathParser::DURATION_LITERAL) |
+                                 (1ULL << PathParser::ID) |
+                                 (1ULL << PathParser::QUOTED_ID))) != 0))) {
+            _errHandler->recoverInline(this);
+        } else {
+            _errHandler->reportMatch(this);
+            consume();
+        }
+
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
     }
-    else {
-      _errHandler->reportMatch(this);
-      consume();
-    }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
 
-  return _localctx;
+    return _localctx;
 }
 
-//----------------- WildcardContext ------------------------------------------------------------------
+//----------------- WildcardContext
+//------------------------------------------------------------------
 
-PathParser::WildcardContext::WildcardContext(ParserRuleContext *parent, size_t invokingState)
-  : ParserRuleContext(parent, invokingState) {
+PathParser::WildcardContext::WildcardContext(ParserRuleContext *parent,
+                                             size_t invokingState)
+    : ParserRuleContext(parent, invokingState) {}
+
+tree::TerminalNode *PathParser::WildcardContext::STAR() {
+    return getToken(PathParser::STAR, 0);
 }
 
-tree::TerminalNode* PathParser::WildcardContext::STAR() {
-  return getToken(PathParser::STAR, 0);
+tree::TerminalNode *PathParser::WildcardContext::DOUBLE_STAR() {
+    return getToken(PathParser::DOUBLE_STAR, 0);
 }
 
-tree::TerminalNode* PathParser::WildcardContext::DOUBLE_STAR() {
-  return getToken(PathParser::DOUBLE_STAR, 0);
-}
-
-
 size_t PathParser::WildcardContext::getRuleIndex() const {
-  return PathParser::RuleWildcard;
+    return PathParser::RuleWildcard;
 }
 
 void PathParser::WildcardContext::enterRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->enterWildcard(this);
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->enterWildcard(this);
 }
 
 void PathParser::WildcardContext::exitRule(tree::ParseTreeListener *listener) {
-  auto parserListener = dynamic_cast<PathParserListener *>(listener);
-  if (parserListener != nullptr)
-    parserListener->exitWildcard(this);
+    auto parserListener = dynamic_cast<PathParserListener *>(listener);
+    if (parserListener != nullptr) parserListener->exitWildcard(this);
 }
 
-
-antlrcpp::Any PathParser::WildcardContext::accept(tree::ParseTreeVisitor *visitor) {
-  if (auto parserVisitor = dynamic_cast<PathParserVisitor*>(visitor))
-    return parserVisitor->visitWildcard(this);
-  else
-    return visitor->visitChildren(this);
+antlrcpp::Any PathParser::WildcardContext::accept(
+    tree::ParseTreeVisitor *visitor) {
+    if (auto parserVisitor = dynamic_cast<PathParserVisitor *>(visitor))
+        return parserVisitor->visitWildcard(this);
+    else
+        return visitor->visitChildren(this);
 }
 
-PathParser::WildcardContext* PathParser::wildcard() {
-  WildcardContext *_localctx = _tracker.createInstance<WildcardContext>(_ctx, getState());
-  enterRule(_localctx, 14, PathParser::RuleWildcard);
-  size_t _la = 0;
+PathParser::WildcardContext *PathParser::wildcard() {
+    WildcardContext *_localctx =
+        _tracker.createInstance<WildcardContext>(_ctx, getState());
+    enterRule(_localctx, 14, PathParser::RuleWildcard);
+    size_t _la = 0;
 
 #if __cplusplus > 201703L
-  auto onExit = finally([=, this] {
+    auto onExit = finally([=, this] {
 #else
-  auto onExit = finally([=] {
+    auto onExit = finally([=] {
 #endif
-    exitRule();
-  });
-  try {
-    enterOuterAlt(_localctx, 1);
-    setState(60);
-    _la = _input->LA(1);
-    if (!(_la == PathParser::STAR
+        exitRule();
+    });
+    try {
+        enterOuterAlt(_localctx, 1);
+        setState(60);
+        _la = _input->LA(1);
+        if (!(_la == PathParser::STAR
 
-    || _la == PathParser::DOUBLE_STAR)) {
-    _errHandler->recoverInline(this);
-    }
-    else {
-      _errHandler->reportMatch(this);
-      consume();
-    }
-   
-  }
-  catch (RecognitionException &e) {
-    _errHandler->reportError(this, e);
-    _localctx->exception = std::current_exception();
-    _errHandler->recover(this, _localctx->exception);
-  }
+              || _la == PathParser::DOUBLE_STAR)) {
+            _errHandler->recoverInline(this);
+        } else {
+            _errHandler->reportMatch(this);
+            consume();
+        }
 
-  return _localctx;
+    } catch (RecognitionException &e) {
+        _errHandler->reportError(this, e);
+        _localctx->exception = std::current_exception();
+        _errHandler->recover(this, _localctx->exception);
+    }
+
+    return _localctx;
 }
 
 // Static vars and initialization.
@@ -747,104 +740,156 @@
 atn::ATN PathParser::_atn;
 std::vector<uint16_t> PathParser::_serializedATN;
 
-std::vector<std::string> PathParser::_ruleNames = {
-  "path", "prefixPath", "suffixPath", "nodeName", "nodeNameWithoutWildcard", 
-  "nodeNameSlice", "identifier", "wildcard"
-};
+std::vector<std::string> PathParser::_ruleNames = {"path",
+                                                   "prefixPath",
+                                                   "suffixPath",
+                                                   "nodeName",
+                                                   "nodeNameWithoutWildcard",
+                                                   "nodeNameSlice",
+                                                   "identifier",
+                                                   "wildcard"};
 
 std::vector<std::string> PathParser::_literalNames = {
-  "", "", "", "", "", "'-'", "'+'", "'/'", "'%'", "'=='", "'='", "'>'", 
-  "'>='", "'<'", "'<='", "", "'&'", "'&&'", "'|'", "'||'", "'!'", "'.'", 
-  "','", "';'", "'*'", "'**'", "'('", "')'", "'['", "']'", "'::'"
-};
+    "",    "",     "",    "",     "",     "'-'", "'+'",  "'/'",
+    "'%'", "'=='", "'='", "'>'",  "'>='", "'<'", "'<='", "",
+    "'&'", "'&&'", "'|'", "'||'", "'!'",  "'.'", "','",  "';'",
+    "'*'", "'**'", "'('", "')'",  "'['",  "']'", "'::'"};
 
-std::vector<std::string> PathParser::_symbolicNames = {
-  "", "ROOT", "WS", "TIME", "TIMESTAMP", "MINUS", "PLUS", "DIV", "MOD", 
-  "OPERATOR_DEQ", "OPERATOR_SEQ", "OPERATOR_GT", "OPERATOR_GTE", "OPERATOR_LT", 
-  "OPERATOR_LTE", "OPERATOR_NEQ", "OPERATOR_BITWISE_AND", "OPERATOR_LOGICAL_AND", 
-  "OPERATOR_BITWISE_OR", "OPERATOR_LOGICAL_OR", "OPERATOR_NOT", "DOT", "COMMA", 
-  "SEMI", "STAR", "DOUBLE_STAR", "LR_BRACKET", "RR_BRACKET", "LS_BRACKET", 
-  "RS_BRACKET", "DOUBLE_COLON", "STRING_LITERAL", "DURATION_LITERAL", "DATETIME_LITERAL", 
-  "INTEGER_LITERAL", "EXPONENT_NUM_PART", "ID", "QUOTED_ID"
-};
+std::vector<std::string> PathParser::_symbolicNames = {"",
+                                                       "ROOT",
+                                                       "WS",
+                                                       "TIME",
+                                                       "TIMESTAMP",
+                                                       "MINUS",
+                                                       "PLUS",
+                                                       "DIV",
+                                                       "MOD",
+                                                       "OPERATOR_DEQ",
+                                                       "OPERATOR_SEQ",
+                                                       "OPERATOR_GT",
+                                                       "OPERATOR_GTE",
+                                                       "OPERATOR_LT",
+                                                       "OPERATOR_LTE",
+                                                       "OPERATOR_NEQ",
+                                                       "OPERATOR_BITWISE_AND",
+                                                       "OPERATOR_LOGICAL_AND",
+                                                       "OPERATOR_BITWISE_OR",
+                                                       "OPERATOR_LOGICAL_OR",
+                                                       "OPERATOR_NOT",
+                                                       "DOT",
+                                                       "COMMA",
+                                                       "SEMI",
+                                                       "STAR",
+                                                       "DOUBLE_STAR",
+                                                       "LR_BRACKET",
+                                                       "RR_BRACKET",
+                                                       "LS_BRACKET",
+                                                       "RS_BRACKET",
+                                                       "DOUBLE_COLON",
+                                                       "STRING_LITERAL",
+                                                       "DURATION_LITERAL",
+                                                       "DATETIME_LITERAL",
+                                                       "INTEGER_LITERAL",
+                                                       "EXPONENT_NUM_PART",
+                                                       "ID",
+                                                       "QUOTED_ID"};
 
 dfa::Vocabulary PathParser::_vocabulary(_literalNames, _symbolicNames);
 
 std::vector<std::string> PathParser::_tokenNames;
 
 PathParser::Initializer::Initializer() {
-	for (size_t i = 0; i < _symbolicNames.size(); ++i) {
-		std::string name = _vocabulary.getLiteralName(i);
-		if (name.empty()) {
-			name = _vocabulary.getSymbolicName(i);
-		}
+    for (size_t i = 0; i < _symbolicNames.size(); ++i) {
+        std::string name = _vocabulary.getLiteralName(i);
+        if (name.empty()) {
+            name = _vocabulary.getSymbolicName(i);
+        }
 
-		if (name.empty()) {
-			_tokenNames.push_back("<INVALID>");
-		} else {
-      _tokenNames.push_back(name);
+        if (name.empty()) {
+            _tokenNames.push_back("<INVALID>");
+        } else {
+            _tokenNames.push_back(name);
+        }
     }
-	}
 
-  static const uint16_t serializedATNSegment0[] = {
-    0x3, 0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964, 
-       0x3, 0x27, 0x41, 0x4, 0x2, 0x9, 0x2, 0x4, 0x3, 0x9, 0x3, 0x4, 0x4, 
-       0x9, 0x4, 0x4, 0x5, 0x9, 0x5, 0x4, 0x6, 0x9, 0x6, 0x4, 0x7, 0x9, 
-       0x7, 0x4, 0x8, 0x9, 0x8, 0x4, 0x9, 0x9, 0x9, 0x3, 0x2, 0x3, 0x2, 
-       0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x3, 0x2, 0x5, 0x2, 0x19, 0xa, 0x2, 
-       0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x7, 0x3, 0x1e, 0xa, 0x3, 0xc, 0x3, 
-       0xe, 0x3, 0x21, 0xb, 0x3, 0x3, 0x4, 0x3, 0x4, 0x3, 0x4, 0x7, 0x4, 
-       0x26, 0xa, 0x4, 0xc, 0x4, 0xe, 0x4, 0x29, 0xb, 0x4, 0x3, 0x5, 0x3, 
-       0x5, 0x3, 0x5, 0x3, 0x5, 0x5, 0x5, 0x2f, 0xa, 0x5, 0x3, 0x5, 0x3, 
-       0x5, 0x3, 0x5, 0x3, 0x5, 0x5, 0x5, 0x35, 0xa, 0x5, 0x3, 0x6, 0x3, 
-       0x6, 0x3, 0x7, 0x3, 0x7, 0x5, 0x7, 0x3b, 0xa, 0x7, 0x3, 0x8, 0x3, 
-       0x8, 0x3, 0x9, 0x3, 0x9, 0x3, 0x9, 0x2, 0x2, 0xa, 0x2, 0x4, 0x6, 
-       0x8, 0xa, 0xc, 0xe, 0x10, 0x2, 0x4, 0x4, 0x2, 0x22, 0x22, 0x26, 0x27, 
-       0x3, 0x2, 0x1a, 0x1b, 0x2, 0x40, 0x2, 0x18, 0x3, 0x2, 0x2, 0x2, 0x4, 
-       0x1a, 0x3, 0x2, 0x2, 0x2, 0x6, 0x22, 0x3, 0x2, 0x2, 0x2, 0x8, 0x34, 
-       0x3, 0x2, 0x2, 0x2, 0xa, 0x36, 0x3, 0x2, 0x2, 0x2, 0xc, 0x3a, 0x3, 
-       0x2, 0x2, 0x2, 0xe, 0x3c, 0x3, 0x2, 0x2, 0x2, 0x10, 0x3e, 0x3, 0x2, 
-       0x2, 0x2, 0x12, 0x13, 0x5, 0x4, 0x3, 0x2, 0x13, 0x14, 0x7, 0x2, 0x2, 
-       0x3, 0x14, 0x19, 0x3, 0x2, 0x2, 0x2, 0x15, 0x16, 0x5, 0x6, 0x4, 0x2, 
-       0x16, 0x17, 0x7, 0x2, 0x2, 0x3, 0x17, 0x19, 0x3, 0x2, 0x2, 0x2, 0x18, 
-       0x12, 0x3, 0x2, 0x2, 0x2, 0x18, 0x15, 0x3, 0x2, 0x2, 0x2, 0x19, 0x3, 
-       0x3, 0x2, 0x2, 0x2, 0x1a, 0x1f, 0x7, 0x3, 0x2, 0x2, 0x1b, 0x1c, 0x7, 
-       0x17, 0x2, 0x2, 0x1c, 0x1e, 0x5, 0x8, 0x5, 0x2, 0x1d, 0x1b, 0x3, 
-       0x2, 0x2, 0x2, 0x1e, 0x21, 0x3, 0x2, 0x2, 0x2, 0x1f, 0x1d, 0x3, 0x2, 
-       0x2, 0x2, 0x1f, 0x20, 0x3, 0x2, 0x2, 0x2, 0x20, 0x5, 0x3, 0x2, 0x2, 
-       0x2, 0x21, 0x1f, 0x3, 0x2, 0x2, 0x2, 0x22, 0x27, 0x5, 0x8, 0x5, 0x2, 
-       0x23, 0x24, 0x7, 0x17, 0x2, 0x2, 0x24, 0x26, 0x5, 0x8, 0x5, 0x2, 
-       0x25, 0x23, 0x3, 0x2, 0x2, 0x2, 0x26, 0x29, 0x3, 0x2, 0x2, 0x2, 0x27, 
-       0x25, 0x3, 0x2, 0x2, 0x2, 0x27, 0x28, 0x3, 0x2, 0x2, 0x2, 0x28, 0x7, 
-       0x3, 0x2, 0x2, 0x2, 0x29, 0x27, 0x3, 0x2, 0x2, 0x2, 0x2a, 0x35, 0x5, 
-       0x10, 0x9, 0x2, 0x2b, 0x2c, 0x5, 0x10, 0x9, 0x2, 0x2c, 0x2e, 0x5, 
-       0xc, 0x7, 0x2, 0x2d, 0x2f, 0x5, 0x10, 0x9, 0x2, 0x2e, 0x2d, 0x3, 
-       0x2, 0x2, 0x2, 0x2e, 0x2f, 0x3, 0x2, 0x2, 0x2, 0x2f, 0x35, 0x3, 0x2, 
-       0x2, 0x2, 0x30, 0x31, 0x5, 0xc, 0x7, 0x2, 0x31, 0x32, 0x5, 0x10, 
-       0x9, 0x2, 0x32, 0x35, 0x3, 0x2, 0x2, 0x2, 0x33, 0x35, 0x5, 0xa, 0x6, 
-       0x2, 0x34, 0x2a, 0x3, 0x2, 0x2, 0x2, 0x34, 0x2b, 0x3, 0x2, 0x2, 0x2, 
-       0x34, 0x30, 0x3, 0x2, 0x2, 0x2, 0x34, 0x33, 0x3, 0x2, 0x2, 0x2, 0x35, 
-       0x9, 0x3, 0x2, 0x2, 0x2, 0x36, 0x37, 0x5, 0xe, 0x8, 0x2, 0x37, 0xb, 
-       0x3, 0x2, 0x2, 0x2, 0x38, 0x3b, 0x5, 0xe, 0x8, 0x2, 0x39, 0x3b, 0x7, 
-       0x24, 0x2, 0x2, 0x3a, 0x38, 0x3, 0x2, 0x2, 0x2, 0x3a, 0x39, 0x3, 
-       0x2, 0x2, 0x2, 0x3b, 0xd, 0x3, 0x2, 0x2, 0x2, 0x3c, 0x3d, 0x9, 0x2, 
-       0x2, 0x2, 0x3d, 0xf, 0x3, 0x2, 0x2, 0x2, 0x3e, 0x3f, 0x9, 0x3, 0x2, 
-       0x2, 0x3f, 0x11, 0x3, 0x2, 0x2, 0x2, 0x8, 0x18, 0x1f, 0x27, 0x2e, 
-       0x34, 0x3a, 
-  };
+    static const uint16_t serializedATNSegment0[] = {
+        0x3,  0x608b, 0xa72a, 0x8133, 0xb9ed, 0x417c, 0x3be7, 0x7786, 0x5964,
+        0x3,  0x27,   0x41,   0x4,    0x2,    0x9,    0x2,    0x4,    0x3,
+        0x9,  0x3,    0x4,    0x4,    0x9,    0x4,    0x4,    0x5,    0x9,
+        0x5,  0x4,    0x6,    0x9,    0x6,    0x4,    0x7,    0x9,    0x7,
+        0x4,  0x8,    0x9,    0x8,    0x4,    0x9,    0x9,    0x9,    0x3,
+        0x2,  0x3,    0x2,    0x3,    0x2,    0x3,    0x2,    0x3,    0x2,
+        0x3,  0x2,    0x5,    0x2,    0x19,   0xa,    0x2,    0x3,    0x3,
+        0x3,  0x3,    0x3,    0x3,    0x7,    0x3,    0x1e,   0xa,    0x3,
+        0xc,  0x3,    0xe,    0x3,    0x21,   0xb,    0x3,    0x3,    0x4,
+        0x3,  0x4,    0x3,    0x4,    0x7,    0x4,    0x26,   0xa,    0x4,
+        0xc,  0x4,    0xe,    0x4,    0x29,   0xb,    0x4,    0x3,    0x5,
+        0x3,  0x5,    0x3,    0x5,    0x3,    0x5,    0x5,    0x5,    0x2f,
+        0xa,  0x5,    0x3,    0x5,    0x3,    0x5,    0x3,    0x5,    0x3,
+        0x5,  0x5,    0x5,    0x35,   0xa,    0x5,    0x3,    0x6,    0x3,
+        0x6,  0x3,    0x7,    0x3,    0x7,    0x5,    0x7,    0x3b,   0xa,
+        0x7,  0x3,    0x8,    0x3,    0x8,    0x3,    0x9,    0x3,    0x9,
+        0x3,  0x9,    0x2,    0x2,    0xa,    0x2,    0x4,    0x6,    0x8,
+        0xa,  0xc,    0xe,    0x10,   0x2,    0x4,    0x4,    0x2,    0x22,
+        0x22, 0x26,   0x27,   0x3,    0x2,    0x1a,   0x1b,   0x2,    0x40,
+        0x2,  0x18,   0x3,    0x2,    0x2,    0x2,    0x4,    0x1a,   0x3,
+        0x2,  0x2,    0x2,    0x6,    0x22,   0x3,    0x2,    0x2,    0x2,
+        0x8,  0x34,   0x3,    0x2,    0x2,    0x2,    0xa,    0x36,   0x3,
+        0x2,  0x2,    0x2,    0xc,    0x3a,   0x3,    0x2,    0x2,    0x2,
+        0xe,  0x3c,   0x3,    0x2,    0x2,    0x2,    0x10,   0x3e,   0x3,
+        0x2,  0x2,    0x2,    0x12,   0x13,   0x5,    0x4,    0x3,    0x2,
+        0x13, 0x14,   0x7,    0x2,    0x2,    0x3,    0x14,   0x19,   0x3,
+        0x2,  0x2,    0x2,    0x15,   0x16,   0x5,    0x6,    0x4,    0x2,
+        0x16, 0x17,   0x7,    0x2,    0x2,    0x3,    0x17,   0x19,   0x3,
+        0x2,  0x2,    0x2,    0x18,   0x12,   0x3,    0x2,    0x2,    0x2,
+        0x18, 0x15,   0x3,    0x2,    0x2,    0x2,    0x19,   0x3,    0x3,
+        0x2,  0x2,    0x2,    0x1a,   0x1f,   0x7,    0x3,    0x2,    0x2,
+        0x1b, 0x1c,   0x7,    0x17,   0x2,    0x2,    0x1c,   0x1e,   0x5,
+        0x8,  0x5,    0x2,    0x1d,   0x1b,   0x3,    0x2,    0x2,    0x2,
+        0x1e, 0x21,   0x3,    0x2,    0x2,    0x2,    0x1f,   0x1d,   0x3,
+        0x2,  0x2,    0x2,    0x1f,   0x20,   0x3,    0x2,    0x2,    0x2,
+        0x20, 0x5,    0x3,    0x2,    0x2,    0x2,    0x21,   0x1f,   0x3,
+        0x2,  0x2,    0x2,    0x22,   0x27,   0x5,    0x8,    0x5,    0x2,
+        0x23, 0x24,   0x7,    0x17,   0x2,    0x2,    0x24,   0x26,   0x5,
+        0x8,  0x5,    0x2,    0x25,   0x23,   0x3,    0x2,    0x2,    0x2,
+        0x26, 0x29,   0x3,    0x2,    0x2,    0x2,    0x27,   0x25,   0x3,
+        0x2,  0x2,    0x2,    0x27,   0x28,   0x3,    0x2,    0x2,    0x2,
+        0x28, 0x7,    0x3,    0x2,    0x2,    0x2,    0x29,   0x27,   0x3,
+        0x2,  0x2,    0x2,    0x2a,   0x35,   0x5,    0x10,   0x9,    0x2,
+        0x2b, 0x2c,   0x5,    0x10,   0x9,    0x2,    0x2c,   0x2e,   0x5,
+        0xc,  0x7,    0x2,    0x2d,   0x2f,   0x5,    0x10,   0x9,    0x2,
+        0x2e, 0x2d,   0x3,    0x2,    0x2,    0x2,    0x2e,   0x2f,   0x3,
+        0x2,  0x2,    0x2,    0x2f,   0x35,   0x3,    0x2,    0x2,    0x2,
+        0x30, 0x31,   0x5,    0xc,    0x7,    0x2,    0x31,   0x32,   0x5,
+        0x10, 0x9,    0x2,    0x32,   0x35,   0x3,    0x2,    0x2,    0x2,
+        0x33, 0x35,   0x5,    0xa,    0x6,    0x2,    0x34,   0x2a,   0x3,
+        0x2,  0x2,    0x2,    0x34,   0x2b,   0x3,    0x2,    0x2,    0x2,
+        0x34, 0x30,   0x3,    0x2,    0x2,    0x2,    0x34,   0x33,   0x3,
+        0x2,  0x2,    0x2,    0x35,   0x9,    0x3,    0x2,    0x2,    0x2,
+        0x36, 0x37,   0x5,    0xe,    0x8,    0x2,    0x37,   0xb,    0x3,
+        0x2,  0x2,    0x2,    0x38,   0x3b,   0x5,    0xe,    0x8,    0x2,
+        0x39, 0x3b,   0x7,    0x24,   0x2,    0x2,    0x3a,   0x38,   0x3,
+        0x2,  0x2,    0x2,    0x3a,   0x39,   0x3,    0x2,    0x2,    0x2,
+        0x3b, 0xd,    0x3,    0x2,    0x2,    0x2,    0x3c,   0x3d,   0x9,
+        0x2,  0x2,    0x2,    0x3d,   0xf,    0x3,    0x2,    0x2,    0x2,
+        0x3e, 0x3f,   0x9,    0x3,    0x2,    0x2,    0x3f,   0x11,   0x3,
+        0x2,  0x2,    0x2,    0x8,    0x18,   0x1f,   0x27,   0x2e,   0x34,
+        0x3a,
+    };
 
-  _serializedATN.insert(_serializedATN.end(), serializedATNSegment0,
-    serializedATNSegment0 + sizeof(serializedATNSegment0) / sizeof(serializedATNSegment0[0]));
+    _serializedATN.insert(
+        _serializedATN.end(), serializedATNSegment0,
+        serializedATNSegment0 +
+            sizeof(serializedATNSegment0) / sizeof(serializedATNSegment0[0]));
 
+    atn::ATNDeserializer deserializer;
+    _atn = deserializer.deserialize(_serializedATN);
 
-  atn::ATNDeserializer deserializer;
-  _atn = deserializer.deserialize(_serializedATN);
-
-  size_t count = _atn.getNumberOfDecisions();
-  _decisionToDFA.reserve(count);
-  for (size_t i = 0; i < count; i++) { 
-    _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
-  }
+    size_t count = _atn.getNumberOfDecisions();
+    _decisionToDFA.reserve(count);
+    for (size_t i = 0; i < count; i++) {
+        _decisionToDFA.emplace_back(_atn.getDecisionState(i), i);
+    }
 }
 
 PathParser::Initializer PathParser::_init;
diff --git a/cpp/src/parser/generated/PathParser.h b/cpp/src/parser/generated/PathParser.h
index 5d7afeb..2336c57 100644
--- a/cpp/src/parser/generated/PathParser.h
+++ b/cpp/src/parser/generated/PathParser.h
@@ -20,203 +20,254 @@
 
 #pragma once
 
-
 #include "antlr4-runtime.h"
 
+class PathParser : public antlr4::Parser {
+   public:
+    enum {
+        ROOT = 1,
+        WS = 2,
+        TIME = 3,
+        TIMESTAMP = 4,
+        MINUS = 5,
+        PLUS = 6,
+        DIV = 7,
+        MOD = 8,
+        OPERATOR_DEQ = 9,
+        OPERATOR_SEQ = 10,
+        OPERATOR_GT = 11,
+        OPERATOR_GTE = 12,
+        OPERATOR_LT = 13,
+        OPERATOR_LTE = 14,
+        OPERATOR_NEQ = 15,
+        OPERATOR_BITWISE_AND = 16,
+        OPERATOR_LOGICAL_AND = 17,
+        OPERATOR_BITWISE_OR = 18,
+        OPERATOR_LOGICAL_OR = 19,
+        OPERATOR_NOT = 20,
+        DOT = 21,
+        COMMA = 22,
+        SEMI = 23,
+        STAR = 24,
+        DOUBLE_STAR = 25,
+        LR_BRACKET = 26,
+        RR_BRACKET = 27,
+        LS_BRACKET = 28,
+        RS_BRACKET = 29,
+        DOUBLE_COLON = 30,
+        STRING_LITERAL = 31,
+        DURATION_LITERAL = 32,
+        DATETIME_LITERAL = 33,
+        INTEGER_LITERAL = 34,
+        EXPONENT_NUM_PART = 35,
+        ID = 36,
+        QUOTED_ID = 37
+    };
 
+    enum {
+        RulePath = 0,
+        RulePrefixPath = 1,
+        RuleSuffixPath = 2,
+        RuleNodeName = 3,
+        RuleNodeNameWithoutWildcard = 4,
+        RuleNodeNameSlice = 5,
+        RuleIdentifier = 6,
+        RuleWildcard = 7
+    };
 
+    explicit PathParser(antlr4::TokenStream *input);
+    ~PathParser();
 
-class  PathParser : public antlr4::Parser {
-public:
-  enum {
-    ROOT = 1, WS = 2, TIME = 3, TIMESTAMP = 4, MINUS = 5, PLUS = 6, DIV = 7, 
-    MOD = 8, OPERATOR_DEQ = 9, OPERATOR_SEQ = 10, OPERATOR_GT = 11, OPERATOR_GTE = 12, 
-    OPERATOR_LT = 13, OPERATOR_LTE = 14, OPERATOR_NEQ = 15, OPERATOR_BITWISE_AND = 16, 
-    OPERATOR_LOGICAL_AND = 17, OPERATOR_BITWISE_OR = 18, OPERATOR_LOGICAL_OR = 19, 
-    OPERATOR_NOT = 20, DOT = 21, COMMA = 22, SEMI = 23, STAR = 24, DOUBLE_STAR = 25, 
-    LR_BRACKET = 26, RR_BRACKET = 27, LS_BRACKET = 28, RS_BRACKET = 29, 
-    DOUBLE_COLON = 30, STRING_LITERAL = 31, DURATION_LITERAL = 32, DATETIME_LITERAL = 33, 
-    INTEGER_LITERAL = 34, EXPONENT_NUM_PART = 35, ID = 36, QUOTED_ID = 37
-  };
+    virtual std::string getGrammarFileName() const override;
+    virtual const antlr4::atn::ATN &getATN() const override { return _atn; };
+    virtual const std::vector<std::string> &getTokenNames() const override {
+        return _tokenNames;
+    };  // deprecated: use vocabulary instead.
+    virtual const std::vector<std::string> &getRuleNames() const override;
+    virtual antlr4::dfa::Vocabulary &getVocabulary() const override;
 
-  enum {
-    RulePath = 0, RulePrefixPath = 1, RuleSuffixPath = 2, RuleNodeName = 3, 
-    RuleNodeNameWithoutWildcard = 4, RuleNodeNameSlice = 5, RuleIdentifier = 6, 
-    RuleWildcard = 7
-  };
+    class PathContext;
+    class PrefixPathContext;
+    class SuffixPathContext;
+    class NodeNameContext;
+    class NodeNameWithoutWildcardContext;
+    class NodeNameSliceContext;
+    class IdentifierContext;
+    class WildcardContext;
 
-  explicit PathParser(antlr4::TokenStream *input);
-  ~PathParser();
+    class PathContext : public antlr4::ParserRuleContext {
+       public:
+        PathContext(antlr4::ParserRuleContext *parent, size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        PrefixPathContext *prefixPath();
+        antlr4::tree::TerminalNode *EOF();
+        SuffixPathContext *suffixPath();
 
-  virtual std::string getGrammarFileName() const override;
-  virtual const antlr4::atn::ATN& getATN() const override { return _atn; };
-  virtual const std::vector<std::string>& getTokenNames() const override { return _tokenNames; }; // deprecated: use vocabulary instead.
-  virtual const std::vector<std::string>& getRuleNames() const override;
-  virtual antlr4::dfa::Vocabulary& getVocabulary() const override;
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
 
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
 
-  class PathContext;
-  class PrefixPathContext;
-  class SuffixPathContext;
-  class NodeNameContext;
-  class NodeNameWithoutWildcardContext;
-  class NodeNameSliceContext;
-  class IdentifierContext;
-  class WildcardContext; 
+    PathContext *path();
 
-  class  PathContext : public antlr4::ParserRuleContext {
-  public:
-    PathContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
+    class PrefixPathContext : public antlr4::ParserRuleContext {
+       public:
+        PrefixPathContext(antlr4::ParserRuleContext *parent,
+                          size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        antlr4::tree::TerminalNode *ROOT();
+        std::vector<antlr4::tree::TerminalNode *> DOT();
+        antlr4::tree::TerminalNode *DOT(size_t i);
+        std::vector<NodeNameContext *> nodeName();
+        NodeNameContext *nodeName(size_t i);
+
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
+
     PrefixPathContext *prefixPath();
-    antlr4::tree::TerminalNode *EOF();
+
+    class SuffixPathContext : public antlr4::ParserRuleContext {
+       public:
+        SuffixPathContext(antlr4::ParserRuleContext *parent,
+                          size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        std::vector<NodeNameContext *> nodeName();
+        NodeNameContext *nodeName(size_t i);
+        std::vector<antlr4::tree::TerminalNode *> DOT();
+        antlr4::tree::TerminalNode *DOT(size_t i);
+
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
+
     SuffixPathContext *suffixPath();
 
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
+    class NodeNameContext : public antlr4::ParserRuleContext {
+       public:
+        NodeNameContext(antlr4::ParserRuleContext *parent,
+                        size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        std::vector<WildcardContext *> wildcard();
+        WildcardContext *wildcard(size_t i);
+        NodeNameSliceContext *nodeNameSlice();
+        NodeNameWithoutWildcardContext *nodeNameWithoutWildcard();
 
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
 
-  PathContext* path();
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
 
-  class  PrefixPathContext : public antlr4::ParserRuleContext {
-  public:
-    PrefixPathContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *ROOT();
-    std::vector<antlr4::tree::TerminalNode *> DOT();
-    antlr4::tree::TerminalNode* DOT(size_t i);
-    std::vector<NodeNameContext *> nodeName();
-    NodeNameContext* nodeName(size_t i);
+    NodeNameContext *nodeName();
 
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
+    class NodeNameWithoutWildcardContext : public antlr4::ParserRuleContext {
+       public:
+        NodeNameWithoutWildcardContext(antlr4::ParserRuleContext *parent,
+                                       size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        IdentifierContext *identifier();
 
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
 
-  PrefixPathContext* prefixPath();
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
 
-  class  SuffixPathContext : public antlr4::ParserRuleContext {
-  public:
-    SuffixPathContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    std::vector<NodeNameContext *> nodeName();
-    NodeNameContext* nodeName(size_t i);
-    std::vector<antlr4::tree::TerminalNode *> DOT();
-    antlr4::tree::TerminalNode* DOT(size_t i);
-
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
-
-  SuffixPathContext* suffixPath();
-
-  class  NodeNameContext : public antlr4::ParserRuleContext {
-  public:
-    NodeNameContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    std::vector<WildcardContext *> wildcard();
-    WildcardContext* wildcard(size_t i);
-    NodeNameSliceContext *nodeNameSlice();
     NodeNameWithoutWildcardContext *nodeNameWithoutWildcard();
 
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
+    class NodeNameSliceContext : public antlr4::ParserRuleContext {
+       public:
+        NodeNameSliceContext(antlr4::ParserRuleContext *parent,
+                             size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        IdentifierContext *identifier();
+        antlr4::tree::TerminalNode *INTEGER_LITERAL();
 
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
 
-  NodeNameContext* nodeName();
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
 
-  class  NodeNameWithoutWildcardContext : public antlr4::ParserRuleContext {
-  public:
-    NodeNameWithoutWildcardContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
+    NodeNameSliceContext *nodeNameSlice();
+
+    class IdentifierContext : public antlr4::ParserRuleContext {
+       public:
+        IdentifierContext(antlr4::ParserRuleContext *parent,
+                          size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        antlr4::tree::TerminalNode *DURATION_LITERAL();
+        antlr4::tree::TerminalNode *ID();
+        antlr4::tree::TerminalNode *QUOTED_ID();
+
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
+
     IdentifierContext *identifier();
 
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
+    class WildcardContext : public antlr4::ParserRuleContext {
+       public:
+        WildcardContext(antlr4::ParserRuleContext *parent,
+                        size_t invokingState);
+        virtual size_t getRuleIndex() const override;
+        antlr4::tree::TerminalNode *STAR();
+        antlr4::tree::TerminalNode *DOUBLE_STAR();
 
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
+        virtual void enterRule(
+            antlr4::tree::ParseTreeListener *listener) override;
+        virtual void exitRule(
+            antlr4::tree::ParseTreeListener *listener) override;
 
-  NodeNameWithoutWildcardContext* nodeNameWithoutWildcard();
+        virtual antlrcpp::Any accept(
+            antlr4::tree::ParseTreeVisitor *visitor) override;
+    };
 
-  class  NodeNameSliceContext : public antlr4::ParserRuleContext {
-  public:
-    NodeNameSliceContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    IdentifierContext *identifier();
-    antlr4::tree::TerminalNode *INTEGER_LITERAL();
+    WildcardContext *wildcard();
 
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
+   private:
+    static std::vector<antlr4::dfa::DFA> _decisionToDFA;
+    static antlr4::atn::PredictionContextCache _sharedContextCache;
+    static std::vector<std::string> _ruleNames;
+    static std::vector<std::string> _tokenNames;
 
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
+    static std::vector<std::string> _literalNames;
+    static std::vector<std::string> _symbolicNames;
+    static antlr4::dfa::Vocabulary _vocabulary;
+    static antlr4::atn::ATN _atn;
+    static std::vector<uint16_t> _serializedATN;
 
-  NodeNameSliceContext* nodeNameSlice();
-
-  class  IdentifierContext : public antlr4::ParserRuleContext {
-  public:
-    IdentifierContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *DURATION_LITERAL();
-    antlr4::tree::TerminalNode *ID();
-    antlr4::tree::TerminalNode *QUOTED_ID();
-
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
-
-  IdentifierContext* identifier();
-
-  class  WildcardContext : public antlr4::ParserRuleContext {
-  public:
-    WildcardContext(antlr4::ParserRuleContext *parent, size_t invokingState);
-    virtual size_t getRuleIndex() const override;
-    antlr4::tree::TerminalNode *STAR();
-    antlr4::tree::TerminalNode *DOUBLE_STAR();
-
-    virtual void enterRule(antlr4::tree::ParseTreeListener *listener) override;
-    virtual void exitRule(antlr4::tree::ParseTreeListener *listener) override;
-
-    virtual antlrcpp::Any accept(antlr4::tree::ParseTreeVisitor *visitor) override;
-   
-  };
-
-  WildcardContext* wildcard();
-
-
-private:
-  static std::vector<antlr4::dfa::DFA> _decisionToDFA;
-  static antlr4::atn::PredictionContextCache _sharedContextCache;
-  static std::vector<std::string> _ruleNames;
-  static std::vector<std::string> _tokenNames;
-
-  static std::vector<std::string> _literalNames;
-  static std::vector<std::string> _symbolicNames;
-  static antlr4::dfa::Vocabulary _vocabulary;
-  static antlr4::atn::ATN _atn;
-  static std::vector<uint16_t> _serializedATN;
-
-
-  struct Initializer {
-    Initializer();
-  };
-  static Initializer _init;
+    struct Initializer {
+        Initializer();
+    };
+    static Initializer _init;
 };
-
diff --git a/cpp/src/parser/generated/PathParserBaseListener.cpp b/cpp/src/parser/generated/PathParserBaseListener.cpp
index fbf0946..652da16 100644
--- a/cpp/src/parser/generated/PathParserBaseListener.cpp
+++ b/cpp/src/parser/generated/PathParserBaseListener.cpp
@@ -18,7 +18,4 @@
  */
 // Generated from PathParser.g4 by ANTLR 4.9.3
 
-
 #include "PathParserBaseListener.h"
-
-
diff --git a/cpp/src/parser/generated/PathParserBaseListener.h b/cpp/src/parser/generated/PathParserBaseListener.h
index 9d6c0cc..c66fe45 100644
--- a/cpp/src/parser/generated/PathParserBaseListener.h
+++ b/cpp/src/parser/generated/PathParserBaseListener.h
@@ -20,48 +20,52 @@
 
 #pragma once
 
-
-#include "antlr4-runtime.h"
 #include "PathParserListener.h"
-
+#include "antlr4-runtime.h"
 
 /**
  * This class provides an empty implementation of PathParserListener,
- * which can be extended to create a listener which only needs to handle a subset
- * of the available methods.
+ * which can be extended to create a listener which only needs to handle a
+ * subset of the available methods.
  */
-class  PathParserBaseListener : public PathParserListener {
-public:
+class PathParserBaseListener : public PathParserListener {
+   public:
+    virtual void enterPath(PathParser::PathContext* /*ctx*/) override {}
+    virtual void exitPath(PathParser::PathContext* /*ctx*/) override {}
 
-  virtual void enterPath(PathParser::PathContext * /*ctx*/) override { }
-  virtual void exitPath(PathParser::PathContext * /*ctx*/) override { }
+    virtual void enterPrefixPath(
+        PathParser::PrefixPathContext* /*ctx*/) override {}
+    virtual void exitPrefixPath(
+        PathParser::PrefixPathContext* /*ctx*/) override {}
 
-  virtual void enterPrefixPath(PathParser::PrefixPathContext * /*ctx*/) override { }
-  virtual void exitPrefixPath(PathParser::PrefixPathContext * /*ctx*/) override { }
+    virtual void enterSuffixPath(
+        PathParser::SuffixPathContext* /*ctx*/) override {}
+    virtual void exitSuffixPath(
+        PathParser::SuffixPathContext* /*ctx*/) override {}
 
-  virtual void enterSuffixPath(PathParser::SuffixPathContext * /*ctx*/) override { }
-  virtual void exitSuffixPath(PathParser::SuffixPathContext * /*ctx*/) override { }
+    virtual void enterNodeName(PathParser::NodeNameContext* /*ctx*/) override {}
+    virtual void exitNodeName(PathParser::NodeNameContext* /*ctx*/) override {}
 
-  virtual void enterNodeName(PathParser::NodeNameContext * /*ctx*/) override { }
-  virtual void exitNodeName(PathParser::NodeNameContext * /*ctx*/) override { }
+    virtual void enterNodeNameWithoutWildcard(
+        PathParser::NodeNameWithoutWildcardContext* /*ctx*/) override {}
+    virtual void exitNodeNameWithoutWildcard(
+        PathParser::NodeNameWithoutWildcardContext* /*ctx*/) override {}
 
-  virtual void enterNodeNameWithoutWildcard(PathParser::NodeNameWithoutWildcardContext * /*ctx*/) override { }
-  virtual void exitNodeNameWithoutWildcard(PathParser::NodeNameWithoutWildcardContext * /*ctx*/) override { }
+    virtual void enterNodeNameSlice(
+        PathParser::NodeNameSliceContext* /*ctx*/) override {}
+    virtual void exitNodeNameSlice(
+        PathParser::NodeNameSliceContext* /*ctx*/) override {}
 
-  virtual void enterNodeNameSlice(PathParser::NodeNameSliceContext * /*ctx*/) override { }
-  virtual void exitNodeNameSlice(PathParser::NodeNameSliceContext * /*ctx*/) override { }
+    virtual void enterIdentifier(
+        PathParser::IdentifierContext* /*ctx*/) override {}
+    virtual void exitIdentifier(
+        PathParser::IdentifierContext* /*ctx*/) override {}
 
-  virtual void enterIdentifier(PathParser::IdentifierContext * /*ctx*/) override { }
-  virtual void exitIdentifier(PathParser::IdentifierContext * /*ctx*/) override { }
+    virtual void enterWildcard(PathParser::WildcardContext* /*ctx*/) override {}
+    virtual void exitWildcard(PathParser::WildcardContext* /*ctx*/) override {}
 
-  virtual void enterWildcard(PathParser::WildcardContext * /*ctx*/) override { }
-  virtual void exitWildcard(PathParser::WildcardContext * /*ctx*/) override { }
-
-
-  virtual void enterEveryRule(antlr4::ParserRuleContext * /*ctx*/) override { }
-  virtual void exitEveryRule(antlr4::ParserRuleContext * /*ctx*/) override { }
-  virtual void visitTerminal(antlr4::tree::TerminalNode * /*node*/) override { }
-  virtual void visitErrorNode(antlr4::tree::ErrorNode * /*node*/) override { }
-
+    virtual void enterEveryRule(antlr4::ParserRuleContext* /*ctx*/) override {}
+    virtual void exitEveryRule(antlr4::ParserRuleContext* /*ctx*/) override {}
+    virtual void visitTerminal(antlr4::tree::TerminalNode* /*node*/) override {}
+    virtual void visitErrorNode(antlr4::tree::ErrorNode* /*node*/) override {}
 };
-
diff --git a/cpp/src/parser/generated/PathParserBaseVisitor.cpp b/cpp/src/parser/generated/PathParserBaseVisitor.cpp
index 9c6ac11..95ac79f 100644
--- a/cpp/src/parser/generated/PathParserBaseVisitor.cpp
+++ b/cpp/src/parser/generated/PathParserBaseVisitor.cpp
@@ -18,7 +18,4 @@
  */
 // Generated from PathParser.g4 by ANTLR 4.9.3
 
-
 #include "PathParserBaseVisitor.h"
-
-
diff --git a/cpp/src/parser/generated/PathParserBaseVisitor.h b/cpp/src/parser/generated/PathParserBaseVisitor.h
index 1dfc626..06c57cf 100644
--- a/cpp/src/parser/generated/PathParserBaseVisitor.h
+++ b/cpp/src/parser/generated/PathParserBaseVisitor.h
@@ -20,50 +20,52 @@
 
 #pragma once
 
-
-#include "antlr4-runtime.h"
 #include "PathParserVisitor.h"
-
+#include "antlr4-runtime.h"
 
 /**
- * This class provides an empty implementation of PathParserVisitor, which can be
- * extended to create a visitor which only needs to handle a subset of the available methods.
+ * This class provides an empty implementation of PathParserVisitor, which can
+ * be extended to create a visitor which only needs to handle a subset of the
+ * available methods.
  */
-class  PathParserBaseVisitor : public PathParserVisitor {
-public:
+class PathParserBaseVisitor : public PathParserVisitor {
+   public:
+    virtual antlrcpp::Any visitPath(PathParser::PathContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitPath(PathParser::PathContext *ctx) override {
-    return visitChildren(ctx);
-  }
+    virtual antlrcpp::Any visitPrefixPath(
+        PathParser::PrefixPathContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitPrefixPath(PathParser::PrefixPathContext *ctx) override {
-    return visitChildren(ctx);
-  }
+    virtual antlrcpp::Any visitSuffixPath(
+        PathParser::SuffixPathContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitSuffixPath(PathParser::SuffixPathContext *ctx) override {
-    return visitChildren(ctx);
-  }
+    virtual antlrcpp::Any visitNodeName(
+        PathParser::NodeNameContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitNodeName(PathParser::NodeNameContext *ctx) override {
-    return visitChildren(ctx);
-  }
+    virtual antlrcpp::Any visitNodeNameWithoutWildcard(
+        PathParser::NodeNameWithoutWildcardContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitNodeNameWithoutWildcard(PathParser::NodeNameWithoutWildcardContext *ctx) override {
-    return visitChildren(ctx);
-  }
+    virtual antlrcpp::Any visitNodeNameSlice(
+        PathParser::NodeNameSliceContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitNodeNameSlice(PathParser::NodeNameSliceContext *ctx) override {
-    return visitChildren(ctx);
-  }
+    virtual antlrcpp::Any visitIdentifier(
+        PathParser::IdentifierContext *ctx) override {
+        return visitChildren(ctx);
+    }
 
-  virtual antlrcpp::Any visitIdentifier(PathParser::IdentifierContext *ctx) override {
-    return visitChildren(ctx);
-  }
-
-  virtual antlrcpp::Any visitWildcard(PathParser::WildcardContext *ctx) override {
-    return visitChildren(ctx);
-  }
-
-
+    virtual antlrcpp::Any visitWildcard(
+        PathParser::WildcardContext *ctx) override {
+        return visitChildren(ctx);
+    }
 };
-
diff --git a/cpp/src/parser/generated/PathParserListener.cpp b/cpp/src/parser/generated/PathParserListener.cpp
index d41f8bc..06dec21 100644
--- a/cpp/src/parser/generated/PathParserListener.cpp
+++ b/cpp/src/parser/generated/PathParserListener.cpp
@@ -18,7 +18,4 @@
  */
 // Generated from PathParser.g4 by ANTLR 4.9.3
 
-
 #include "PathParserListener.h"
-
-
diff --git a/cpp/src/parser/generated/PathParserListener.h b/cpp/src/parser/generated/PathParserListener.h
index f069a14..feb50e3 100644
--- a/cpp/src/parser/generated/PathParserListener.h
+++ b/cpp/src/parser/generated/PathParserListener.h
@@ -20,41 +20,38 @@
 
 #pragma once
 
-
-#include "antlr4-runtime.h"
 #include "PathParser.h"
-
+#include "antlr4-runtime.h"
 
 /**
- * This interface defines an abstract listener for a parse tree produced by PathParser.
+ * This interface defines an abstract listener for a parse tree produced by
+ * PathParser.
  */
-class  PathParserListener : public antlr4::tree::ParseTreeListener {
-public:
+class PathParserListener : public antlr4::tree::ParseTreeListener {
+   public:
+    virtual void enterPath(PathParser::PathContext *ctx) = 0;
+    virtual void exitPath(PathParser::PathContext *ctx) = 0;
 
-  virtual void enterPath(PathParser::PathContext *ctx) = 0;
-  virtual void exitPath(PathParser::PathContext *ctx) = 0;
+    virtual void enterPrefixPath(PathParser::PrefixPathContext *ctx) = 0;
+    virtual void exitPrefixPath(PathParser::PrefixPathContext *ctx) = 0;
 
-  virtual void enterPrefixPath(PathParser::PrefixPathContext *ctx) = 0;
-  virtual void exitPrefixPath(PathParser::PrefixPathContext *ctx) = 0;
+    virtual void enterSuffixPath(PathParser::SuffixPathContext *ctx) = 0;
+    virtual void exitSuffixPath(PathParser::SuffixPathContext *ctx) = 0;
 
-  virtual void enterSuffixPath(PathParser::SuffixPathContext *ctx) = 0;
-  virtual void exitSuffixPath(PathParser::SuffixPathContext *ctx) = 0;
+    virtual void enterNodeName(PathParser::NodeNameContext *ctx) = 0;
+    virtual void exitNodeName(PathParser::NodeNameContext *ctx) = 0;
 
-  virtual void enterNodeName(PathParser::NodeNameContext *ctx) = 0;
-  virtual void exitNodeName(PathParser::NodeNameContext *ctx) = 0;
+    virtual void enterNodeNameWithoutWildcard(
+        PathParser::NodeNameWithoutWildcardContext *ctx) = 0;
+    virtual void exitNodeNameWithoutWildcard(
+        PathParser::NodeNameWithoutWildcardContext *ctx) = 0;
 
-  virtual void enterNodeNameWithoutWildcard(PathParser::NodeNameWithoutWildcardContext *ctx) = 0;
-  virtual void exitNodeNameWithoutWildcard(PathParser::NodeNameWithoutWildcardContext *ctx) = 0;
+    virtual void enterNodeNameSlice(PathParser::NodeNameSliceContext *ctx) = 0;
+    virtual void exitNodeNameSlice(PathParser::NodeNameSliceContext *ctx) = 0;
 
-  virtual void enterNodeNameSlice(PathParser::NodeNameSliceContext *ctx) = 0;
-  virtual void exitNodeNameSlice(PathParser::NodeNameSliceContext *ctx) = 0;
+    virtual void enterIdentifier(PathParser::IdentifierContext *ctx) = 0;
+    virtual void exitIdentifier(PathParser::IdentifierContext *ctx) = 0;
 
-  virtual void enterIdentifier(PathParser::IdentifierContext *ctx) = 0;
-  virtual void exitIdentifier(PathParser::IdentifierContext *ctx) = 0;
-
-  virtual void enterWildcard(PathParser::WildcardContext *ctx) = 0;
-  virtual void exitWildcard(PathParser::WildcardContext *ctx) = 0;
-
-
+    virtual void enterWildcard(PathParser::WildcardContext *ctx) = 0;
+    virtual void exitWildcard(PathParser::WildcardContext *ctx) = 0;
 };
-
diff --git a/cpp/src/parser/generated/PathParserVisitor.cpp b/cpp/src/parser/generated/PathParserVisitor.cpp
index bea4c74..78f078a 100644
--- a/cpp/src/parser/generated/PathParserVisitor.cpp
+++ b/cpp/src/parser/generated/PathParserVisitor.cpp
@@ -18,7 +18,4 @@
  */
 // Generated from PathParser.g4 by ANTLR 4.9.3
 
-
 #include "PathParserVisitor.h"
-
-
diff --git a/cpp/src/parser/generated/PathParserVisitor.h b/cpp/src/parser/generated/PathParserVisitor.h
index 5090d03..27a5e5d 100644
--- a/cpp/src/parser/generated/PathParserVisitor.h
+++ b/cpp/src/parser/generated/PathParserVisitor.h
@@ -20,38 +20,38 @@
 
 #pragma once
 
-
-#include "antlr4-runtime.h"
 #include "PathParser.h"
-
-
+#include "antlr4-runtime.h"
 
 /**
  * This class defines an abstract visitor for a parse tree
  * produced by PathParser.
  */
-class  PathParserVisitor : public antlr4::tree::AbstractParseTreeVisitor {
-public:
-
-  /**
-   * Visit parse trees produced by PathParser.
-   */
+class PathParserVisitor : public antlr4::tree::AbstractParseTreeVisitor {
+   public:
+    /**
+     * Visit parse trees produced by PathParser.
+     */
     virtual antlrcpp::Any visitPath(PathParser::PathContext *context) = 0;
 
-    virtual antlrcpp::Any visitPrefixPath(PathParser::PrefixPathContext *context) = 0;
+    virtual antlrcpp::Any visitPrefixPath(
+        PathParser::PrefixPathContext *context) = 0;
 
-    virtual antlrcpp::Any visitSuffixPath(PathParser::SuffixPathContext *context) = 0;
+    virtual antlrcpp::Any visitSuffixPath(
+        PathParser::SuffixPathContext *context) = 0;
 
-    virtual antlrcpp::Any visitNodeName(PathParser::NodeNameContext *context) = 0;
+    virtual antlrcpp::Any visitNodeName(
+        PathParser::NodeNameContext *context) = 0;
 
-    virtual antlrcpp::Any visitNodeNameWithoutWildcard(PathParser::NodeNameWithoutWildcardContext *context) = 0;
+    virtual antlrcpp::Any visitNodeNameWithoutWildcard(
+        PathParser::NodeNameWithoutWildcardContext *context) = 0;
 
-    virtual antlrcpp::Any visitNodeNameSlice(PathParser::NodeNameSliceContext *context) = 0;
+    virtual antlrcpp::Any visitNodeNameSlice(
+        PathParser::NodeNameSliceContext *context) = 0;
 
-    virtual antlrcpp::Any visitIdentifier(PathParser::IdentifierContext *context) = 0;
+    virtual antlrcpp::Any visitIdentifier(
+        PathParser::IdentifierContext *context) = 0;
 
-    virtual antlrcpp::Any visitWildcard(PathParser::WildcardContext *context) = 0;
-
-
+    virtual antlrcpp::Any visitWildcard(
+        PathParser::WildcardContext *context) = 0;
 };
-
diff --git a/cpp/src/parser/path_nodes_generator.cpp b/cpp/src/parser/path_nodes_generator.cpp
index a22d390..55fe035 100644
--- a/cpp/src/parser/path_nodes_generator.cpp
+++ b/cpp/src/parser/path_nodes_generator.cpp
@@ -16,32 +16,35 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+#include "path_nodes_generator.h"
+
 #include <string>
 #include <vector>
 
-#include "path_nodes_generator.h"
-#include "utils/errno_define.h"
 #include "generated/PathLexer.h"
 #include "generated/PathParser.h"
 #include "path_parser_error.h"
 #include "path_visitor.h"
+#include "utils/errno_define.h"
 
 namespace storage {
-    std::vector<std::string> PathNodesGenerator::invokeParser(const std::string& path) {
-        antlr4::ANTLRInputStream inputStream(path);
-        PathLexer lexer(&inputStream);
-        lexer.removeErrorListeners(); 
-        lexer.addErrorListener(&PathParseError::getInstance());
-        antlr4::CommonTokenStream tokens(&lexer);
-        PathParser parser(&tokens);
-        parser.removeErrorListeners(); 
-        parser.addErrorListener(&PathParseError::getInstance());
-        parser.getInterpreter<antlr4::atn::ParserATNSimulator>()->setPredictionMode(antlr4::atn::PredictionMode::LL);
-        /* if use SLL Mode to parse path, it will throw exception
-            but c++ tsfile forbid throw exception, so we use LL Mode
-            to parse path.
-        */
-        PathVisitor path_visitor;
-        return path_visitor.visit(parser.path()).as<std::vector<std::string>>();
-    }
+std::vector<std::string> PathNodesGenerator::invokeParser(
+    const std::string& path) {
+    antlr4::ANTLRInputStream inputStream(path);
+    PathLexer lexer(&inputStream);
+    lexer.removeErrorListeners();
+    lexer.addErrorListener(&PathParseError::getInstance());
+    antlr4::CommonTokenStream tokens(&lexer);
+    PathParser parser(&tokens);
+    parser.removeErrorListeners();
+    parser.addErrorListener(&PathParseError::getInstance());
+    parser.getInterpreter<antlr4::atn::ParserATNSimulator>()->setPredictionMode(
+        antlr4::atn::PredictionMode::LL);
+    /* if use SLL Mode to parse path, it will throw exception
+        but c++ tsfile forbid throw exception, so we use LL Mode
+        to parse path.
+    */
+    PathVisitor path_visitor;
+    return path_visitor.visit(parser.path()).as<std::vector<std::string>>();
 }
+}  // namespace storage
diff --git a/cpp/src/parser/path_nodes_generator.h b/cpp/src/parser/path_nodes_generator.h
index 51a78b6..4118746 100644
--- a/cpp/src/parser/path_nodes_generator.h
+++ b/cpp/src/parser/path_nodes_generator.h
@@ -24,9 +24,9 @@
 
 namespace storage {
 class PathNodesGenerator {
-    public:
-        static std::vector<std::string> invokeParser(const std::string& path);
+   public:
+    static std::vector<std::string> invokeParser(const std::string& path);
 };
-}
+}  // namespace storage
 
 #endif
\ No newline at end of file
diff --git a/cpp/src/parser/path_parser_error.h b/cpp/src/parser/path_parser_error.h
index f4cdfb4..bf2a67b 100644
--- a/cpp/src/parser/path_parser_error.h
+++ b/cpp/src/parser/path_parser_error.h
@@ -16,24 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-#include "antlr4-runtime.h"
-#include <stdexcept>
-#include <string>
 #include <set>
 #include <sstream>
+#include <stdexcept>
+#include <string>
+
+#include "antlr4-runtime.h"
 
 class PathParseError : public antlr4::BaseErrorListener {
-public:
+   public:
     static PathParseError& getInstance() {
         static PathParseError instance;
         return instance;
     }
 
     void syntaxError(antlr4::Recognizer* recognizer,
-                     antlr4::Token* offending_symbol,
-                     size_t line,
-                     size_t char_position_inLine,
-                     const std::string& msg,
+                     antlr4::Token* offending_symbol, size_t line,
+                     size_t char_position_inLine, const std::string& msg,
                      std::exception_ptr e) override {
         std::string modified_msg = msg;
 
@@ -47,24 +46,29 @@
                 expectedTokenNames.insert(vocabulary.getDisplayName(token));
             }
 
-            if (expectedTokenNames.count("ID") && expectedTokenNames.count("QUOTED_ID")) {
+            if (expectedTokenNames.count("ID") &&
+                expectedTokenNames.count("QUOTED_ID")) {
                 std::ostringstream expectedStr;
                 expectedStr << "{ID, QUOTED_ID";
 
-                if (expectedTokenNames.count("*") && expectedTokenNames.count("**")) {
+                if (expectedTokenNames.count("*") &&
+                    expectedTokenNames.count("**")) {
                     expectedStr << ", *, **";
                 }
 
                 expectedStr << "}";
-                modified_msg = replace_substring(msg, expectedTokens.toString(vocabulary), expectedStr.str());
+                modified_msg =
+                    replace_substring(msg, expectedTokens.toString(vocabulary),
+                                      expectedStr.str());
             }
         }
 
         throw std::runtime_error("line " + std::to_string(line) + ":" +
-                                 std::to_string(char_position_inLine) + " " + modified_msg);
+                                 std::to_string(char_position_inLine) + " " +
+                                 modified_msg);
     }
 
-private:
+   private:
     static std::string replace_substring(const std::string& source,
                                          const std::string& from,
                                          const std::string& to) {
@@ -72,6 +76,7 @@
         if (start_pos == std::string::npos) {
             return source;
         }
-        return source.substr(0, start_pos) + to + source.substr(start_pos + from.length());
+        return source.substr(0, start_pos) + to +
+               source.substr(start_pos + from.length());
     }
 };
\ No newline at end of file
diff --git a/cpp/src/parser/path_visitor.cpp b/cpp/src/parser/path_visitor.cpp
index ebbf5d3..6c79684 100644
--- a/cpp/src/parser/path_visitor.cpp
+++ b/cpp/src/parser/path_visitor.cpp
@@ -19,93 +19,96 @@
 
 #include "path_visitor.h"
 
-namespace storage
-{
-    antlrcpp::Any PathVisitor::visitPath(PathParser::PathContext *ctx) {
-        if (ctx->prefixPath() != nullptr) {
-            return visitPrefixPath(ctx->prefixPath());
+namespace storage {
+antlrcpp::Any PathVisitor::visitPath(PathParser::PathContext* ctx) {
+    if (ctx->prefixPath() != nullptr) {
+        return visitPrefixPath(ctx->prefixPath());
+    } else {
+        return visitSuffixPath(ctx->suffixPath());
+    }
+}
+
+antlrcpp::Any PathVisitor::visitPrefixPath(PathParser::PrefixPathContext* ctx) {
+    std::vector<PathParser::NodeNameContext*> node_names = ctx->nodeName();
+    std::vector<std::string> path;
+    path.reserve(node_names.size() + 1);
+    path.push_back(ctx->ROOT()->getText());
+    for (uint64_t i = 0; i < node_names.size(); i++) {
+        path.push_back(parse_node_name(node_names[i]));
+    }
+    return path;
+}
+
+antlrcpp::Any PathVisitor::visitSuffixPath(PathParser::SuffixPathContext* ctx) {
+    std::vector<PathParser::NodeNameContext*> node_names = ctx->nodeName();
+    std::vector<std::string> path;
+    path.reserve(node_names.size());
+    for (uint64_t i = 0; i < node_names.size(); i++) {
+        path.emplace_back(parse_node_name(node_names[i]));
+    }
+    return path;
+}
+std::string PathVisitor::parse_node_name(PathParser::NodeNameContext* ctx) {
+    std::string node_name = ctx->getText();
+    if (starts_with(node_name, BACK_QUOTE_STRING) &&
+        ends_with(node_name, BACK_QUOTE_STRING)) {
+        std::string unWrapped = node_name.substr(1, node_name.length() - 2);
+        if (is_real_number(unWrapped) ||
+            !std::regex_match(unWrapped, IDENTIFIER_PATTERN)) {
+            return node_name;
+        }
+
+        return unWrapped;
+    }
+
+    return node_name;
+}
+bool PathVisitor::is_real_number(const std::string& str) {
+    std::string s = str;
+    if (starts_with(s, "+") || starts_with(s, "-")) {
+        std::string removeSign = s.substr(1);
+        if (starts_with(removeSign, "+") || starts_with(removeSign, "-")) {
+            return false;
         } else {
-            return visitSuffixPath(ctx->suffixPath());
-        }
-    } 
-
-    antlrcpp::Any PathVisitor::visitPrefixPath(PathParser::PrefixPathContext *ctx) {
-        std::vector<PathParser::NodeNameContext *> node_names = ctx->nodeName();
-        std::vector<std::string> path;
-        path.reserve(node_names.size() + 1);
-        path.push_back(ctx->ROOT()->getText());
-        for (uint64_t i = 0; i < node_names.size(); i++) {
-            path.push_back(parse_node_name(node_names[i]));
-        }
-        return path;
-    } 
-
-    antlrcpp::Any PathVisitor::visitSuffixPath(PathParser::SuffixPathContext *ctx) {
-        std::vector<PathParser::NodeNameContext *> node_names = ctx->nodeName();
-        std::vector<std::string> path;
-        path.reserve(node_names.size());
-        for (uint64_t i = 0; i < node_names.size(); i++) {
-            path.emplace_back(parse_node_name(node_names[i]));
-        }
-        return path;
-    }
-    std::string PathVisitor::parse_node_name(PathParser::NodeNameContext *ctx) {
-        std::string node_name = ctx->getText();
-        if (starts_with(node_name, BACK_QUOTE_STRING) && ends_with(node_name, BACK_QUOTE_STRING)) {
-            std::string unWrapped = node_name.substr(1, node_name.length() - 2);
-            if (is_real_number(unWrapped) || !std::regex_match(unWrapped, IDENTIFIER_PATTERN)) {
-                return node_name;
-            }
-            
-            return unWrapped;
-        }
-
-        return node_name;
-    }
-    bool PathVisitor::is_real_number(const std::string& str) {
-        std::string s = str;
-        if (starts_with(s, "+") || starts_with(s, "-")) {
-            std::string removeSign = s.substr(1);
-            if (starts_with(removeSign, "+") || starts_with(removeSign, "-")) {
-                return false;
-            } else {
-                s = removeSign;
-            }
-        }
-        size_t index = 0;
-        auto it = std::find_if(s.begin(), s.end(), [](const char& c) { return c != '0'; });
-        if (it != s.end()) {
-            index = it - s.begin();
-        }
-
-        if (index > 0 && (s[index] == 'e' || s[index] == 'E')) {
-            return is_creatable(s.substr(index - 1));
-        } else {
-            return is_creatable(s.substr(index));
+            s = removeSign;
         }
     }
-    bool PathVisitor::starts_with(const std::string& str, const std::string& prefix) {
-        if (prefix.size() > str.size()) {
-            return false;
-        }
-        return str.substr(0, prefix.size()) == prefix;
+    size_t index = 0;
+    auto it = std::find_if(s.begin(), s.end(),
+                           [](const char& c) { return c != '0'; });
+    if (it != s.end()) {
+        index = it - s.begin();
     }
 
-    bool PathVisitor::ends_with(const std::string& str, const std::string& suffix) {
-        if (suffix.size() > str.size()) {
-            return false;
-        }
-        return str.substr(str.size() - suffix.size()) == suffix;
+    if (index > 0 && (s[index] == 'e' || s[index] == 'E')) {
+        return is_creatable(s.substr(index - 1));
+    } else {
+        return is_creatable(s.substr(index));
     }
+}
+bool PathVisitor::starts_with(const std::string& str,
+                              const std::string& prefix) {
+    if (prefix.size() > str.size()) {
+        return false;
+    }
+    return str.substr(0, prefix.size()) == prefix;
+}
 
-    bool PathVisitor::is_creatable(const std::string& str) {
-        try {
-            std::stod(str);
-            return true;
-        } catch (const std::invalid_argument& e) {
-            return false;
-        } catch (const std::out_of_range& e) {
-            return false;
-        }
+bool PathVisitor::ends_with(const std::string& str, const std::string& suffix) {
+    if (suffix.size() > str.size()) {
+        return false;
     }
-} // namespace storage
+    return str.substr(str.size() - suffix.size()) == suffix;
+}
+
+bool PathVisitor::is_creatable(const std::string& str) {
+    try {
+        std::stod(str);
+        return true;
+    } catch (const std::invalid_argument& e) {
+        return false;
+    } catch (const std::out_of_range& e) {
+        return false;
+    }
+}
+}  // namespace storage
diff --git a/cpp/src/parser/path_visitor.h b/cpp/src/parser/path_visitor.h
index 23c6aba..34b5965 100644
--- a/cpp/src/parser/path_visitor.h
+++ b/cpp/src/parser/path_visitor.h
@@ -20,30 +20,29 @@
 #ifndef PATH_VISITOR_H
 #define PATH_VISITOR_H
 
-#include "generated/PathParserBaseVisitor.h"
-#include "generated/PathParser.h"
 #include "common/constant/tsfile_constant.h"
+#include "generated/PathParser.h"
+#include "generated/PathParserBaseVisitor.h"
 
-namespace storage
-{
-    class PathVisitor : public PathParserBaseVisitor
-    {
-        public:
-            antlrcpp::Any visitPath(PathParser::PathContext *ctx) override;
+namespace storage {
+class PathVisitor : public PathParserBaseVisitor {
+   public:
+    antlrcpp::Any visitPath(PathParser::PathContext* ctx) override;
 
-            antlrcpp::Any visitPrefixPath(PathParser::PrefixPathContext *ctx) override; 
+    antlrcpp::Any visitPrefixPath(PathParser::PrefixPathContext* ctx) override;
 
-            antlrcpp::Any visitSuffixPath(PathParser::SuffixPathContext *ctx) override;
+    antlrcpp::Any visitSuffixPath(PathParser::SuffixPathContext* ctx) override;
 
-            static bool is_real_number(const std::string& str);
-        private:
-            std::string parse_node_name(PathParser::NodeNameContext *ctx);
+    static bool is_real_number(const std::string& str);
 
-            static bool starts_with(const std::string& src, const std::string& prefix);
-            static bool ends_with(const std::string& src, const std::string& suffix);
-            static bool is_creatable(const std::string& str);
-    };
-    
-} // namespace storage
+   private:
+    std::string parse_node_name(PathParser::NodeNameContext* ctx);
+
+    static bool starts_with(const std::string& src, const std::string& prefix);
+    static bool ends_with(const std::string& src, const std::string& suffix);
+    static bool is_creatable(const std::string& str);
+};
+
+}  // namespace storage
 
 #endif
diff --git a/cpp/src/reader/aligned_chunk_reader.cc b/cpp/src/reader/aligned_chunk_reader.cc
index 06af869..9578ecc 100644
--- a/cpp/src/reader/aligned_chunk_reader.cc
+++ b/cpp/src/reader/aligned_chunk_reader.cc
@@ -209,9 +209,9 @@
     int ret = E_OK;
     Filter *filter =
         (oneshoot_filter != nullptr ? oneshoot_filter : time_filter_);
-
     if (prev_time_page_not_finish() && prev_value_page_not_finish()) {
-        ret = decode_time_value_buf_into_tsblock(ret_tsblock, oneshoot_filter);
+        ret = decode_time_value_buf_into_tsblock(ret_tsblock, oneshoot_filter,
+                                                 &pa);
         return ret;
     }
     if (!prev_time_page_not_finish() && !prev_value_page_not_finish()) {
@@ -237,7 +237,8 @@
         }
     }
     if (IS_SUCC(ret)) {
-        ret = decode_time_value_buf_into_tsblock(ret_tsblock, oneshoot_filter);
+        ret = decode_time_value_buf_into_tsblock(ret_tsblock, oneshoot_filter,
+                                                 &pa);
     }
     return ret;
 }
@@ -303,7 +304,8 @@
     int offset = chunk_meta->offset_of_chunk_header_ + chunk_visit_offset;
     int read_size =
         (want_size < DEFAULT_READ_SIZE ? DEFAULT_READ_SIZE : want_size);
-    if (file_data_buf_size < read_size || (may_shrink && read_size < file_data_buf_size / 10)) {
+    if (file_data_buf_size < read_size ||
+        (may_shrink && read_size < file_data_buf_size / 10)) {
         file_data_buf = (char *)mem_realloc(file_data_buf, read_size);
         if (IS_NULL(file_data_buf)) {
             return E_OOM;
@@ -367,7 +369,6 @@
     uint32_t time_compressed_buf_size = 0;
     uint32_t time_uncompressed_buf_size = 0;
 
-
     // Step 2: do uncompress
     if (IS_SUCC(ret)) {
         time_compressed_buf =
@@ -485,10 +486,10 @@
 }
 
 int AlignedChunkReader::decode_time_value_buf_into_tsblock(
-    TsBlock *&ret_tsblock, Filter *filter) {
+    TsBlock *&ret_tsblock, Filter *filter, common::PageArena *pa) {
     int ret = common::E_OK;
     ret = decode_tv_buf_into_tsblock_by_datatype(time_in_, value_in_,
-                                                 ret_tsblock, filter);
+                                                 ret_tsblock, filter, pa);
     // if we return during @decode_tv_buf_into_tsblock, we should keep
     // @uncompressed_buf_ valid until all TV pairs are decoded.
     if (ret != E_OVERFLOW) {
@@ -520,9 +521,8 @@
         uint32_t mask = 1 << 7;                                                \
         int64_t time = 0;                                                      \
         CppType value;                                                         \
-        while ((time_decoder_->has_remaining() || time_in.has_remaining())     \
-                && (value_decoder_->has_remaining() ||                         \
-                value_in.has_remaining())){                                    \
+        while (time_decoder_->has_remaining(time_in) &&                        \
+               value_decoder_->has_remaining(value_in)) {                      \
             cur_value_index++;                                                 \
             if (((value_page_col_notnull_bitmap_[cur_value_index / 8] &        \
                   0xFF) &                                                      \
@@ -531,16 +531,17 @@
                 if (ret != E_OK) {                                             \
                     break;                                                     \
                 }                                                              \
-                ret = value_decoder_->read_##ReadType(value,                   \
-                value_in);                                                     \
-                if (ret != E_OK) {                                             \
+                if (UNLIKELY(!row_appender.add_row())) {                       \
+                    ret = E_OVERFLOW;                                          \
                     break;                                                     \
                 }                                                              \
+                row_appender.append(0, (char *)&time, sizeof(time));           \
+                row_appender.append_null(1);                                   \
                 continue;                                                      \
             }                                                                  \
             if (UNLIKELY(!row_appender.add_row())) {                           \
                 ret = E_OVERFLOW;                                              \
-                cur_value_index--;                                            \
+                cur_value_index--;                                             \
                 break;                                                         \
             } else if (RET_FAIL(time_decoder_->read_int64(time, time_in))) {   \
             } else if (RET_FAIL(value_decoder_->read_##ReadType(value,         \
@@ -562,40 +563,47 @@
     Filter *filter) {
     int ret = E_OK;
     uint32_t mask = 1 << 7;
-    do {
-        int64_t time = 0;
-        int32_t value;
-        while ((time_decoder_->has_remaining() &&
-                value_decoder_->has_remaining()) ||
-               (time_in.has_remaining() && value_in.has_remaining())) {
-            if (((value_page_col_notnull_bitmap_[cur_value_index / 8] & 0xFF) &
-                 (mask >> (cur_value_index % 8))) == 0) {
-                RET_FAIL(time_decoder_->read_int64(time, time_in));
-                continue;
+    int64_t time = 0;
+    int32_t value;
+    while (time_decoder_->has_remaining(time_in) &&
+           value_decoder_->has_remaining(value_in)) {
+        cur_value_index++;
+        if (((value_page_col_notnull_bitmap_[cur_value_index / 8] & 0xFF) &
+             (mask >> (cur_value_index % 8))) == 0) {
+            ret = time_decoder_->read_int64(time, time_in);
+            if (ret != E_OK) {
+                break;
             }
             if (UNLIKELY(!row_appender.add_row())) {
                 ret = E_OVERFLOW;
                 break;
-            } else if (RET_FAIL(time_decoder_->read_int64(time, time_in))) {
             }
-            if (RET_FAIL(value_decoder_->read_int32(value, value_in))) {
-            } else if (filter != nullptr && !filter->satisfy(time, value)) {
-                row_appender.backoff_add_row();
-                continue;
-            } else {
-                /*std::cout << "decoder: time=" << time << ", value=" << value
-                 * << std::endl;*/
-                row_appender.append(0, (char *)&time, sizeof(time));
-                row_appender.append(1, (char *)&value, sizeof(value));
-            }
+            row_appender.append(0, (char *)&time, sizeof(time));
+            row_appender.append_null(1);
+            continue;
         }
-    } while (false);
+        if (UNLIKELY(!row_appender.add_row())) {
+            ret = E_OVERFLOW;
+            cur_value_index--;
+            break;
+        } else if (RET_FAIL(time_decoder_->read_int64(time, time_in))) {
+        } else if (RET_FAIL(value_decoder_->read_int32(value, value_in))) {
+        } else if (filter != nullptr && !filter->satisfy(time, value)) {
+            row_appender.backoff_add_row();
+            continue;
+        } else {
+            /*std::cout << "decoder: time=" << time << ", value=" << value
+             * << std::endl;*/
+            row_appender.append(0, (char *)&time, sizeof(time));
+            row_appender.append(1, (char *)&value, sizeof(value));
+        }
+    }
     return ret;
 }
 
 int AlignedChunkReader::decode_tv_buf_into_tsblock_by_datatype(
     ByteStream &time_in, ByteStream &value_in, TsBlock *ret_tsblock,
-    Filter *filter) {
+    Filter *filter, common::PageArena *pa) {
     int ret = E_OK;
     RowAppender row_appender(ret_tsblock);
     switch (value_chunk_header_.data_type_) {
@@ -603,10 +611,14 @@
             DECODE_TYPED_TV_INTO_TSBLOCK(bool, boolean, time_in_, value_in_,
                                          row_appender);
             break;
+        case common::DATE:
         case common::INT32:
-            DECODE_TYPED_TV_INTO_TSBLOCK(int32_t, int32, time_in_, value_in_,
-                                         row_appender);
+            // DECODE_TYPED_TV_INTO_TSBLOCK(int32_t, int32, time_in_, value_in_,
+            //                              row_appender);
+            ret = i32_DECODE_TYPED_TV_INTO_TSBLOCK(time_in_, value_in_,
+                                                   row_appender, filter);
             break;
+        case common::TIMESTAMP:
         case common::INT64:
             DECODE_TYPED_TV_INTO_TSBLOCK(int64_t, int64, time_in_, value_in_,
                                          row_appender);
@@ -619,6 +631,12 @@
             DECODE_TYPED_TV_INTO_TSBLOCK(double, double, time_in_, value_in_,
                                          row_appender);
             break;
+        case common::STRING:
+        case common::BLOB:
+        case common::TEXT:
+            ret = STRING_DECODE_TYPED_TV_INTO_TSBLOCK(
+                time_in, value_in, row_appender, *pa, filter);
+            break;
         default:
             ret = E_NOT_SUPPORT;
             ASSERT(false);
@@ -629,4 +647,28 @@
     return ret;
 }
 
+int AlignedChunkReader::STRING_DECODE_TYPED_TV_INTO_TSBLOCK(
+    ByteStream &time_in, ByteStream &value_in, RowAppender &row_appender,
+    PageArena &pa, Filter *filter) {
+    int ret = E_OK;
+    int64_t time = 0;
+    common::String value;
+    while (time_decoder_->has_remaining(time_in)) {
+        ASSERT(value_decoder_->has_remaining(value_in));
+        if (UNLIKELY(!row_appender.add_row())) {
+            ret = E_OVERFLOW;
+            break;
+        } else if (RET_FAIL(time_decoder_->read_int64(time, time_in))) {
+        } else if (RET_FAIL(value_decoder_->read_String(value, pa, value_in))) {
+        } else if (filter != nullptr && !filter->satisfy(time, value)) {
+            row_appender.backoff_add_row();
+            continue;
+        } else {
+            row_appender.append(0, (char *)&time, sizeof(time));
+            row_appender.append(1, value.buf_, value.len_);
+        }
+    }
+    return ret;
+}
+
 }  // end namespace storage
\ No newline at end of file
diff --git a/cpp/src/reader/aligned_chunk_reader.h b/cpp/src/reader/aligned_chunk_reader.h
index 58898f7..7bf2904 100644
--- a/cpp/src/reader/aligned_chunk_reader.h
+++ b/cpp/src/reader/aligned_chunk_reader.h
@@ -102,25 +102,32 @@
     int decode_cur_time_page_data();
     int decode_cur_value_page_data();
     int decode_time_value_buf_into_tsblock(common::TsBlock *&ret_tsblock,
-                                           Filter *filter);
+                                           Filter *filter,
+                                           common::PageArena *pa);
     bool prev_time_page_not_finish() const {
-        return (time_decoder_ && time_decoder_->has_remaining()) ||
+        return (time_decoder_ && time_decoder_->has_remaining(time_in_)) ||
                time_in_.has_remaining();
     }
 
     bool prev_value_page_not_finish() const {
-        return (value_decoder_ && value_decoder_->has_remaining()) ||
+        return (value_decoder_ && value_decoder_->has_remaining(value_in_)) ||
                value_in_.has_remaining();
     }
 
     int decode_tv_buf_into_tsblock_by_datatype(common::ByteStream &time_in,
                                                common::ByteStream &value_in,
                                                common::TsBlock *ret_tsblock,
-                                               Filter *filter);
+                                               Filter *filter,
+                                               common::PageArena *pa);
     int i32_DECODE_TYPED_TV_INTO_TSBLOCK(common::ByteStream &time_in,
                                          common::ByteStream &value_in,
                                          common::RowAppender &row_appender,
                                          Filter *filter);
+    int STRING_DECODE_TYPED_TV_INTO_TSBLOCK(common::ByteStream &time_in,
+                                            common::ByteStream &value_in,
+                                            common::RowAppender &row_appender,
+                                            common::PageArena &pa,
+                                            Filter *filter);
 
    private:
     ReadFile *read_file_;
diff --git a/cpp/src/reader/block/device_ordered_tsblock_reader.cc b/cpp/src/reader/block/device_ordered_tsblock_reader.cc
index 7469cc8..e1c46ff 100644
--- a/cpp/src/reader/block/device_ordered_tsblock_reader.cc
+++ b/cpp/src/reader/block/device_ordered_tsblock_reader.cc
@@ -23,7 +23,8 @@
 
 int DeviceOrderedTsBlockReader::has_next(bool &has_next) {
     int ret = common::E_OK;
-    if (current_reader_ != nullptr && IS_SUCC(current_reader_->has_next(has_next)) && has_next) {
+    if (current_reader_ != nullptr &&
+        IS_SUCC(current_reader_->has_next(has_next)) && has_next) {
         return common::E_OK;
     }
     if (current_reader_ != nullptr) {
@@ -40,11 +41,17 @@
             current_reader_ = nullptr;
         }
         current_reader_ = new SingleDeviceTsBlockReader(
-            task, block_size_, metadata_querier_, tsfile_io_reader_, time_filter_,
-            field_filter_);
+            task, block_size_, metadata_querier_, tsfile_io_reader_,
+            time_filter_, field_filter_);
         if (current_reader_ == nullptr) {
             return common::E_OOM;
         }
+        if (RET_FAIL(current_reader_->init(task, block_size_, time_filter_,
+                                           field_filter_))) {
+            delete current_reader_;
+            current_reader_ = nullptr;
+            return ret;
+        }
 
         if (RET_FAIL(current_reader_->has_next(has_next))) {
             return ret;
diff --git a/cpp/src/reader/block/single_device_tsblock_reader.cc b/cpp/src/reader/block/single_device_tsblock_reader.cc
index ec8059f..1df563c 100644
--- a/cpp/src/reader/block/single_device_tsblock_reader.cc
+++ b/cpp/src/reader/block/single_device_tsblock_reader.cc
@@ -29,9 +29,7 @@
       field_filter_(field_filter),
       block_size_(block_size),
       tuple_desc_(),
-      tsfile_io_reader_(tsfile_io_reader) {
-    init(device_query_task, block_size, time_filter, field_filter);
-}
+      tsfile_io_reader_(tsfile_io_reader) {}
 
 int SingleDeviceTsBlockReader::init(DeviceQueryTask* device_query_task,
                                     uint32_t block_size, Filter* time_filter,
@@ -63,12 +61,11 @@
         device_query_task_->get_column_mapping()
             ->get_measurement_columns()
             .size());
-    tsfile_io_reader_->get_timeseries_indexes(
-        device_query_task->get_device_id(),
-        device_query_task->get_column_mapping()->get_measurement_columns(),
-        time_series_indexs, pa_);
-    for (auto measurement_column :
-         device_query_task->get_column_mapping()->get_measurement_columns()) {
+    if (RET_FAIL(tsfile_io_reader_->get_timeseries_indexes(
+            device_query_task->get_device_id(),
+            device_query_task->get_column_mapping()->get_measurement_columns(),
+            time_series_indexs, pa_))) {
+        return ret;
     }
     for (const auto& time_series_index : time_series_indexs) {
         construct_column_context(time_series_index, time_filter);
@@ -104,9 +101,11 @@
         has_next = false;
         return common::E_OK;
     }
+
     for (auto col_appender : col_appenders_) {
         col_appender->reset();
     }
+
     current_block_->reset();
 
     bool next_time_set = false;
@@ -171,6 +170,21 @@
                 break;
             }
         }
+
+        // Align all columns, filling with nulls where data is missing.
+        uint32_t row_count =
+            col_appenders_[time_column_index_]->get_col_row_count();
+        for (auto& col_appender : col_appenders_) {
+            if (tuple_desc_.get_column_category(
+                    col_appender->get_column_index()) !=
+                common::ColumnCategory::FIELD) {
+                continue;
+            }
+            while (col_appender->get_col_row_count() < row_count) {
+                col_appender->add_row();
+                col_appender->append_null();
+            }
+        }
     }
     return ret;
 }
@@ -199,11 +213,20 @@
     for (const auto& entry : id_column_contexts_) {
         const auto& id_column_context = entry.second;
         for (int32_t pos : id_column_context.pos_in_result_) {
-            common::String device_id(
-                device_query_task_->get_device_id()->get_segments().at(
-                    id_column_context.pos_in_device_id_));
+            std::string* device_tag = nullptr;
+            device_tag = device_query_task_->get_device_id()->get_segments().at(
+                id_column_context.pos_in_device_id_);
+            if (device_tag == nullptr) {
+                ret = col_appenders_[pos + 1]->fill_null(
+                    current_block_->get_row_count());
+                if (ret != common::E_OK) {
+                    return ret;
+                }
+                continue;
+            }
+
             if (RET_FAIL(col_appenders_[pos + 1]->fill(
-                    (char*)&device_id, sizeof(device_id),
+                    device_tag->c_str(), device_tag->length(),
                     current_block_->get_row_count()))) {
                 return ret;
             }
@@ -357,8 +380,8 @@
     if (value_iter_->end()) {
         return common::E_NO_MORE_DATA;
     }
-    value = value_iter_->read(&len);
-    assert(value != nullptr);
+    bool is_null = false;
+    value = value_iter_->read(&len, &is_null);
     return common::E_OK;
 }
 
@@ -383,7 +406,11 @@
     }
     for (int32_t pos : pos_in_result_) {
         col_appenders[pos + 1]->add_row();
-        col_appenders[pos + 1]->append(val, len);
+        if (val == nullptr) {
+            col_appenders[pos + 1]->append_null();
+        } else {
+            col_appenders[pos + 1]->append(val, len);
+        }
     }
 }
 
diff --git a/cpp/src/reader/block/single_device_tsblock_reader.h b/cpp/src/reader/block/single_device_tsblock_reader.h
index a94aed3..46ac8c4 100644
--- a/cpp/src/reader/block/single_device_tsblock_reader.h
+++ b/cpp/src/reader/block/single_device_tsblock_reader.h
@@ -40,7 +40,7 @@
                                        Filter* time_filter,
                                        Filter* field_filter);
     ~SingleDeviceTsBlockReader() { close(); }
-    int has_next(bool &has_next) override;
+    int has_next(bool& has_next) override;
     int next(common::TsBlock*& ret_block) override;
     int init(DeviceQueryTask* device_query_task, uint32_t block_size,
              Filter* time_filter, Filter* field_filter);
@@ -48,7 +48,7 @@
 
    private:
     int construct_column_context(const ITimeseriesIndex* time_series_index,
-                                  Filter* time_filter);
+                                 Filter* time_filter);
     int fill_measurements(
         std::vector<MeasurementColumnContext*>& column_contexts);
     int fill_ids();
@@ -124,14 +124,12 @@
                          column_context_map) override;
     int init(DeviceQueryTask* device_query_task,
              const ITimeseriesIndex* time_series_index, Filter* time_filter,
-             const std::vector<int32_t>& pos_in_result,
-             common::PageArena& pa);
+             const std::vector<int32_t>& pos_in_result, common::PageArena& pa);
     int get_next_tsblock(bool alloc_mem) override;
     int get_current_time(int64_t& time) override;
     int get_current_value(char*& value, uint32_t& len) override;
     int move_iter() override;
 
-
    private:
     std::string column_name_;
     std::vector<int32_t> pos_in_result_;
diff --git a/cpp/src/reader/bloom_filter.cc b/cpp/src/reader/bloom_filter.cc
index 5834809..591bb6c 100644
--- a/cpp/src/reader/bloom_filter.cc
+++ b/cpp/src/reader/bloom_filter.cc
@@ -121,33 +121,22 @@
     for (; word_idx < (filter_data_bytes_len / 8); word_idx += 1) {
         uint64_t cur_word = 0;
         uint8_t *cur_word_start_byte = filter_data + (word_idx * 8);
-        cur_word |= *(cur_word_start_byte + 0);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 1);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 2);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 3);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 4);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 5);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 6);
-        cur_word = cur_word << 8;
-        cur_word |= *(cur_word_start_byte + 7);
-        cur_word = cur_word << 8;
-        *(words_ + word_idx) = cur_word;
+        for (int b = 0; b < 8; ++b) {
+            cur_word |= static_cast<uint64_t>(cur_word_start_byte[b])
+                        << (8 * b);
+        }
+        words_[word_idx] = cur_word;
     }
 
     if (filter_data_bytes_len > word_idx * 8) {
         uint64_t cur_word = 0;
         uint8_t *cur_word_start_byte = filter_data + (word_idx * 8);
-        for (uint32_t r = 0; r < filter_data_bytes_len - word_idx * 8; r++) {
-            cur_word |= *(cur_word_start_byte + r);
-            cur_word = cur_word << 8;
+        int remain = filter_data_bytes_len - word_idx * 8;
+        for (int b = 0; b < remain; ++b) {
+            cur_word |= static_cast<uint64_t>(cur_word_start_byte[b])
+                        << (8 * b);
         }
-        *(words_ + word_idx) = cur_word;
+        words_[word_idx] = cur_word;
     }
     return ret;
 }
@@ -190,8 +179,10 @@
     }
     memcpy(path_buf, device_name.buf_, device_name.len_);
     *(path_buf + device_name.len_) = '.';
-    memcpy(path_buf + device_name.len_ + 1, measurement_name.buf_,
-           measurement_name.len_);
+    if (measurement_name.buf_) {
+        memcpy(path_buf + device_name.len_ + 1, measurement_name.buf_,
+               measurement_name.len_);
+    }
     *(path_buf + device_name.len_ + measurement_name.len_ + 1) = '\0';
     ret_str.buf_ = path_buf;
     ret_str.len_ = len;
diff --git a/cpp/src/reader/bloom_filter.h b/cpp/src/reader/bloom_filter.h
index 181cb96..a43b264 100644
--- a/cpp/src/reader/bloom_filter.h
+++ b/cpp/src/reader/bloom_filter.h
@@ -72,7 +72,7 @@
     void set(int32_t pos) {
         int32_t word_idx = pos / 64;
         int32_t word_offset = pos % 64;
-        words_[word_idx] |= (1ul << word_offset);
+        words_[word_idx] |= (1ull << word_offset);
     }
     int32_t get_words_in_use() const {
         for (int32_t i = word_count_ - 1; i >= 0; i--) {
@@ -109,6 +109,7 @@
                        const common::String &measurement_name);
     int serialize_to(common::ByteStream &out);
     int deserialize_from(common::ByteStream &in);
+    BitSet *get_bit_set() { return &bitset_; }
 
    private:
     common::String get_entry_string(const common::String &device_name,
diff --git a/cpp/src/reader/chunk_reader.cc b/cpp/src/reader/chunk_reader.cc
index f1e395d..d4ab50f 100644
--- a/cpp/src/reader/chunk_reader.cc
+++ b/cpp/src/reader/chunk_reader.cc
@@ -240,8 +240,8 @@
         file_data_buf_size_ = read_size;
     }
     int ret_read_len = 0;
-    if (RET_FAIL(read_file_->read(offset, file_data_buf, read_size,
-                                  ret_read_len))) {
+    if (RET_FAIL(
+            read_file_->read(offset, file_data_buf, read_size, ret_read_len))) {
     } else {
         in_stream_.wrap_from(file_data_buf, ret_read_len);
         // DEBUG_hex_dump_buf("wrapped buf = ", file_data_buf, 256);
@@ -367,9 +367,8 @@
     do {                                                                       \
         int64_t time = 0;                                                      \
         CppType value;                                                         \
-        while (time_decoder_->has_remaining() || time_in.has_remaining()) {    \
-            ASSERT(value_decoder_->has_remaining() ||                          \
-                   value_in.has_remaining());                                  \
+        while (time_decoder_->has_remaining(time_in)) {                        \
+            ASSERT(value_decoder_->has_remaining(value_in));                   \
             if (UNLIKELY(!row_appender.add_row())) {                           \
                 ret = E_OVERFLOW;                                              \
                 break;                                                         \
@@ -396,8 +395,8 @@
     do {
         int64_t time = 0;
         int32_t value;
-        while (time_decoder_->has_remaining() || time_in.has_remaining()) {
-            ASSERT(value_decoder_->has_remaining() || value_in.has_remaining());
+        while (time_decoder_->has_remaining(time_in)) {
+            ASSERT(value_decoder_->has_remaining(value_in));
             if (UNLIKELY(!row_appender.add_row())) {
                 ret = E_OVERFLOW;
                 break;
@@ -425,8 +424,8 @@
     int ret = E_OK;
     int64_t time = 0;
     common::String value;
-    while (time_decoder_->has_remaining() || time_in.has_remaining()) {
-        ASSERT(value_decoder_->has_remaining() || value_in.has_remaining());
+    while (time_decoder_->has_remaining(time_in)) {
+        ASSERT(value_decoder_->has_remaining(value_in));
         if (UNLIKELY(!row_appender.add_row())) {
             ret = E_OVERFLOW;
             break;
@@ -437,7 +436,7 @@
             continue;
         } else {
             row_appender.append(0, (char *)&time, sizeof(time));
-            row_appender.append(1, (char *)&value, sizeof(value));
+            row_appender.append(1, value.buf_, value.len_);
         }
     }
     return ret;
@@ -455,12 +454,14 @@
             DECODE_TYPED_TV_INTO_TSBLOCK(bool, boolean, time_in_, value_in_,
                                          row_appender);
             break;
+        case common::DATE:
         case common::INT32:
             // DECODE_TYPED_TV_INTO_TSBLOCK(int32_t, int32, time_in_, value_in_,
             // row_appender);
             ret = i32_DECODE_TYPED_TV_INTO_TSBLOCK(time_in_, value_in_,
                                                    row_appender, filter);
             break;
+        case TIMESTAMP:
         case common::INT64:
             DECODE_TYPED_TV_INTO_TSBLOCK(int64_t, int64, time_in_, value_in_,
                                          row_appender);
@@ -473,9 +474,11 @@
             DECODE_TYPED_TV_INTO_TSBLOCK(double, double, time_in_, value_in_,
                                          row_appender);
             break;
+        case common::TEXT:
+        case common::BLOB:
         case common::STRING:
-            ret = STRING_DECODE_TYPED_TV_INTO_TSBLOCK(time_in, value_in, row_appender,
-                                                *pa, filter);
+            ret = STRING_DECODE_TYPED_TV_INTO_TSBLOCK(
+                time_in, value_in, row_appender, *pa, filter);
             break;
         default:
             ret = E_NOT_SUPPORT;
diff --git a/cpp/src/reader/chunk_reader.h b/cpp/src/reader/chunk_reader.h
index 80a3416..23be484 100644
--- a/cpp/src/reader/chunk_reader.h
+++ b/cpp/src/reader/chunk_reader.h
@@ -49,7 +49,7 @@
           value_in_(),
           uncompressed_buf_(nullptr) {}
     int init(ReadFile *read_file, common::String m_name,
-                     common::TSDataType data_type, Filter *time_filter) override;
+             common::TSDataType data_type, Filter *time_filter) override;
     void reset() override;
     void destroy() override;
     ~ChunkReader() override = default;
@@ -68,12 +68,13 @@
     int load_by_meta(ChunkMeta *meta) override;
 
     int get_next_page(common::TsBlock *tsblock, Filter *oneshoot_filter,
-                              common::PageArena &pa) override;
+                      common::PageArena &pa) override;
 
    private:
     FORCE_INLINE bool chunk_has_only_one_page() const {
-        return (chunk_header_.chunk_type_ & ONLY_ONE_PAGE_CHUNK_HEADER_MARKER) ==
-            ONLY_ONE_PAGE_CHUNK_HEADER_MARKER;
+        return (chunk_header_.chunk_type_ &
+                ONLY_ONE_PAGE_CHUNK_HEADER_MARKER) ==
+               ONLY_ONE_PAGE_CHUNK_HEADER_MARKER;
     }
     int alloc_compressor_and_value_decoder(
         common::TSEncoding encoding, common::TSDataType data_type,
@@ -85,7 +86,7 @@
     int decode_cur_page_data(common::TsBlock *&ret_tsblock, Filter *filter,
                              common::PageArena &pa);
     bool prev_page_not_finish() const {
-        return (time_decoder_ && time_decoder_->has_remaining()) ||
+        return (time_decoder_ && time_decoder_->has_remaining(time_in_)) ||
                time_in_.has_remaining();
     }
 
diff --git a/cpp/src/reader/column_mapping.h b/cpp/src/reader/column_mapping.h
index 4c3fde2..97c2cc3 100644
--- a/cpp/src/reader/column_mapping.h
+++ b/cpp/src/reader/column_mapping.h
@@ -21,9 +21,8 @@
 
 #include "common/schema.h"
 #include "expression.h"
-namespace storage
-{
-class ColumnMapping { 
+namespace storage {
+class ColumnMapping {
    public:
     int add(const std::string &column_name, int index, TableSchema &schema) {
         int column_index = schema.find_column_index(column_name);
@@ -50,7 +49,8 @@
         return common::E_OK;
     }
 
-    const std::vector<int> &get_column_pos(const std::string &column_name) const {
+    const std::vector<int> &get_column_pos(
+        const std::string &column_name) const {
         static const std::vector<int> empty;
         auto it = column_pos_map.find(column_name);
         return it != column_pos_map.end() ? it->second : empty;
@@ -77,7 +77,7 @@
     std::unordered_set<std::string> tag_columns_;
     std::unordered_set<std::string> field_columns_;
 };
-    
-} // namespace storage
 
-#endif // READER_COLUMN_MAPPING_H
\ No newline at end of file
+}  // namespace storage
+
+#endif  // READER_COLUMN_MAPPING_H
\ No newline at end of file
diff --git a/cpp/src/reader/device_meta_iterator.cc b/cpp/src/reader/device_meta_iterator.cc
index d451226..4f47341 100644
--- a/cpp/src/reader/device_meta_iterator.cc
+++ b/cpp/src/reader/device_meta_iterator.cc
@@ -28,7 +28,6 @@
     if (load_results() != common::E_OK) {
         return false;
     }
-
     return !result_cache_.empty();
 }
 
@@ -44,6 +43,7 @@
 }
 
 int DeviceMetaIterator::load_results() {
+    bool is_root_idx_node = true;
     while (!meta_index_nodes_.empty()) {
         // To avoid ASan overflow.
         // using `const auto&` creates a reference
@@ -58,6 +58,12 @@
         } else {
             return common::E_INVALID_NODE_TYPE;
         }
+        // The first MetaIndexNode is the root and is not loaded here, so no
+        // need to destruct it here.
+        if (!is_root_idx_node) {
+            meta_data_index_node->~MetaIndexNode();
+        }
+        is_root_idx_node = false;
     }
 
     return common::E_OK;
@@ -78,7 +84,7 @@
                                  : meta_index_node->end_offset_;
         MetaIndexNode* child_node = nullptr;
         if (RET_FAIL(io_reader_->read_device_meta_index(
-                start_offset, end_offset, pa_, child_node, false))) {
+                start_offset, end_offset, pa_, child_node, true))) {
             return ret;
         } else {
             result_cache_.push(
diff --git a/cpp/src/reader/filter/gt_eq.h b/cpp/src/reader/filter/gt_eq.h
index b18c825..067c560 100644
--- a/cpp/src/reader/filter/gt_eq.h
+++ b/cpp/src/reader/filter/gt_eq.h
@@ -26,7 +26,7 @@
 template <typename T>
 class GtEq : public UnaryFilter<T> {
    public:
-    GtEq() : UnaryFilter<T>() {};
+    GtEq() : UnaryFilter<T>(){};
     GtEq(T value, FilterType type) : UnaryFilter<T>(value, type) {}
 
     virtual ~GtEq() {}
diff --git a/cpp/src/reader/imeta_data_querier.h b/cpp/src/reader/imeta_data_querier.h
index 01a9606..73a005e 100644
--- a/cpp/src/reader/imeta_data_querier.h
+++ b/cpp/src/reader/imeta_data_querier.h
@@ -36,7 +36,8 @@
 
     virtual std::vector<std::vector<std::shared_ptr<ChunkMeta>>>
     get_chunk_metadata_lists(
-        std::shared_ptr<IDeviceID> device_id, const std::unordered_set<std::string>& field_names,
+        std::shared_ptr<IDeviceID> device_id,
+        const std::unordered_set<std::string>& field_names,
         const MetaIndexNode* field_node = nullptr) const = 0;
 
     virtual std::map<Path, std::vector<std::shared_ptr<ChunkMeta>>>
diff --git a/cpp/src/reader/meta_data_querier.cc b/cpp/src/reader/meta_data_querier.cc
index 7224643..5a32b92 100644
--- a/cpp/src/reader/meta_data_querier.cc
+++ b/cpp/src/reader/meta_data_querier.cc
@@ -36,29 +36,33 @@
 
 MetadataQuerier::~MetadataQuerier() {}
 
-
-std::vector<std::shared_ptr<ChunkMeta>> MetadataQuerier::get_chunk_metadata_list(
-    const Path& path) const {
+std::vector<std::shared_ptr<ChunkMeta>>
+MetadataQuerier::get_chunk_metadata_list(const Path& path) const {
     // std::vector<std::shared_ptr<ChunkMeta>> chunk_meta_list;
     // if (device_chunk_meta_cache_->tryGet(path.device_, chunk_meta_list)) {
     //     return chunk_meta_list;
     // } else {
-    //     io_reader_->get_chunk_metadata_list(path.device_, path.measurement_, chunk_meta_list);
+    //     io_reader_->get_chunk_metadata_list(path.device_, path.measurement_,
+    //     chunk_meta_list);
     // }
     // return io_reader_->get_chunk_metadata_list(path);
     ASSERT(false);
     return {};
 }
 
-std::vector<std::vector<std::shared_ptr<ChunkMeta>>> MetadataQuerier::get_chunk_metadata_lists(
-    std::shared_ptr<IDeviceID> device_id, const std::unordered_set<std::string>& field_names,
+std::vector<std::vector<std::shared_ptr<ChunkMeta>>>
+MetadataQuerier::get_chunk_metadata_lists(
+    std::shared_ptr<IDeviceID> device_id,
+    const std::unordered_set<std::string>& field_names,
     const MetaIndexNode* field_node) const {
-    // return io_reader_->get_chunk_metadata_lists(device_id, field_names, field_node);
+    // return io_reader_->get_chunk_metadata_lists(device_id, field_names,
+    // field_node);
     ASSERT(false);
     return {};
 }
 
-std::map<Path, std::vector<std::shared_ptr<ChunkMeta>>> MetadataQuerier::get_chunk_metadata_map(const std::vector<Path>& paths) const {
+std::map<Path, std::vector<std::shared_ptr<ChunkMeta>>>
+MetadataQuerier::get_chunk_metadata_map(const std::vector<Path>& paths) const {
     // return io_reader_->get_chunk_metadata_map(paths);
     ASSERT(false);
     return {};
@@ -86,8 +90,7 @@
     return {};
 }
 
-void MetadataQuerier::clear() {
-}
+void MetadataQuerier::clear() {}
 
 std::unique_ptr<DeviceMetaIterator> MetadataQuerier::device_iterator(
     MetaIndexNode* root, const Filter* id_filter) {
@@ -95,8 +98,9 @@
         new DeviceMetaIterator(io_reader_, root, id_filter));
 }
 
-int MetadataQuerier::load_chunk_meta(const std::pair<IDeviceID, std::string>& key,
-                        std::vector<ChunkMeta*>& chunk_meta_list) {
+int MetadataQuerier::load_chunk_meta(
+    const std::pair<IDeviceID, std::string>& key,
+    std::vector<ChunkMeta*>& chunk_meta_list) {
     // return io_reader_->load_chunk_meta(key, chunk_meta_list);
     ASSERT(false);
     return common::E_NOT_SUPPORT;
diff --git a/cpp/src/reader/qds_with_timegenerator.cc b/cpp/src/reader/qds_with_timegenerator.cc
index fd0e129..61dd974 100644
--- a/cpp/src/reader/qds_with_timegenerator.cc
+++ b/cpp/src/reader/qds_with_timegenerator.cc
@@ -303,6 +303,7 @@
         index_lookup_.insert({paths[i].measurement_, i + 1});
         if (RET_FAIL(io_reader_->alloc_ssi(
                 paths[i].device_id_, paths[i].measurement_, va.ssi_, pa_))) {
+            return ret;
         } else {
             va.io_reader_ = io_reader_;
             data_types.push_back(va.value_col_iter_->get_data_type());
@@ -312,8 +313,9 @@
     result_set_metadata_ =
         std::make_shared<ResultSetMetadata>(column_names, data_types);
     row_record_ = new RowRecord(value_at_vec_.size() + 1);
-    tree_ = construct_node_tree(qe->expression_);
-    return E_OK;
+    ret = construct_node_tree(qe->expression_, tree_);
+    if (ret == E_NO_MORE_DATA) return E_OK;
+    return ret;
 }
 
 void destroy_node(Node *node) {
@@ -357,7 +359,8 @@
         return E_OK;
     }
     row_record_->set_timestamp(timestamp);
-    row_record_->get_field(0)->set_value(TSDataType::INT64, &timestamp, pa_);
+    row_record_->get_field(0)->set_value(TSDataType::INT64, &timestamp,
+                                         sizeof(timestamp), pa_);
 #if DEBUG_SE
     std::cout << "QDSWithTimeGenerator::get_next: timestamp=" << timestamp
               << ", will generate row at this timestamp." << std::endl;
@@ -367,7 +370,7 @@
         ValueAt &va = value_at_vec_[i];
         void *val_obj_ptr = va.at(timestamp);
         row_record_->get_field(i + 1)->set_value(va.data_type_, val_obj_ptr,
-                                                 pa_);
+                                                 get_len(va.data_type_), pa_);
     }
 
     tree_->next_timestamp(timestamp);
@@ -396,17 +399,18 @@
 std::shared_ptr<ResultSetMetadata> QDSWithTimeGenerator::get_metadata() {
     return result_set_metadata_;
 }
-Node *QDSWithTimeGenerator::construct_node_tree(Expression *expr) {
+
+int QDSWithTimeGenerator::construct_node_tree(Expression *expr, Node *&node) {
+    int ret = E_OK;
     if (expr->type_ == AND_EXPR || expr->type_ == OR_EXPR) {
-        Node *root = nullptr;
         if (expr->type_ == AND_EXPR) {
-            root = new Node(AND_NODE);
+            node = new Node(AND_NODE);
         } else {
-            root = new Node(OR_NODE);
+            node = new Node(OR_NODE);
         }
-        root->left_ = construct_node_tree(expr->left_);
-        root->right_ = construct_node_tree(expr->right_);
-        return root;
+        if (RET_FAIL(construct_node_tree(expr->left_, node->left_))) {
+        } else if (RET_FAIL(construct_node_tree(expr->right_, node->right_))) {
+        }
     } else if (expr->type_ == SERIES_EXPR) {
         Node *leaf = new Node(LEAF_NODE);
         Path &path = expr->series_path_;
@@ -414,12 +418,12 @@
                                         leaf->sss_.ssi_, pa_, expr->filter_);
         if (E_OK == ret) {
             leaf->sss_.init();
+            node = leaf;
         } else {
             // do nothing, this leaf node will return no data at all.
         }
-        return leaf;
     }
-    return nullptr;
+    return ret;
 }
 
 }  // namespace storage
diff --git a/cpp/src/reader/qds_with_timegenerator.h b/cpp/src/reader/qds_with_timegenerator.h
index c9202fe..648a1ce 100644
--- a/cpp/src/reader/qds_with_timegenerator.h
+++ b/cpp/src/reader/qds_with_timegenerator.h
@@ -126,7 +126,7 @@
     std::shared_ptr<ResultSetMetadata> get_metadata();
 
    private:
-    Node *construct_node_tree(Expression *expr);
+    int construct_node_tree(Expression *expr, Node *&node);
 
    private:
     RowRecord *row_record_;
diff --git a/cpp/src/reader/qds_without_timegenerator.cc b/cpp/src/reader/qds_without_timegenerator.cc
index 805c9c4..fb2ef6c 100644
--- a/cpp/src/reader/qds_without_timegenerator.cc
+++ b/cpp/src/reader/qds_without_timegenerator.cc
@@ -68,8 +68,9 @@
 
     for (size_t i = 0; i < path_count; i++) {
         get_next_tsblock(i, true);
-        data_types.push_back(value_iters_[i] != nullptr ?
-            value_iters_[i]->get_data_type() : TSDataType::NULL_TYPE);
+        data_types.push_back(value_iters_[i] != nullptr
+                                 ? value_iters_[i]->get_data_type()
+                                 : TSDataType::NULL_TYPE);
     }
     result_set_metadata_ =
         std::make_shared<ResultSetMetadata>(column_names, data_types);
@@ -117,15 +118,16 @@
     }
     int64_t time = heap_time_.begin()->first;
     row_record_->set_timestamp(time);
-    row_record_->get_field(0)->set_value(INT64, &time, pa_);
+    row_record_->get_field(0)->set_value(INT64, &time, get_len(INT64), pa_);
 
     uint32_t count = heap_time_.count(time);
     std::multimap<int64_t, uint32_t>::iterator iter = heap_time_.find(time);
     for (uint32_t i = 0; i < count; ++i) {
         uint32_t len = 0;
+        auto val_datatype = value_iters_[iter->second]->get_data_type();
+        void *val_ptr = value_iters_[iter->second]->read(&len);
         row_record_->get_field(iter->second + 1)
-            ->set_value(value_iters_[iter->second]->get_data_type(),
-                        value_iters_[iter->second]->read(&len), pa_);
+            ->set_value(val_datatype, val_ptr, len, pa_);
         value_iters_[iter->second]->next();
         if (!time_iters_[iter->second]->end()) {
             int64_t timev = *(int64_t *)(time_iters_[iter->second]->read(&len));
diff --git a/cpp/src/reader/query_data_set.h b/cpp/src/reader/query_data_set.h
new file mode 100644
index 0000000..8584276
--- /dev/null
+++ b/cpp/src/reader/query_data_set.h
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef READER_QUERY_DATA_SET_H
+#define READER_QUERY_DATA_SET_H
+
+#include "common/row_record.h"
+
+namespace storage {
+
+class QueryDataSet {
+   public:
+    QueryDataSet() {}
+    virtual ~QueryDataSet() {}
+    virtual RowRecord *get_next() = 0;
+};
+
+}  // namespace storage
+
+#endif  // READER_QUERY_DATA_SET_H
diff --git a/cpp/src/reader/result_set.h b/cpp/src/reader/result_set.h
index 59e0845..b0d45b0 100644
--- a/cpp/src/reader/result_set.h
+++ b/cpp/src/reader/result_set.h
@@ -20,6 +20,9 @@
 #ifndef READER_QUERY_DATA_SET_H
 #define READER_QUERY_DATA_SET_H
 
+#include <algorithm>
+#include <iostream>
+#include <string>
 #include <unordered_map>
 
 #include "common/row_record.h"
@@ -167,7 +170,32 @@
     virtual void close() = 0;
 
    protected:
-    std::unordered_map<std::string, uint32_t> index_lookup_;
+    struct CaseInsensitiveHash {
+        std::size_t operator()(const std::string& str) const {
+            std::string lowerStr = str;
+            std::transform(lowerStr.begin(), lowerStr.end(), lowerStr.begin(),
+                           [](unsigned char c) { return std::tolower(c); });
+            return std::hash<std::string>()(lowerStr);
+        }
+    };
+
+    struct CaseInsensitiveEqual {
+        bool operator()(const std::string& lhs, const std::string& rhs) const {
+            if (lhs.size() != rhs.size()) {
+                return false;
+            }
+            for (size_t i = 0; i < lhs.size(); ++i) {
+                if (std::tolower(lhs[i]) != std::tolower(rhs[i])) {
+                    return false;
+                }
+            }
+            return true;
+        }
+    };
+
+    std::unordered_map<std::string, uint32_t, CaseInsensitiveHash,
+                       CaseInsensitiveEqual>
+        index_lookup_;
     common::PageArena pa_;
 };
 
@@ -187,6 +215,22 @@
     return row_record->get_field(column_index)->get_string_value();
 }
 
+template <>
+inline std::tm ResultSet::get_value(const std::string& full_name) {
+    RowRecord* row_record = get_row_record();
+    ASSERT(index_lookup_.count(full_name));
+    uint32_t index = index_lookup_[full_name];
+    ASSERT(index >= 0 && index < row_record->get_col_num());
+    return row_record->get_field(index)->get_date_value();
+}
+template <>
+inline std::tm ResultSet::get_value(uint32_t column_index) {
+    column_index--;
+    RowRecord* row_record = get_row_record();
+    ASSERT(column_index >= 0 && column_index < row_record->get_col_num());
+    return row_record->get_field(column_index)->get_date_value();
+}
+
 }  // namespace storage
 
 #endif  // READER_QUERY_DATA_SET_H
diff --git a/cpp/src/reader/scan_iterator.cc b/cpp/src/reader/scan_iterator.cc
index 0dc2a48..b7d398d 100644
--- a/cpp/src/reader/scan_iterator.cc
+++ b/cpp/src/reader/scan_iterator.cc
@@ -165,14 +165,16 @@
         ////log_err("io_reader init error, ret=%d, file_path=%s",
         // ret, open_file->get_file_path().c_str());
     } else {
-        std::shared_ptr<IDeviceID> device_id = std::make_shared<StringArrayDeviceID>(col_schema_->get_device_name_str());
+        std::shared_ptr<IDeviceID> device_id =
+            std::make_shared<StringArrayDeviceID>(
+                col_schema_->get_device_name_str());
         std::string measurement_name = col_schema_->get_measurement_name_str();
         if (ssi_ != nullptr) {
             delete ssi_;
             ssi_ = nullptr;
         }
-        if (RET_FAIL(io_reader_.alloc_ssi(device_id, measurement_name, ssi_,
-                                          *pa))) {
+        if (RET_FAIL(
+                io_reader_.alloc_ssi(device_id, measurement_name, ssi_, *pa))) {
         }
     }
     return ret;
diff --git a/cpp/src/reader/scan_iterator.h b/cpp/src/reader/scan_iterator.h
index 9ae3011..3a15eb0 100644
--- a/cpp/src/reader/scan_iterator.h
+++ b/cpp/src/reader/scan_iterator.h
@@ -145,7 +145,10 @@
 class DataScanIterator {
    public:
     DataScanIterator()
-        : col_schema_(), page_arena_(), data_run_list_(&page_arena_), cursor_() {}
+        : col_schema_(),
+          page_arena_(),
+          data_run_list_(&page_arena_),
+          cursor_() {}
     ~DataScanIterator() {}
     int init() { return common::E_OK; }
     void destory() {
diff --git a/cpp/src/reader/table_query_executor.cc b/cpp/src/reader/table_query_executor.cc
index 97913c2..d09a5c9 100644
--- a/cpp/src/reader/table_query_executor.cc
+++ b/cpp/src/reader/table_query_executor.cc
@@ -31,8 +31,8 @@
     pa.init(512, common::MOD_TSFILE_READER);
     MetaIndexNode *table_root = nullptr;
     std::shared_ptr<TableSchema> table_schema;
-    if (RET_FAIL(file_metadata->get_table_metaindex_node(table_name,
-                                                         table_root))) {
+    if (RET_FAIL(
+            file_metadata->get_table_metaindex_node(table_name, table_root))) {
     } else if (RET_FAIL(
                    file_metadata->get_table_schema(table_name, table_schema))) {
     }
@@ -41,19 +41,22 @@
         ret_qds = nullptr;
         return ret;
     }
-    std::vector<std::string> std_column_names(columns);
-    for (auto &column : std_column_names) {
+    std::vector<std::string> lower_case_column_names(columns);
+    for (auto &column : lower_case_column_names) {
         to_lowercase_inplace(column);
     }
-    std::shared_ptr<ColumnMapping> column_mapping = std::make_shared<ColumnMapping>();
-    for (size_t i = 0; i < std_column_names.size(); ++i) {
-        column_mapping->add(std_column_names[i], static_cast<int>(i), *table_schema);
+    std::shared_ptr<ColumnMapping> column_mapping =
+        std::make_shared<ColumnMapping>();
+    for (size_t i = 0; i < lower_case_column_names.size(); ++i) {
+        column_mapping->add(lower_case_column_names[i], static_cast<int>(i),
+                            *table_schema);
     }
     std::vector<common::TSDataType> data_types;
-    data_types.reserve(columns.size());
-    for (size_t i = 0; i < columns.size(); ++i) {
-        auto ind = table_schema->find_column_index(columns[i]);
+    data_types.reserve(lower_case_column_names.size());
+    for (size_t i = 0; i < lower_case_column_names.size(); ++i) {
+        auto ind = table_schema->find_column_index(lower_case_column_names[i]);
         if (ind < 0) {
+            delete time_filter;
             return common::E_COLUMN_NOT_EXIST;
         }
         data_types.push_back(table_schema->get_data_types()[ind]);
@@ -61,7 +64,7 @@
     // column_mapping.add(*measurement_filter);
 
     auto device_task_iterator = std::unique_ptr<DeviceTaskIterator>(
-        new DeviceTaskIterator(std_column_names, table_root, column_mapping,
+        new DeviceTaskIterator(columns, table_root, column_mapping,
                                meta_data_querier_, id_filter, table_schema));
 
     std::unique_ptr<TsBlockReader> tsblock_reader;
@@ -77,8 +80,8 @@
             ret = common::E_UNSUPPORTED_ORDER;
     }
     assert(tsblock_reader != nullptr);
-    ret_qds = new TableResultSet(std::move(tsblock_reader), columns,
-                                 data_types);
+    ret_qds =
+        new TableResultSet(std::move(tsblock_reader), columns, data_types);
     return ret;
 }
 
diff --git a/cpp/src/reader/table_query_executor.h b/cpp/src/reader/table_query_executor.h
index 32d522c..83a82fe 100644
--- a/cpp/src/reader/table_query_executor.h
+++ b/cpp/src/reader/table_query_executor.h
@@ -27,8 +27,8 @@
 #include "reader/column_mapping.h"
 #include "reader/task/device_task_iterator.h"
 #include "result_set.h"
-#include "utils/errno_define.h"
 #include "table_result_set.h"
+#include "utils/errno_define.h"
 namespace storage {
 
 class DeviceTaskIterator;
@@ -64,10 +64,9 @@
     }
     int query(const std::string &table_name,
               const std::vector<std::string> &columns, Filter *time_filter,
-              Filter *id_filter, Filter *field_filter,
-              ResultSet *&ret_qds);
+              Filter *id_filter, Filter *field_filter, ResultSet *&ret_qds);
     void destroy_query_data_set(ResultSet *qds);
-    
+
    private:
     IMetadataQuerier *meta_data_querier_;
     TsFileIOReader *tsfile_io_reader_;
diff --git a/cpp/src/reader/table_result_set.cc b/cpp/src/reader/table_result_set.cc
index bbadc44..aeeefb4 100644
--- a/cpp/src/reader/table_result_set.cc
+++ b/cpp/src/reader/table_result_set.cc
@@ -70,11 +70,14 @@
         bool null = false;
         row_record_->reset();
         for (uint32_t i = 0; i < row_iterator_->get_column_count(); ++i) {
-            row_record_->get_field(i)->set_value(
-                row_iterator_->get_data_type(i),
-                row_iterator_->read(i, &len, &null), pa_);
+            const auto value = row_iterator_->read(i, &len, &null);
+            if (!null) {
+                row_record_->get_field(i)->set_value(
+                    row_iterator_->get_data_type(i), value, len, pa_);
+                row_iterator_->next(i);
+            }
         }
-        row_iterator_->next();
+        row_iterator_->update_row_id();
     }
     return ret;
 }
diff --git a/cpp/src/reader/task/device_query_task.cc b/cpp/src/reader/task/device_query_task.cc
index 0ec7a8d..3c87a89 100644
--- a/cpp/src/reader/task/device_query_task.cc
+++ b/cpp/src/reader/task/device_query_task.cc
@@ -33,7 +33,7 @@
     return task;
 }
 
- DeviceQueryTask::~DeviceQueryTask() {
+DeviceQueryTask::~DeviceQueryTask() {
     if (index_root_) {
         index_root_->~MetaIndexNode();
     }
diff --git a/cpp/src/reader/task/device_query_task.h b/cpp/src/reader/task/device_query_task.h
index d2a367f..8ba4dce 100644
--- a/cpp/src/reader/task/device_query_task.h
+++ b/cpp/src/reader/task/device_query_task.h
@@ -28,7 +28,7 @@
     DeviceQueryTask(std::shared_ptr<IDeviceID> device_id,
                     std::vector<std::string> column_names,
                     std::shared_ptr<ColumnMapping> column_mapping,
-                    MetaIndexNode* index_root,
+                    MetaIndexNode *index_root,
                     std::shared_ptr<TableSchema> table_schema)
         : device_id_(device_id),
           column_names_(column_names),
@@ -40,8 +40,9 @@
     static DeviceQueryTask *create_device_query_task(
         std::shared_ptr<IDeviceID> device_id,
         std::vector<std::string> column_names,
-        std::shared_ptr<ColumnMapping> column_mapping, MetaIndexNode* index_root,
-        std::shared_ptr<TableSchema> table_schema, common::PageArena &pa);
+        std::shared_ptr<ColumnMapping> column_mapping,
+        MetaIndexNode *index_root, std::shared_ptr<TableSchema> table_schema,
+        common::PageArena &pa);
 
     const std::vector<std::string> &get_column_names() const {
         return column_names_;
diff --git a/cpp/src/reader/task/device_task_iterator.cc b/cpp/src/reader/task/device_task_iterator.cc
index 18874ee..52ab97a 100644
--- a/cpp/src/reader/task/device_task_iterator.cc
+++ b/cpp/src/reader/task/device_task_iterator.cc
@@ -30,8 +30,8 @@
     if (RET_FAIL(device_meta_iterator_->next(device_meta_pair))) {
     } else {
         task = DeviceQueryTask::create_device_query_task(
-            device_meta_pair.first, column_names_, column_mapping_, device_meta_pair.second,
-            table_schema_, pa_);
+            device_meta_pair.first, column_names_, column_mapping_,
+            device_meta_pair.second, table_schema_, pa_);
     }
     return ret;
 }
diff --git a/cpp/src/reader/tsfile_executor.cc b/cpp/src/reader/tsfile_executor.cc
index 71c03f0..546a953 100644
--- a/cpp/src/reader/tsfile_executor.cc
+++ b/cpp/src/reader/tsfile_executor.cc
@@ -112,6 +112,9 @@
     return ret;
 }
 
-void TsFileExecutor::destroy_query_data_set(ResultSet *qds) { delete qds; }
+void TsFileExecutor::destroy_query_data_set(ResultSet *qds) {
+    delete qds;
+    qds = nullptr;
+}
 
 }  // namespace storage
diff --git a/cpp/src/reader/tsfile_reader.cc b/cpp/src/reader/tsfile_reader.cc
index bf3a2db..5dba982 100644
--- a/cpp/src/reader/tsfile_reader.cc
+++ b/cpp/src/reader/tsfile_reader.cc
@@ -27,8 +27,9 @@
 
 namespace storage {
 TsFileReader::TsFileReader()
-    : read_file_(nullptr), tsfile_executor_(nullptr), table_query_executor_(nullptr) {
-}
+    : read_file_(nullptr),
+      tsfile_executor_(nullptr),
+      table_query_executor_(nullptr) {}
 
 TsFileReader::~TsFileReader() { close(); }
 
@@ -83,12 +84,12 @@
     return ret;
 }
 
-int TsFileReader::query(const std::string &table_name,
-                        const std::vector<std::string> &columns_names,
+int TsFileReader::query(const std::string& table_name,
+                        const std::vector<std::string>& columns_names,
                         int64_t start_time, int64_t end_time,
-                        ResultSet *&result_set) {
+                        ResultSet*& result_set) {
     int ret = E_OK;
-    TsFileMeta *tsfile_meta = tsfile_executor_->get_tsfile_meta();
+    TsFileMeta* tsfile_meta = tsfile_executor_->get_tsfile_meta();
     if (tsfile_meta == nullptr) {
         return E_TSFILE_WRITER_META_ERR;
     }
@@ -101,11 +102,13 @@
     std::vector<TSDataType> data_types = table_schema->get_data_types();
 
     Filter* time_filter = new TimeBetween(start_time, end_time, false);
-    ret = table_query_executor_->query(to_lower(table_name), columns_names, time_filter, nullptr, nullptr, result_set);
+    ret =
+        table_query_executor_->query(to_lower(table_name), columns_names,
+                                     time_filter, nullptr, nullptr, result_set);
     return ret;
 }
 
-void TsFileReader::destroy_query_data_set(storage::ResultSet *qds) {
+void TsFileReader::destroy_query_data_set(storage::ResultSet* qds) {
     tsfile_executor_->destroy_query_data_set(qds);
 }
 
@@ -197,20 +200,22 @@
     return nullptr;
 }
 
-std::shared_ptr<TableSchema> TsFileReader::get_table_schema(const std::string &table_name) {
-    TsFileMeta *file_metadata = tsfile_executor_->get_tsfile_meta();
-    MetaIndexNode *table_root = nullptr;
+std::shared_ptr<TableSchema> TsFileReader::get_table_schema(
+    const std::string& table_name) {
+    TsFileMeta* file_metadata = tsfile_executor_->get_tsfile_meta();
+    MetaIndexNode* table_root = nullptr;
     std::shared_ptr<TableSchema> table_schema;
     if (IS_FAIL(file_metadata->get_table_metaindex_node(to_lower(table_name),
-                                                         table_root))) {
-    } else if (IS_FAIL(
-                   file_metadata->get_table_schema(to_lower(table_name), table_schema))) {
+                                                        table_root))) {
+    } else if (IS_FAIL(file_metadata->get_table_schema(to_lower(table_name),
+                                                       table_schema))) {
     }
     return table_schema;
 }
 
-std::vector<std::shared_ptr<TableSchema>> TsFileReader::get_all_table_schemas() {
-    TsFileMeta *file_metadata = tsfile_executor_->get_tsfile_meta();
+std::vector<std::shared_ptr<TableSchema>>
+TsFileReader::get_all_table_schemas() {
+    TsFileMeta* file_metadata = tsfile_executor_->get_tsfile_meta();
     std::vector<std::shared_ptr<TableSchema>> table_schemas;
     for (const auto& table_schema : file_metadata->table_schemas_) {
         table_schemas.push_back(table_schema.second);
@@ -218,5 +223,4 @@
     return table_schemas;
 }
 
-
 }  // namespace storage
diff --git a/cpp/src/utils/db_utils.h b/cpp/src/utils/db_utils.h
index 4b5aca9..90d0b96 100644
--- a/cpp/src/utils/db_utils.h
+++ b/cpp/src/utils/db_utils.h
@@ -150,87 +150,6 @@
     }
 };
 
-struct DeviceID {
-    NodeID db_nid_;
-    NodeID device_nid_;
-
-    DeviceID() : db_nid_(0), device_nid_(0) {}
-    DeviceID(const NodeID db_nid, const NodeID device_nid)
-        : db_nid_(db_nid), device_nid_(device_nid) {}
-    explicit DeviceID(const TsID &ts_id)
-        : db_nid_(ts_id.db_nid_), device_nid_(ts_id.device_nid_) {}
-
-    FORCE_INLINE bool operator==(const DeviceID &other) const {
-        return db_nid_ == other.db_nid_ && device_nid_ == other.device_nid_;
-    }
-    FORCE_INLINE bool operator!=(const DeviceID &other) const {
-        return db_nid_ != other.db_nid_ || device_nid_ != other.device_nid_;
-    }
-    FORCE_INLINE void from(const TsID &ts_id) {
-        db_nid_ = ts_id.db_nid_;
-        device_nid_ = ts_id.device_nid_;
-    }
-    FORCE_INLINE bool operator<(const DeviceID &that) const {
-        int32_t this_i32 = (((int32_t)db_nid_) << 16) | (device_nid_);
-        int32_t that_i32 = (((int32_t)that.db_nid_) << 16) | (that.device_nid_);
-        return this_i32 < that_i32;
-    }
-};
-
-#define INVALID_TTL (-1)
-
-// describe single database
-struct DatabaseDesc {
-    int64_t ttl_;
-    std::string db_name_;
-    TsID ts_id_;
-
-    DatabaseDesc() : ttl_(INVALID_TTL), db_name_(""), ts_id_() {}
-    DatabaseDesc(uint64_t ttl, const std::string &name, const TsID &ts_id)
-        : ttl_(ttl), db_name_(name), ts_id_(ts_id) {}
-};
-
-enum WALFlushPolicy {
-    WAL_DISABLED = 0,
-    WAL_ASYNC = 1,
-    WAL_FLUSH = 2,
-    WAL_SYNC = 3,
-};
-
-template <typename T>
-std::string to_string(const T &val) {
-    // todo: There may be a better way to avoid the memory problem of
-    // ostringstream
-    std::ostringstream oss;
-    oss << val;
-    return oss.str();
-}
-
-// TODO rename to DatabaseIdTTL
-struct DatabaseIdTTL {
-    NodeID db_nid_;
-    int64_t ttl_;
-    DatabaseIdTTL() {}
-    DatabaseIdTTL(NodeID db_nid, int64_t ttl) : db_nid_(db_nid), ttl_(ttl) {}
-    DatabaseIdTTL(const DatabaseIdTTL &other)
-        : db_nid_(other.db_nid_), ttl_(other.ttl_) {}
-    DatabaseIdTTL &operator=(const DatabaseIdTTL &other) {
-        this->db_nid_ = other.db_nid_;
-        this->ttl_ = other.ttl_;
-        return *this;
-    }
-    bool operator==(const DatabaseIdTTL &other) {
-        if (db_nid_ != other.db_nid_ || ttl_ != other.ttl_) {
-            return false;
-        }
-        return true;
-    }
-    friend std::ostream &operator<<(std::ostream &out, DatabaseIdTTL &di) {
-        out << "(" << di.db_nid_ << ", " << di.ttl_ << ")  ";
-        return out;
-    }
-};
-
 /**
  * @brief Represents the schema information for a single measurement.
  * @brief Represents the category of a column in a table schema.
@@ -274,8 +193,7 @@
      * is not empty.
      */
     ColumnSchema(std::string column_name, TSDataType data_type,
-                 CompressionType compression,
-                 TSEncoding encoding,
+                 CompressionType compression, TSEncoding encoding,
                  ColumnCategory column_category = ColumnCategory::FIELD)
         : column_name_(std::move(column_name)),
           data_type_(data_type),
@@ -284,21 +202,19 @@
           column_category_(column_category) {}
 
     ColumnSchema(std::string column_name, TSDataType data_type,
-             ColumnCategory column_category = ColumnCategory::FIELD)
-    : column_name_(std::move(column_name)),
-      data_type_(data_type),
-      compression_(get_default_compressor()),
-      encoding_(get_value_encoder(data_type)),
-      column_category_(column_category) {}
+                 ColumnCategory column_category = ColumnCategory::FIELD)
+        : column_name_(std::move(column_name)),
+          data_type_(data_type),
+          compression_(get_default_compressor()),
+          encoding_(get_value_encoder(data_type)),
+          column_category_(column_category) {}
 
     const std::string &get_column_name() const { return column_name_; }
     const TSDataType &get_data_type() const { return data_type_; }
     const ColumnCategory &get_column_category() const {
         return column_category_;
     }
-    const CompressionType &get_compression() const {
-        return compression_;
-    }
+    const CompressionType &get_compression() const { return compression_; }
     const TSEncoding &get_encoding() const { return encoding_; }
     bool operator==(const ColumnSchema &other) const {
         return (data_type_ == other.data_type_ &&
@@ -391,7 +307,6 @@
 #endif
 };
 
-
 FORCE_INLINE int64_t get_cur_timestamp() {
     int64_t timestamp = 0;
     struct timeval tv;
diff --git a/cpp/src/utils/errno_define.h b/cpp/src/utils/errno_define.h
index 8d87ade..df16c5f 100644
--- a/cpp/src/utils/errno_define.h
+++ b/cpp/src/utils/errno_define.h
@@ -73,6 +73,8 @@
 const int E_COLUMN_NOT_EXIST = 50;
 const int E_UNSUPPORTED_ORDER = 51;
 const int E_INVALID_NODE_TYPE = 52;
+const int E_ENCODE_ERR = 53;
+const int E_DECODE_ERR = 54;
 
 }  // end namespace common
 
diff --git a/cpp/src/utils/injection.h b/cpp/src/utils/injection.h
index 3175a2a..0ad79dd 100644
--- a/cpp/src/utils/injection.h
+++ b/cpp/src/utils/injection.h
@@ -51,9 +51,9 @@
     } while (0)
 
 // close injection
-#define DISABLE_INJECTION(inject_point_name)              \
-    do {                                                  \
-        g_all_inject_points.erase(inject_point_name);     \
+#define DISABLE_INJECTION(inject_point_name)          \
+    do {                                              \
+        g_all_inject_points.erase(inject_point_name); \
     } while (0)
 
 // the map save all inject points
diff --git a/cpp/src/utils/storage_utils.h b/cpp/src/utils/storage_utils.h
index 698a730..035bf6e 100644
--- a/cpp/src/utils/storage_utils.h
+++ b/cpp/src/utils/storage_utils.h
@@ -19,6 +19,9 @@
 #ifndef UTILS_STORAGE_UTILS_H
 #define UTILS_STORAGE_UTILS_H
 
+#include <inttypes.h>
+#include <stdint.h>
+
 #include <algorithm>
 
 #include "common/datatype/value.h"
@@ -75,12 +78,12 @@
     return oss.str();
 }
 
-static void to_lowercase_inplace(std::string &str) {
+FORCE_INLINE static void to_lowercase_inplace(std::string &str) {
     std::transform(
         str.begin(), str.end(), str.begin(),
         [](unsigned char c) -> unsigned char { return std::tolower(c); });
 }
-static std::string to_lower(const std::string &str) {
+FORCE_INLINE static std::string to_lower(const std::string &str) {
     std::string result;
     std::transform(
         str.begin(), str.end(), std::back_inserter(result),
diff --git a/cpp/src/writer/chunk_writer.cc b/cpp/src/writer/chunk_writer.cc
index 73618db..6736ba2 100644
--- a/cpp/src/writer/chunk_writer.cc
+++ b/cpp/src/writer/chunk_writer.cc
@@ -26,8 +26,8 @@
 namespace storage {
 
 int ChunkWriter::init(const ColumnSchema &col_schema) {
-    return init(col_schema.column_name_, col_schema.data_type_, col_schema.encoding_,
-                col_schema.compression_);
+    return init(col_schema.column_name_, col_schema.data_type_,
+                col_schema.encoding_, col_schema.compression_);
 }
 
 int ChunkWriter::init(const std::string &measurement_name, TSDataType data_type,
@@ -140,9 +140,11 @@
     first_page_statistic_->deep_copy_from(first_page_writer.get_statistic());
 }
 
-int ChunkWriter::write_first_page_data(ByteStream &pages_data, bool with_statistic) {
+int ChunkWriter::write_first_page_data(ByteStream &pages_data,
+                                       bool with_statistic) {
     int ret = E_OK;
-    if (with_statistic && RET_FAIL(first_page_statistic_->serialize_to(pages_data))) {
+    if (with_statistic &&
+        RET_FAIL(first_page_statistic_->serialize_to(pages_data))) {
     } else if (RET_FAIL(
                    pages_data.write_buf(first_page_data_.compressed_buf_,
                                         first_page_data_.compressed_size_))) {
diff --git a/cpp/src/writer/chunk_writer.h b/cpp/src/writer/chunk_writer.h
index 7add7eb..d12cf22 100644
--- a/cpp/src/writer/chunk_writer.h
+++ b/cpp/src/writer/chunk_writer.h
@@ -28,12 +28,9 @@
 
 namespace storage {
 
-#define CW_DO_WRITE_FOR_TYPE(TSDATATYPE)                      \
+#define CW_DO_WRITE_FOR_TYPE()                                \
     {                                                         \
         int ret = common::E_OK;                               \
-        if (UNLIKELY(data_type_ != TSDATATYPE)) {             \
-            return common::E_TYPE_NOT_MATCH;                  \
-        }                                                     \
         if (RET_FAIL(page_writer_.write(timestamp, value))) { \
             return ret;                                       \
         }                                                     \
@@ -66,22 +63,44 @@
     void destroy();
 
     FORCE_INLINE int write(int64_t timestamp, bool value) {
-        CW_DO_WRITE_FOR_TYPE(common::BOOLEAN);
+        if (UNLIKELY(data_type_ != common::BOOLEAN)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        CW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, int32_t value) {
-        CW_DO_WRITE_FOR_TYPE(common::INT32);
+        if (UNLIKELY(data_type_ != common::INT32 &&
+                     data_type_ != common::DATE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        CW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, int64_t value) {
-        CW_DO_WRITE_FOR_TYPE(common::INT64);
+        if (UNLIKELY(data_type_ != common::INT64 &&
+                     data_type_ != common::TIMESTAMP)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        CW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, float value) {
-        CW_DO_WRITE_FOR_TYPE(common::FLOAT);
+        if (UNLIKELY(data_type_ != common::FLOAT)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        CW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, double value) {
-        CW_DO_WRITE_FOR_TYPE(common::DOUBLE);
+        if (UNLIKELY(data_type_ != common::DOUBLE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        CW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, common::String value) {
-        CW_DO_WRITE_FOR_TYPE(common::STRING);
+        if (UNLIKELY(data_type_ != common::STRING &&
+                     data_type_ != common::TEXT &&
+                     data_type_ != common::BLOB)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        CW_DO_WRITE_FOR_TYPE();
     }
 
     int end_encode_chunk();
@@ -127,7 +146,8 @@
     }
     int seal_cur_page(bool end_chunk);
     void save_first_page_data(PageWriter &first_page_writer);
-    int write_first_page_data(common::ByteStream &pages_data, bool with_statistic = true);
+    int write_first_page_data(common::ByteStream &pages_data,
+                              bool with_statistic = true);
 
    private:
     common::TSDataType data_type_;
diff --git a/cpp/src/writer/page_writer.h b/cpp/src/writer/page_writer.h
index e60cc12..47ed329 100644
--- a/cpp/src/writer/page_writer.h
+++ b/cpp/src/writer/page_writer.h
@@ -79,10 +79,7 @@
         int ret = common::E_OK;                                                \
         /* std::cout << "page_writer writer: time=" << timestamp << ", value=" \
          * << value << std::endl; */                                           \
-        if (UNLIKELY(data_type_ != TSDATATYPE)) {                              \
-            ret = common::E_TYPE_NOT_MATCH;                                    \
-        } else if (RET_FAIL(                                                   \
-                       time_encoder_->encode(timestamp, time_out_stream_))) {  \
+        if (RET_FAIL(time_encoder_->encode(timestamp, time_out_stream_))) {    \
         } else if (RET_FAIL(                                                   \
                        value_encoder_->encode(value, value_out_stream_))) {    \
         } else {                                                               \
@@ -113,22 +110,44 @@
     void destroy();
 
     FORCE_INLINE int write(int64_t timestamp, bool value) {
-        PW_DO_WRITE_FOR_TYPE(common::BOOLEAN);
+        if (UNLIKELY(data_type_ != common::BOOLEAN)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        PW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, int32_t value) {
-        PW_DO_WRITE_FOR_TYPE(common::INT32);
+        if (UNLIKELY(data_type_ != common::INT32 &&
+                     data_type_ != common::DATE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        PW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, int64_t value) {
-        PW_DO_WRITE_FOR_TYPE(common::INT64);
+        if (UNLIKELY(data_type_ != common::INT64 &&
+                     data_type_ != common::TIMESTAMP)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        PW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, float value) {
-        PW_DO_WRITE_FOR_TYPE(common::FLOAT);
+        if (UNLIKELY(data_type_ != common::FLOAT)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        PW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, double value) {
-        PW_DO_WRITE_FOR_TYPE(common::DOUBLE);
+        if (UNLIKELY(data_type_ != common::DOUBLE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        PW_DO_WRITE_FOR_TYPE();
     }
     FORCE_INLINE int write(int64_t timestamp, common::String value) {
-        PW_DO_WRITE_FOR_TYPE(common::STRING);
+        if (UNLIKELY(data_type_ != common::STRING &&
+                     data_type_ != common::TEXT &&
+                     data_type_ != common::BLOB)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        PW_DO_WRITE_FOR_TYPE();
     }
 
     FORCE_INLINE uint32_t get_point_numer() const { return statistic_->count_; }
diff --git a/cpp/src/writer/time_chunk_writer.cc b/cpp/src/writer/time_chunk_writer.cc
index 892c0d1..81fafc5 100644
--- a/cpp/src/writer/time_chunk_writer.cc
+++ b/cpp/src/writer/time_chunk_writer.cc
@@ -191,4 +191,9 @@
                time_page_writer_.get_statistic()->get_type());
 }
 
+bool TimeChunkWriter::hasData() {
+    return num_of_pages_ > 0 || (time_page_writer_.get_statistic() != nullptr &&
+                                 time_page_writer_.get_statistic()->count_ > 0);
+}
+
 }  // end namespace storage
diff --git a/cpp/src/writer/time_chunk_writer.h b/cpp/src/writer/time_chunk_writer.h
index 8fcd9bd..aff8e2a 100644
--- a/cpp/src/writer/time_chunk_writer.h
+++ b/cpp/src/writer/time_chunk_writer.h
@@ -28,6 +28,8 @@
 
 namespace storage {
 
+// TODO: TimeChunkWriter, ValueChunkWriter, ChunkWriter can be further
+// abstracted.
 class TimeChunkWriter {
    public:
     static const int32_t PAGES_DATA_PAGE_SIZE = 1024;
@@ -68,6 +70,8 @@
 
     int64_t estimate_max_series_mem_size();
 
+    bool hasData();
+
    private:
     FORCE_INLINE bool is_cur_page_full() const {
         // FIXME
@@ -86,13 +90,14 @@
         // free memory
         first_page_data_.destroy();
         if (first_page_statistic_ != nullptr) {
-          StatisticFactory::free(first_page_statistic_);
-          first_page_statistic_ = nullptr;
+            StatisticFactory::free(first_page_statistic_);
+            first_page_statistic_ = nullptr;
         }
     }
     int seal_cur_page(bool end_chunk);
     void save_first_page_data(TimePageWriter &first_time_page_writer);
-    int write_first_page_data(common::ByteStream &pages_data, bool with_statistic = true);
+    int write_first_page_data(common::ByteStream &pages_data,
+                              bool with_statistic = true);
 
    private:
     TimePageWriter time_page_writer_;
diff --git a/cpp/src/writer/time_page_writer.h b/cpp/src/writer/time_page_writer.h
index bc5b821..bbf7016 100644
--- a/cpp/src/writer/time_page_writer.h
+++ b/cpp/src/writer/time_page_writer.h
@@ -73,6 +73,10 @@
 
     FORCE_INLINE int write(int64_t timestamp) {
         int ret = common::E_OK;
+        if (statistic_->count_ != 0 && is_inited_ &&
+            timestamp <= statistic_->end_time_) {
+            return common::E_OUT_OF_ORDER;
+        }
         if (RET_FAIL(time_encoder_->encode(timestamp, time_out_stream_))) {
         } else {
             statistic_->update(timestamp);
diff --git a/cpp/src/writer/tsfile_table_writer.cc b/cpp/src/writer/tsfile_table_writer.cc
index 942b15b..a42bd11 100644
--- a/cpp/src/writer/tsfile_table_writer.cc
+++ b/cpp/src/writer/tsfile_table_writer.cc
@@ -21,7 +21,8 @@
 
 storage::TsFileTableWriter::~TsFileTableWriter() = default;
 
-int storage::TsFileTableWriter::register_table(const std::shared_ptr<TableSchema>& table_schema) {
+int storage::TsFileTableWriter::register_table(
+    const std::shared_ptr<TableSchema>& table_schema) {
     int ret = tsfile_writer_->register_table(table_schema);
     // if multiple tables are registered, set
     exclusive_table_name_ = "";
@@ -35,7 +36,8 @@
     }
     if (tablet.get_table_name().empty()) {
         tablet.set_table_name(exclusive_table_name_);
-    } else if (!exclusive_table_name_.empty() && tablet.get_table_name() != exclusive_table_name_) {
+    } else if (!exclusive_table_name_.empty() &&
+               tablet.get_table_name() != exclusive_table_name_) {
         return common::E_TABLE_NOT_EXIST;
     }
     tablet.set_table_name(to_lower(tablet.get_table_name()));
diff --git a/cpp/src/writer/tsfile_writer.cc b/cpp/src/writer/tsfile_writer.cc
index 0f47756..54c1be0 100644
--- a/cpp/src/writer/tsfile_writer.cc
+++ b/cpp/src/writer/tsfile_writer.cc
@@ -313,6 +313,10 @@
             if (col_index == -1) {
                 return E_COLUMN_NOT_EXIST;
             }
+            if (table_schema->get_data_types()[col_index] !=
+                tablet.schema_vec_->at(i).data_type_) {
+                return E_TYPE_NOT_MATCH;
+            }
             const common::ColumnCategory column_category =
                 table_schema->get_column_categories()[col_index];
             tablet.column_categories_.emplace_back(column_category);
@@ -325,9 +329,11 @@
 }
 
 template <typename MeasurementNamesGetter>
-int TsFileWriter::do_check_schema(std::shared_ptr<IDeviceID> device_id,
-                                  MeasurementNamesGetter &measurement_names,
-                                  SimpleVector<ChunkWriter *> &chunk_writers) {
+int TsFileWriter::do_check_schema(
+    std::shared_ptr<IDeviceID> device_id,
+    MeasurementNamesGetter &measurement_names,
+    SimpleVector<ChunkWriter *> &chunk_writers,
+    SimpleVector<common::TSDataType> &data_types) {
     int ret = E_OK;
     DeviceSchemasMapIter dev_it = schemas_.find(device_id);
     MeasurementSchemaGroup *device_schema = nullptr;
@@ -342,6 +348,7 @@
         auto ms_iter = msm.find(measurement_names.next());
         if (UNLIKELY(ms_iter == msm.end())) {
             chunk_writers.push_back(NULL);
+            data_types.push_back(common::NULL_TYPE);
         } else {
             // In Java we will check data_type. But in C++, no check here.
             // Because checks are performed at the chunk layer and page layer
@@ -367,6 +374,7 @@
             } else {
                 chunk_writers.push_back(ms->chunk_writer_);
             }
+            data_types.push_back(ms->data_type_);
         }
     }
     return ret;
@@ -377,7 +385,8 @@
     std::shared_ptr<IDeviceID> device_id,
     MeasurementNamesGetter &measurement_names,
     storage::TimeChunkWriter *&time_chunk_writer,
-    common::SimpleVector<storage::ValueChunkWriter *> &value_chunk_writers) {
+    common::SimpleVector<storage::ValueChunkWriter *> &value_chunk_writers,
+    SimpleVector<common::TSDataType> &data_types) {
     int ret = E_OK;
     auto dev_it = schemas_.find(device_id);
     MeasurementSchemaGroup *device_schema = NULL;
@@ -398,6 +407,7 @@
         auto ms_iter = msm.find(measurement_names.next());
         if (UNLIKELY(ms_iter == msm.end())) {
             value_chunk_writers.push_back(NULL);
+            data_types.push_back(common::NULL_TYPE);
         } else {
             // Here we may check data_type against ms_iter. But in Java
             // libtsfile, no check here.
@@ -424,6 +434,7 @@
             } else {
                 value_chunk_writers.push_back(ms->value_chunk_writer_);
             }
+            data_types.push_back(ms->data_type_);
         }
     }
     return ret;
@@ -576,10 +587,11 @@
     int ret = E_OK;
     // std::vector<ChunkWriter*> chunk_writers;
     SimpleVector<ChunkWriter *> chunk_writers;
+    SimpleVector<common::TSDataType> data_types;
     MeasurementNamesFromRecord mnames_getter(record);
     if (RET_FAIL(do_check_schema(
             std::make_shared<StringArrayDeviceID>(record.device_id_),
-            mnames_getter, chunk_writers))) {
+            mnames_getter, chunk_writers, data_types))) {
         return ret;
     }
 
@@ -590,7 +602,8 @@
             continue;
         }
         // ignore point writer failure
-        write_point(chunk_writer, record.timestamp_, record.points_[c]);
+        write_point(chunk_writer, record.timestamp_, data_types[c],
+                    record.points_[c]);
     }
 
     record_count_since_last_flush_++;
@@ -601,11 +614,13 @@
 int TsFileWriter::write_record_aligned(const TsRecord &record) {
     int ret = E_OK;
     SimpleVector<ValueChunkWriter *> value_chunk_writers;
+    SimpleVector<common::TSDataType> data_types;
     TimeChunkWriter *time_chunk_writer;
     MeasurementNamesFromRecord mnames_getter(record);
     if (RET_FAIL(do_check_schema_aligned(
             std::make_shared<StringArrayDeviceID>(record.device_id_),
-            mnames_getter, time_chunk_writer, value_chunk_writers))) {
+            mnames_getter, time_chunk_writer, value_chunk_writers,
+            data_types))) {
         return ret;
     }
     if (value_chunk_writers.size() != record.points_.size()) {
@@ -618,29 +633,31 @@
             continue;
         }
         write_point_aligned(value_chunk_writer, record.timestamp_,
-                            record.points_[c]);
+                            data_types[c], record.points_[c]);
     }
     return ret;
 }
 
 int TsFileWriter::write_point(ChunkWriter *chunk_writer, int64_t timestamp,
+                              common::TSDataType data_type,
                               const DataPoint &point) {
-    switch (point.data_type_) {
+    switch (data_type) {
         case common::BOOLEAN:
             return chunk_writer->write(timestamp, point.u_.bool_val_);
+        case common::DATE:
         case common::INT32:
             return chunk_writer->write(timestamp, point.u_.i32_val_);
+        case common::TIMESTAMP:
         case common::INT64:
             return chunk_writer->write(timestamp, point.u_.i64_val_);
         case common::FLOAT:
             return chunk_writer->write(timestamp, point.u_.float_val_);
         case common::DOUBLE:
             return chunk_writer->write(timestamp, point.u_.double_val_);
+        case common::BLOB:
+        case common::TEXT:
         case common::STRING:
             return chunk_writer->write(timestamp, *point.u_.str_val_);
-        case common::TEXT:
-            ASSERT(false);
-            return E_OK;
         default:
             return E_INVALID_DATA_POINT;
     }
@@ -648,15 +665,18 @@
 
 int TsFileWriter::write_point_aligned(ValueChunkWriter *value_chunk_writer,
                                       int64_t timestamp,
+                                      common::TSDataType data_type,
                                       const DataPoint &point) {
     bool isnull = point.isnull;
-    switch (point.data_type_) {
+    switch (data_type) {
         case common::BOOLEAN:
             return value_chunk_writer->write(timestamp, point.u_.bool_val_,
                                              isnull);
         case common::INT32:
+        case common::DATE:
             return value_chunk_writer->write(timestamp, point.u_.i32_val_,
                                              isnull);
+        case common::TIMESTAMP:
         case common::INT64:
             return value_chunk_writer->write(timestamp, point.u_.i64_val_,
                                              isnull);
@@ -666,9 +686,11 @@
         case common::DOUBLE:
             return value_chunk_writer->write(timestamp, point.u_.double_val_,
                                              isnull);
+        case common::BLOB:
         case common::TEXT:
-            ASSERT(false);
-            return E_OK;
+        case common::STRING:
+            return value_chunk_writer->write(timestamp, point.u_.str_val_,
+                                             isnull);
         default:
             return E_INVALID_DATA_POINT;
     }
@@ -678,10 +700,12 @@
     int ret = E_OK;
     SimpleVector<ValueChunkWriter *> value_chunk_writers;
     TimeChunkWriter *time_chunk_writer = nullptr;
+    SimpleVector<common::TSDataType> data_types;
     MeasurementNamesFromTablet mnames_getter(tablet);
     if (RET_FAIL(do_check_schema_aligned(
             std::make_shared<StringArrayDeviceID>(tablet.insert_target_name_),
-            mnames_getter, time_chunk_writer, value_chunk_writers))) {
+            mnames_getter, time_chunk_writer, value_chunk_writers,
+            data_types))) {
         return ret;
     }
     time_write_column(time_chunk_writer, tablet);
@@ -702,10 +726,11 @@
 int TsFileWriter::write_tablet(const Tablet &tablet) {
     int ret = E_OK;
     SimpleVector<ChunkWriter *> chunk_writers;
+    SimpleVector<common::TSDataType> data_types;
     MeasurementNamesFromTablet mnames_getter(tablet);
     if (RET_FAIL(do_check_schema(
             std::make_shared<StringArrayDeviceID>(tablet.insert_target_name_),
-            mnames_getter, chunk_writers))) {
+            mnames_getter, chunk_writers, data_types))) {
         return ret;
     }
     ASSERT(chunk_writers.size() == tablet.get_column_count());
@@ -751,7 +776,9 @@
                 return ret;
             }
             for (int i = start_idx; i < end_idx; i++) {
-                time_chunk_writer->write(tablet.timestamps_[i]);
+                if (RET_FAIL(time_chunk_writer->write(tablet.timestamps_[i]))) {
+                    return ret;
+                }
             }
             uint32_t field_col_count = 0;
             for (uint32_t i = 0; i < tablet.get_column_count(); ++i) {
@@ -774,8 +801,9 @@
         } else {
             MeasurementNamesFromTablet mnames_getter(tablet);
             SimpleVector<ChunkWriter *> chunk_writers;
-            if (RET_FAIL(
-                    do_check_schema(device_id, mnames_getter, chunk_writers))) {
+            SimpleVector<common::TSDataType> data_types;
+            if (RET_FAIL(do_check_schema(device_id, mnames_getter,
+                                         chunk_writers, data_types))) {
                 return ret;
             }
             ASSERT(chunk_writers.size() == tablet.get_column_count());
@@ -878,29 +906,43 @@
     int64_t *timestamps = tablet.timestamps_;
     Tablet::ValueMatrixEntry col_values = tablet.value_matrix_[col_idx];
     BitMap &col_notnull_bitmap = tablet.bitmaps_[col_idx];
-
-    if (data_type == common::BOOLEAN) {
-        ret = write_typed_column(value_chunk_writer, timestamps,
-                                 (bool *)col_values.bool_data,
-                                 col_notnull_bitmap, start_idx, end_idx);
-    } else if (data_type == common::INT32) {
-        ret = write_typed_column(value_chunk_writer, timestamps,
-                                 (int32_t *)col_values.int32_data,
-                                 col_notnull_bitmap, start_idx, end_idx);
-    } else if (data_type == common::INT64) {
-        ret = write_typed_column(value_chunk_writer, timestamps,
-                                 (int64_t *)col_values.int64_data,
-                                 col_notnull_bitmap, start_idx, end_idx);
-    } else if (data_type == common::FLOAT) {
-        ret = write_typed_column(value_chunk_writer, timestamps,
-                                 (float *)col_values.float_data,
-                                 col_notnull_bitmap, start_idx, end_idx);
-    } else if (data_type == common::DOUBLE) {
-        ret = write_typed_column(value_chunk_writer, timestamps,
-                                 (double *)col_values.double_data,
-                                 col_notnull_bitmap, start_idx, end_idx);
-    } else {
-        return E_NOT_SUPPORT;
+    switch (data_type) {
+        case common::BOOLEAN:
+            ret = write_typed_column(value_chunk_writer, timestamps,
+                                     (bool *)col_values.bool_data,
+                                     col_notnull_bitmap, start_idx, end_idx);
+            break;
+        case common::DATE:
+        case common::INT32:
+            ret = write_typed_column(value_chunk_writer, timestamps,
+                                     (int32_t *)col_values.int32_data,
+                                     col_notnull_bitmap, start_idx, end_idx);
+            break;
+        case common::TIMESTAMP:
+        case common::INT64:
+            ret = write_typed_column(value_chunk_writer, timestamps,
+                                     (int64_t *)col_values.int64_data,
+                                     col_notnull_bitmap, start_idx, end_idx);
+            break;
+        case common::FLOAT:
+            ret = write_typed_column(value_chunk_writer, timestamps,
+                                     (float *)col_values.float_data,
+                                     col_notnull_bitmap, start_idx, end_idx);
+            break;
+        case common::DOUBLE:
+            ret = write_typed_column(value_chunk_writer, timestamps,
+                                     (double *)col_values.double_data,
+                                     col_notnull_bitmap, start_idx, end_idx);
+            break;
+        case common::STRING:
+        case common::TEXT:
+        case common::BLOB:
+            ret = write_typed_column(value_chunk_writer, timestamps,
+                                     (common::String *)col_values.string_data,
+                                     col_notnull_bitmap, start_idx, end_idx);
+            break;
+        default:
+            ret = E_NOT_SUPPORT;
     }
     return ret;
 }
@@ -1016,6 +1058,14 @@
     DO_VALUE_WRITE_TYPED_COLUMN();
 }
 
+int TsFileWriter::write_typed_column(ValueChunkWriter *value_chunk_writer,
+                                     int64_t *timestamps,
+                                     common::String *col_values,
+                                     common::BitMap &col_notnull_bitmap,
+                                     uint32_t start_idx, uint32_t end_idx) {
+    DO_VALUE_WRITE_TYPED_COLUMN();
+}
+
 // TODO make sure ret is meaningful to SDK user
 int TsFileWriter::flush() {
     int ret = E_OK;
@@ -1050,6 +1100,11 @@
 
 bool TsFileWriter::check_chunk_group_empty(MeasurementSchemaGroup *chunk_group,
                                            bool is_aligned) {
+    if (chunk_group->is_aligned_ &&
+        chunk_group->time_chunk_writer_ != nullptr &&
+        chunk_group->time_chunk_writer_->hasData()) {
+        return false;
+    }
     MeasurementSchemaMap &map = chunk_group->measurement_schema_map_;
     for (MeasurementSchemaMapIter ms_iter = map.begin(); ms_iter != map.end();
          ms_iter++) {
diff --git a/cpp/src/writer/tsfile_writer.h b/cpp/src/writer/tsfile_writer.h
index 1d2368f..bad9238 100644
--- a/cpp/src/writer/tsfile_writer.h
+++ b/cpp/src/writer/tsfile_writer.h
@@ -59,7 +59,8 @@
     void set_generate_table_schema(bool generate_table_schema);
     int register_timeseries(const std::string &device_id,
                             const MeasurementSchema &measurement_schema);
-    int register_timeseries(const std::string &device_path,
+    int register_timeseries(
+        const std::string &device_path,
         const std::vector<MeasurementSchema *> &measurement_schema_vec);
     int register_aligned_timeseries(
         const std::string &device_id,
@@ -103,11 +104,12 @@
 
    private:
     int write_point(storage::ChunkWriter *chunk_writer, int64_t timestamp,
-                    const DataPoint &point);
+                    common::TSDataType data_type, const DataPoint &point);
     bool check_chunk_group_empty(MeasurementSchemaGroup *chunk_group,
                                  bool is_aligned);
     int write_point_aligned(ValueChunkWriter *value_chunk_writer,
-                            int64_t timestamp, const DataPoint &point);
+                            int64_t timestamp, common::TSDataType data_type,
+                            const DataPoint &point);
     int flush_chunk_group(MeasurementSchemaGroup *chunk_group, bool is_aligned);
 
     int write_typed_column(storage::ChunkWriter *chunk_writer,
@@ -139,14 +141,16 @@
     int do_check_schema(
         std::shared_ptr<IDeviceID> device_id,
         MeasurementNamesGetter &measurement_names,
-        common::SimpleVector<storage::ChunkWriter *> &chunk_writers);
+        common::SimpleVector<storage::ChunkWriter *> &chunk_writers,
+        common::SimpleVector<common::TSDataType> &data_types);
 
     template <typename MeasurementNamesGetter>
     int do_check_schema_aligned(
         std::shared_ptr<IDeviceID> device_id,
         MeasurementNamesGetter &measurement_names,
         storage::TimeChunkWriter *&time_chunk_writer,
-        common::SimpleVector<storage::ValueChunkWriter *> &value_chunk_writers);
+        common::SimpleVector<storage::ValueChunkWriter *> &value_chunk_writers,
+        common::SimpleVector<common::TSDataType> &data_types);
     int do_check_schema_table(
         std::shared_ptr<IDeviceID> device_id, Tablet &tablet,
         storage::TimeChunkWriter *&time_chunk_writer,
@@ -210,8 +214,7 @@
 
     int value_write_column(ValueChunkWriter *value_chunk_writer,
                            const Tablet &tablet, int col_idx,
-                           uint32_t start_idx,
-                           uint32_t end_idx);
+                           uint32_t start_idx, uint32_t end_idx);
 };
 
 }  // end namespace storage
diff --git a/cpp/src/writer/value_chunk_writer.h b/cpp/src/writer/value_chunk_writer.h
index a3e3423..3aba7bb 100644
--- a/cpp/src/writer/value_chunk_writer.h
+++ b/cpp/src/writer/value_chunk_writer.h
@@ -28,12 +28,9 @@
 
 namespace storage {
 
-#define VCW_DO_WRITE_FOR_TYPE(TSDATATYPE, ISNULL)                           \
+#define VCW_DO_WRITE_FOR_TYPE(ISNULL)                                       \
     {                                                                       \
         int ret = common::E_OK;                                             \
-        if (UNLIKELY(data_type_ != TSDATATYPE)) {                           \
-            return common::E_TYPE_NOT_MATCH;                                \
-        }                                                                   \
         if (RET_FAIL(value_page_writer_.write(timestamp, value, ISNULL))) { \
             return ret;                                                     \
         }                                                                   \
@@ -66,19 +63,50 @@
     void destroy();
 
     FORCE_INLINE int write(int64_t timestamp, bool value, bool isnull) {
-        VCW_DO_WRITE_FOR_TYPE(common::BOOLEAN, isnull);
+        if (UNLIKELY(data_type_ != common::BOOLEAN)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VCW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, int32_t value, bool isnull) {
-        VCW_DO_WRITE_FOR_TYPE(common::INT32, isnull);
+        if (UNLIKELY(data_type_ != common::INT32 &&
+                     data_type_ != common::DATE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VCW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, int64_t value, bool isnull) {
-        VCW_DO_WRITE_FOR_TYPE(common::INT64, isnull);
+        if (UNLIKELY(data_type_ != common::INT64 &&
+                     data_type_ != common::TIMESTAMP)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VCW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, float value, bool isnull) {
-        VCW_DO_WRITE_FOR_TYPE(common::FLOAT, isnull);
+        if (UNLIKELY(data_type_ != common::FLOAT)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VCW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, double value, bool isnull) {
-        VCW_DO_WRITE_FOR_TYPE(common::DOUBLE, isnull);
+        if (UNLIKELY(data_type_ != common::DOUBLE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VCW_DO_WRITE_FOR_TYPE(isnull);
+    }
+
+    FORCE_INLINE int write(int64_t timestamp, common::String value,
+                           bool isnull) {
+        if (UNLIKELY(data_type_ != common::STRING &&
+                     data_type_ != common::TEXT &&
+                     data_type_ != common::BLOB)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VCW_DO_WRITE_FOR_TYPE(isnull);
     }
 
     int end_encode_chunk();
@@ -113,7 +141,8 @@
     }
     int seal_cur_page(bool end_chunk);
     void save_first_page_data(ValuePageWriter &first_page_writer);
-    int write_first_page_data(common::ByteStream &pages_data, bool with_statistic = true);
+    int write_first_page_data(common::ByteStream &pages_data,
+                              bool with_statistic = true);
 
    private:
     common::TSDataType data_type_;
diff --git a/cpp/src/writer/value_page_writer.h b/cpp/src/writer/value_page_writer.h
index 9cf44ae..4cf2ffa 100644
--- a/cpp/src/writer/value_page_writer.h
+++ b/cpp/src/writer/value_page_writer.h
@@ -63,13 +63,9 @@
     }
 };
 
-#define VPW_DO_WRITE_FOR_TYPE(TSDATATYPE, ISNULL)                         \
+#define VPW_DO_WRITE_FOR_TYPE(ISNULL)                                     \
     {                                                                     \
         int ret = common::E_OK;                                           \
-        if (UNLIKELY(data_type_ != TSDATATYPE)) {                         \
-            ret = common::E_TYPE_NOT_MATCH;                               \
-            return ret;                                                   \
-        }                                                                 \
         if ((size_ / 8) + 1 > col_notnull_bitmap_.size()) {               \
             col_notnull_bitmap_.push_back(0);                             \
         }                                                                 \
@@ -109,19 +105,50 @@
     void destroy();
 
     FORCE_INLINE int write(int64_t timestamp, bool value, bool isnull) {
-        VPW_DO_WRITE_FOR_TYPE(common::BOOLEAN, isnull);
+        if (UNLIKELY(data_type_ != common::BOOLEAN)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VPW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, int32_t value, bool isnull) {
-        VPW_DO_WRITE_FOR_TYPE(common::INT32, isnull);
+        if (UNLIKELY(data_type_ != common::INT32 &&
+                     data_type_ != common::DATE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VPW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, int64_t value, bool isnull) {
-        VPW_DO_WRITE_FOR_TYPE(common::INT64, isnull);
+        if (UNLIKELY(data_type_ != common::INT64 &&
+                     data_type_ != common::TIMESTAMP)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VPW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, float value, bool isnull) {
-        VPW_DO_WRITE_FOR_TYPE(common::FLOAT, isnull);
+        if (UNLIKELY(data_type_ != common::FLOAT)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VPW_DO_WRITE_FOR_TYPE(isnull);
     }
+
     FORCE_INLINE int write(int64_t timestamp, double value, bool isnull) {
-        VPW_DO_WRITE_FOR_TYPE(common::DOUBLE, isnull);
+        if (UNLIKELY(data_type_ != common::DOUBLE)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VPW_DO_WRITE_FOR_TYPE(isnull);
+    }
+
+    FORCE_INLINE int write(int64_t timestamp, common::String value,
+                           bool isnull) {
+        if (UNLIKELY(data_type_ != common::STRING &&
+                     data_type_ != common::TEXT &&
+                     data_type_ != common::BLOB)) {
+            return common::E_TYPE_NOT_MATCH;
+        }
+        VPW_DO_WRITE_FOR_TYPE(isnull);
     }
 
     FORCE_INLINE uint32_t get_point_numer() const { return statistic_->count_; }
diff --git a/cpp/test/common/allocator/byte_stream_test.cc b/cpp/test/common/allocator/byte_stream_test.cc
index b7177d1..6296e3a 100644
--- a/cpp/test/common/allocator/byte_stream_test.cc
+++ b/cpp/test/common/allocator/byte_stream_test.cc
@@ -283,4 +283,36 @@
     EXPECT_EQ(value_to_write, value_read);
 }
 
+TEST_F(SerializationUtilTest, WriteReadIntLEPaddedBitWidth_BitWidthTooLarge) {
+    int32_t value = 123;
+    EXPECT_EQ(SerializationUtil::write_int_little_endian_padded_on_bit_width(
+                  value, *byte_stream_, 40),
+              common::E_TSFILE_CORRUPTED);
+
+    byte_stream_->reset();
+    int32_t read_val = 0;
+    EXPECT_EQ(SerializationUtil::read_int_little_endian_padded_on_bit_width(
+                  *byte_stream_, 40, read_val),
+              common::E_TSFILE_CORRUPTED);
+}
+
+TEST_F(SerializationUtilTest, WriteReadIntLEPaddedBitWidthBoundaryValue) {
+    std::vector<int32_t> test_values = {
+        132100, 1, -1, 12345678, -87654321, INT32_MAX, INT32_MIN};
+    int bit_width = 32;
+    for (int32_t original_value : test_values) {
+        byte_stream_->reset();
+        EXPECT_EQ(
+            SerializationUtil::write_int_little_endian_padded_on_bit_width(
+                original_value, *byte_stream_, bit_width),
+            common::E_OK);
+        int32_t read_value = 0;
+        EXPECT_EQ(SerializationUtil::read_int_little_endian_padded_on_bit_width(
+                      *byte_stream_, bit_width, read_value),
+                  common::E_OK);
+        EXPECT_EQ(read_value, original_value)
+            << "Mismatch with bit_width = " << bit_width;
+    }
+}
+
 }  // namespace common
\ No newline at end of file
diff --git a/cpp/test/common/allocator/page_arena_test.cc b/cpp/test/common/allocator/page_arena_test.cc
index ff231e6..0d34e95 100644
--- a/cpp/test/common/allocator/page_arena_test.cc
+++ b/cpp/test/common/allocator/page_arena_test.cc
@@ -55,4 +55,30 @@
     page_arena.reset();
 }
 
+struct MyStruct {
+    double x;
+    int64_t y;
+    char buf[1721];
+};
+
+TEST(PageArenaAlignedTest, AlignmentWithBuildinTypes) {
+    PageArena page_arena;
+    int page_size = 1024;
+    page_arena.init(page_size, MOD_DEFAULT);
+    for (int i = 0; i < 10; i++) {
+        void* p1 = page_arena.alloc(sizeof(double));
+        ASSERT_NE(p1, nullptr);
+        EXPECT_EQ(reinterpret_cast<uintptr_t>(p1) % alignof(double), 0u);
+
+        void* p2 = page_arena.alloc(sizeof(int64_t));
+        ASSERT_NE(p2, nullptr);
+        EXPECT_EQ(reinterpret_cast<uintptr_t>(p2) % alignof(int64_t), 0u);
+
+        void* p3 = page_arena.alloc(sizeof(MyStruct));
+        ASSERT_NE(p3, nullptr);
+        EXPECT_EQ(reinterpret_cast<uintptr_t>(p3) % alignof(MyStruct), 0u);
+    }
+    page_arena.reset();
+}
+
 }  // namespace common
\ No newline at end of file
diff --git a/cpp/test/common/datatype/date_converter_test.cc b/cpp/test/common/datatype/date_converter_test.cc
new file mode 100644
index 0000000..ad59d57
--- /dev/null
+++ b/cpp/test/common/datatype/date_converter_test.cc
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "common/datatype/date_converter.h"
+
+#include <gtest/gtest.h>
+
+#include "common/datatype/value.h"
+#include "common/record.h"
+
+namespace common {
+
+class DateConverterTest : public ::testing::Test {
+   protected:
+    void SetUp() override {
+        // Initialize a valid date (2025-07-03)
+        valid_tm_ = {0, 0,  12, 3,
+                     6, 125};  // tm_mday=3, tm_mon=6 (July), tm_year=125 (2025)
+        valid_int_ = 20250703;
+    }
+
+    std::tm valid_tm_{};
+    int32_t valid_int_{};
+};
+
+// Test normal date conversion
+TEST_F(DateConverterTest, DateToIntValidDate) {
+    int32_t result;
+    ASSERT_EQ(DateConverter::date_to_int(valid_tm_, result), common::E_OK);
+    EXPECT_EQ(result, valid_int_);
+}
+
+TEST_F(DateConverterTest, IntToDateValidDate) {
+    std::tm result = {0};
+    ASSERT_EQ(DateConverter::int_to_date(valid_int_, result), common::E_OK);
+    EXPECT_EQ(result.tm_year, valid_tm_.tm_year);
+    EXPECT_EQ(result.tm_mon, valid_tm_.tm_mon);
+    EXPECT_EQ(result.tm_mday, valid_tm_.tm_mday);
+}
+
+// Test round-trip conversion consistency
+TEST_F(DateConverterTest, RoundTripConversion) {
+    std::tm tm_result = {0};
+    int32_t int_result;
+
+    // Forward conversion then backward conversion
+    ASSERT_EQ(DateConverter::date_to_int(valid_tm_, int_result), common::E_OK);
+    ASSERT_EQ(DateConverter::int_to_date(int_result, tm_result), common::E_OK);
+    EXPECT_EQ(tm_result.tm_year, valid_tm_.tm_year);
+    EXPECT_EQ(tm_result.tm_mon, valid_tm_.tm_mon);
+    EXPECT_EQ(tm_result.tm_mday, valid_tm_.tm_mday);
+}
+
+// Test boundary conditions (leap years, month days)
+TEST_F(DateConverterTest, BoundaryConditions) {
+    // Leap day (Feb 29, 2024)
+    std::tm leap_day = {0, 0, 12, 29, 1, 124};  // 2024-02-29
+    int32_t leap_int;
+    EXPECT_EQ(DateConverter::date_to_int(leap_day, leap_int), common::E_OK);
+
+    // Invalid leap day (Feb 29, 2025 - not a leap year)
+    std::tm invalid_leap = {0, 0, 12, 29, 1, 125};  // 2025-02-29
+    EXPECT_EQ(DateConverter::date_to_int(invalid_leap, leap_int),
+              common::E_INVALID_ARG);
+
+    // First and last day of month
+    std::tm first_day = {0, 0, 12, 1, 0, 125};   // 2025-01-01
+    std::tm last_day = {0, 0, 12, 31, 11, 125};  // 2025-12-31
+    EXPECT_EQ(DateConverter::date_to_int(first_day, leap_int), common::E_OK);
+    EXPECT_EQ(DateConverter::date_to_int(last_day, leap_int), common::E_OK);
+}
+
+// Test invalid inputs
+TEST_F(DateConverterTest, InvalidInputs) {
+    std::tm invalid_tm = {0, 0, 12, 32, 6, 125};  // 2025-07-32 (invalid day)
+    int32_t out_int;
+    EXPECT_EQ(DateConverter::date_to_int(invalid_tm, out_int),
+              common::E_INVALID_ARG);
+
+    // Year out of range
+    std::tm year_out_of_range = {0, 0, 12,
+                                 3, 6, -901};  // 0999-07-03 (year < 1000)
+    EXPECT_EQ(DateConverter::date_to_int(year_out_of_range, out_int),
+              common::E_INVALID_ARG);
+
+    // Invalid integer format
+    std::tm tm_result = {0};
+    EXPECT_EQ(DateConverter::int_to_date(20251301, tm_result),
+              common::E_INVALID_ARG);  // month=13
+    EXPECT_EQ(DateConverter::int_to_date(20250015, tm_result),
+              common::E_INVALID_ARG);  // month=0
+}
+
+// Test uninitialized fields
+TEST_F(DateConverterTest, UninitializedFields) {
+    std::tm uninitialized = {
+        0};  // tm_year etc. are 0 (not explicitly initialized)
+    uninitialized.tm_year = -1;  // Mark as invalid
+    int32_t out_int;
+    EXPECT_EQ(DateConverter::date_to_int(uninitialized, out_int),
+              common::E_INVALID_ARG);
+}
+
+}  // namespace common
\ No newline at end of file
diff --git a/cpp/test/common/device_id_test.cc b/cpp/test/common/device_id_test.cc
new file mode 100644
index 0000000..a72bd28
--- /dev/null
+++ b/cpp/test/common/device_id_test.cc
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "common/device_id.h"
+
+#include <gtest/gtest.h>
+
+#include "common/tablet.h"
+
+namespace storage {
+using namespace ::common;
+TEST(DeviceIdTest, NormalTest) {
+    std::string device_id_string = "root.db.tb.device1";
+    StringArrayDeviceID device_id = StringArrayDeviceID(device_id_string);
+    ASSERT_EQ("root.db.tb.device1", device_id.get_device_name());
+}
+
+TEST(DeviceIdTest, TabletDeviceId) {
+    std::vector<TSDataType> measurement_types{
+        TSDataType::STRING, TSDataType::STRING, TSDataType::STRING,
+        TSDataType::INT32};
+    std::vector<ColumnCategory> column_categories{
+        ColumnCategory::TAG, ColumnCategory::TAG, ColumnCategory::TAG,
+        ColumnCategory::FIELD};
+    std::vector<std::string> measurement_names{"tag1", "tag2", "tag3", "value"};
+
+    Tablet tablet("test_device0", measurement_names, measurement_types,
+                  column_categories);
+    tablet.add_timestamp(0, 1);
+    tablet.add_value(0, 0, "t1");
+    tablet.add_value(0, 1, "t2");
+    tablet.add_value(0, 2, "t3");
+    tablet.add_value(1, 0, "");
+    tablet.add_value(1, 1, "t2");
+    tablet.add_value(1, 2, "t3");
+    tablet.add_value(2, 1, "t2");
+    tablet.add_value(2, 2, "t3");
+    auto device_id = std::make_shared<StringArrayDeviceID>(
+        std::vector<std::string>({"test_device0", "t1", "t2", "t3"}));
+    auto device_id2 = tablet.get_device_id(0);
+    ASSERT_TRUE(*device_id2 == *device_id);
+
+    ASSERT_EQ("test_device0..t2.t3",
+              tablet.get_device_id(1)->get_device_name());
+    ASSERT_EQ("test_device0.null.t2.t3",
+              tablet.get_device_id(2)->get_device_name());
+}
+}  // namespace storage
diff --git a/cpp/test/common/record_test.cc b/cpp/test/common/record_test.cc
index 3a5c67b..808dafe 100644
--- a/cpp/test/common/record_test.cc
+++ b/cpp/test/common/record_test.cc
@@ -28,63 +28,54 @@
 TEST(DataPointTest, BoolConstructor) {
     DataPoint dp("touch_sensor", true);
     EXPECT_EQ(dp.measurement_name_, "touch_sensor");
-    EXPECT_EQ(dp.data_type_, common::BOOLEAN);
     EXPECT_TRUE(dp.u_.bool_val_);
 }
 
 TEST(DataPointTest, Int32Constructor) {
     DataPoint dp("temperature", int32_t(100));
     EXPECT_EQ(dp.measurement_name_, "temperature");
-    EXPECT_EQ(dp.data_type_, common::INT32);
     EXPECT_EQ(dp.u_.i32_val_, 100);
 }
 
 TEST(DataPointTest, Int64Constructor) {
     DataPoint dp("temperature", int64_t(100000));
     EXPECT_EQ(dp.measurement_name_, "temperature");
-    EXPECT_EQ(dp.data_type_, common::INT64);
     EXPECT_EQ(dp.u_.i64_val_, 100000);
 }
 
 TEST(DataPointTest, FloatConstructor) {
     DataPoint dp("temperature", float(36.6));
     EXPECT_EQ(dp.measurement_name_, "temperature");
-    EXPECT_EQ(dp.data_type_, common::FLOAT);
     EXPECT_FLOAT_EQ(dp.u_.float_val_, 36.6);
 }
 
 TEST(DataPointTest, DoubleConstructor) {
     DataPoint dp("temperature", double(36.6));
     EXPECT_EQ(dp.measurement_name_, "temperature");
-    EXPECT_EQ(dp.data_type_, common::DOUBLE);
     EXPECT_DOUBLE_EQ(dp.u_.double_val_, 36.6);
 }
 
 TEST(DataPointTest, SetInt32) {
     DataPoint dp("temperature");
     dp.set_i32(100);
-    EXPECT_EQ(dp.data_type_, common::INT32);
     EXPECT_EQ(dp.u_.i32_val_, 100);
 }
 
 TEST(DataPointTest, SetInt64) {
     DataPoint dp("temperature");
     dp.set_i64(100000);
-    EXPECT_EQ(dp.data_type_, common::INT64);
     EXPECT_EQ(dp.u_.i64_val_, 100000);
 }
 
 TEST(DataPointTest, SetFloat) {
     DataPoint dp("temperature");
     dp.set_float(36.6);
-    EXPECT_EQ(dp.data_type_, common::FLOAT);
     EXPECT_FLOAT_EQ(dp.u_.float_val_, 36.6);
 }
 
 TEST(DataPointTest, SetDouble) {
     DataPoint dp("temperature");
     dp.set_double(36.6);
-    EXPECT_EQ(dp.data_type_, common::DOUBLE);
     EXPECT_DOUBLE_EQ(dp.u_.double_val_, 36.6);
 }
 
@@ -106,7 +97,6 @@
     ts_record.add_point("temperature", 36.6);
     ASSERT_EQ(ts_record.points_.size(), 1);
     EXPECT_EQ(ts_record.points_[0].measurement_name_, "temperature");
-    EXPECT_EQ(ts_record.points_[0].data_type_, common::DOUBLE);
     EXPECT_DOUBLE_EQ(ts_record.points_[0].u_.double_val_, 36.6);
 }
 
@@ -119,7 +109,6 @@
     ASSERT_EQ(ts_record.points_.size(), 10000);
     for (int i = 0; i < 10000; i++) {
         EXPECT_EQ(ts_record.points_[i].measurement_name_, std::to_string(i));
-        EXPECT_EQ(ts_record.points_[i].data_type_, common::DOUBLE);
         EXPECT_DOUBLE_EQ(ts_record.points_[i].u_.double_val_, 36.6);
     }
 }
diff --git a/cpp/test/common/row_record_test.cc b/cpp/test/common/row_record_test.cc
index fd6e41f..964d055 100644
--- a/cpp/test/common/row_record_test.cc
+++ b/cpp/test/common/row_record_test.cc
@@ -22,6 +22,8 @@
 
 #include <vector>
 
+#include "common/tsblock/tuple_desc.h"
+
 namespace storage {
 
 TEST(FieldTest, DefaultConstructor) {
@@ -34,13 +36,6 @@
     EXPECT_EQ(field.type_, common::BOOLEAN);
 }
 
-TEST(FieldTest, FreeMemory) {
-    Field field(common::TEXT);
-    field.value_.sval_ = strdup("test");
-    field.free_memory();
-    EXPECT_EQ(field.value_.sval_, nullptr);
-}
-
 TEST(FieldTest, IsType) {
     Field field(common::BOOLEAN);
     EXPECT_TRUE(field.is_type(common::BOOLEAN));
@@ -60,14 +55,16 @@
 
 TEST(FieldTest, SetValue) {
     Field field;
-    common::PageArena pa; // dosen't matter
+    common::PageArena pa;  // dosen't matter
     int32_t i32_val = 123;
-    field.set_value(common::INT32, &i32_val, pa);
+    field.set_value(common::INT32, &i32_val, common::get_len(common::INT32),
+                    pa);
     EXPECT_EQ(field.type_, common::INT32);
     EXPECT_EQ(field.value_.ival_, 123);
 
     double d_val = 3.14;
-    field.set_value(common::DOUBLE, &d_val, pa);
+    field.set_value(common::DOUBLE, &d_val, common::get_len(common::DOUBLE),
+                    pa);
     EXPECT_EQ(field.type_, common::DOUBLE);
     EXPECT_DOUBLE_EQ(field.value_.dval_, 3.14);
 }
@@ -92,14 +89,6 @@
     delete field;
 }
 
-TEST(FieldTest, MakeLiteralString) {
-    char* text = strdup("test");
-    Field* field = make_literal(text);
-    EXPECT_EQ(field->type_, common::TEXT);
-    field->free_memory();
-    delete field;
-}
-
 TEST(FieldTest, MakeLiteralBool) {
     Field* field = make_literal(true);
     EXPECT_EQ(field->type_, common::BOOLEAN);
diff --git a/cpp/test/common/schema_test.cc b/cpp/test/common/schema_test.cc
index 2d0d793..1c908ab 100644
--- a/cpp/test/common/schema_test.cc
+++ b/cpp/test/common/schema_test.cc
@@ -33,23 +33,21 @@
 
 #if DEBUG_SE
 TEST(MeasurementSchemaTest, JavaCppGap) {
-    MeasurementSchema* measurement = new MeasurementSchema("measurement_name",
-        common::INT64, common::PLAIN, common::UNCOMPRESSED);
+    MeasurementSchema* measurement = new MeasurementSchema(
+        "measurement_name", common::INT64, common::PLAIN, common::UNCOMPRESSED);
     common::ByteStream stream(1024, common::MOD_DEFAULT);
     measurement->serialize_to(stream);
     auto buf_len = stream.total_size();
     auto buf = new char[buf_len];
     common::copy_bs_to_buf(stream, buf, buf_len);
     const ssize_t expected_size = 27;
-    uint8_t expected_buf[expected_size] = {0, 0, 0, 16, 109, 101, 97, 115, 117,
-                                           114, 101,
-                                           109, 101, 110, 116, 95, 110, 97, 109,
-                                           101, 2, 0,
-                                           0, 0, 0, 0, 0};
+    uint8_t expected_buf[expected_size] = {
+        0,   0,  0,   16, 109, 101, 97, 115, 117, 114, 101, 109, 101, 110,
+        116, 95, 110, 97, 109, 101, 2,  0,   0,   0,   0,   0,   0};
     for (int i = 0; i < expected_size; i++) {
         EXPECT_EQ(buf[i], expected_buf[i]);
     }
-    delete [] buf;
+    delete[] buf;
     delete measurement;
 }
 #endif
@@ -82,22 +80,19 @@
     int id_schema_num = 5;
     int measurement_schema_num = 5;
     for (int i = 0; i < id_schema_num; i++) {
-        measurement_schemas.emplace_back(
-            new MeasurementSchema(
-                "__level" + to_string(i), TSDataType::TEXT, TSEncoding::PLAIN,
-                CompressionType::UNCOMPRESSED));
+        measurement_schemas.emplace_back(new MeasurementSchema(
+            "__level" + to_string(i), TSDataType::TEXT, TSEncoding::PLAIN,
+            CompressionType::UNCOMPRESSED));
         column_categories.emplace_back(ColumnCategory::TAG);
     }
     for (int i = 0; i < measurement_schema_num; i++) {
-        measurement_schemas.emplace_back(
-            new MeasurementSchema(
-                "s" + to_string(i), TSDataType::INT64, TSEncoding::PLAIN,
-                CompressionType::UNCOMPRESSED));
+        measurement_schemas.emplace_back(new MeasurementSchema(
+            "s" + to_string(i), TSDataType::INT64, TSEncoding::PLAIN,
+            CompressionType::UNCOMPRESSED));
         column_categories.emplace_back(ColumnCategory::FIELD);
     }
-    auto table_schema = new TableSchema("test_table",
-                                        measurement_schemas,
-                                        column_categories);
+    auto table_schema =
+        new TableSchema("test_table", measurement_schemas, column_categories);
     common::ByteStream stream(1024, common::MOD_DEFAULT);
     table_schema->serialize_to(stream);
     delete table_schema;
@@ -106,35 +101,26 @@
     auto buf = new char[buf_len];
     common::copy_bs_to_buf(stream, buf, buf_len);
     const ssize_t expected_size = 201;
-    uint8_t expected_buf[expected_size] = {10, 0, 0, 0, 8, 95, 95, 108, 101,
-                                           118, 101, 108, 48, 5, 0, 0,
-                                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8,
-                                           95, 95, 108, 101,
-                                           118, 101, 108, 49, 5, 0, 0, 0, 0, 0,
-                                           0, 0, 0, 0, 0, 0,
-                                           0, 0, 8, 95, 95, 108, 101, 118, 101,
-                                           108, 50, 5, 0, 0, 0, 0,
-                                           0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 95, 95,
-                                           108, 101, 118, 101,
-                                           108, 51, 5, 0, 0, 0, 0, 0, 0, 0, 0,
-                                           0, 0, 0, 0, 0,
-                                           8, 95, 95, 108, 101, 118, 101, 108,
-                                           52, 5, 0, 0, 0, 0, 0, 0,
-                                           0, 0, 0, 0, 0, 0, 0, 2, 115, 48, 2,
-                                           0, 0, 0, 0, 0,
-                                           0, 0, 0, 0, 1, 0, 0, 0, 2, 115, 49,
-                                           2, 0, 0, 0, 0,
-                                           0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 115,
-                                           50, 2, 0, 0, 0,
-                                           0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 115,
-                                           51, 2, 0, 0,
-                                           0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2,
-                                           115, 52, 2, 0,
-                                           0, 0, 0, 0, 0, 0, 0, 0, 1};
+    uint8_t expected_buf[expected_size] = {
+        10,  0,   0,   0,   8,   95,  95,  108, 101, 118, 101, 108, 48,  5,
+        0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   8,
+        95,  95,  108, 101, 118, 101, 108, 49,  5,   0,   0,   0,   0,   0,
+        0,   0,   0,   0,   0,   0,   0,   0,   8,   95,  95,  108, 101, 118,
+        101, 108, 50,  5,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+        0,   0,   0,   8,   95,  95,  108, 101, 118, 101, 108, 51,  5,   0,
+        0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   8,   95,
+        95,  108, 101, 118, 101, 108, 52,  5,   0,   0,   0,   0,   0,   0,
+        0,   0,   0,   0,   0,   0,   0,   2,   115, 48,  2,   0,   0,   0,
+        0,   0,   0,   0,   0,   0,   1,   0,   0,   0,   2,   115, 49,  2,
+        0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   0,   0,   0,   2,
+        115, 50,  2,   0,   0,   0,   0,   0,   0,   0,   0,   0,   1,   0,
+        0,   0,   2,   115, 51,  2,   0,   0,   0,   0,   0,   0,   0,   0,
+        0,   1,   0,   0,   0,   2,   115, 52,  2,   0,   0,   0,   0,   0,
+        0,   0,   0,   0,   1};
     for (int i = 0; i < expected_size; i++) {
         EXPECT_EQ(buf[i], expected_buf[i]);
     }
-    delete [] buf;
+    delete[] buf;
 }
 #endif
-} // namespace storage
+}  // namespace storage
diff --git a/cpp/test/common/statistic_test.cc b/cpp/test/common/statistic_test.cc
index a41b29a..f5ccc94 100644
--- a/cpp/test/common/statistic_test.cc
+++ b/cpp/test/common/statistic_test.cc
@@ -221,4 +221,4 @@
     EXPECT_EQ(stat.end_time_, 2000);
 }
 
-} // namespace storage
\ No newline at end of file
+}  // namespace storage
\ No newline at end of file
diff --git a/cpp/test/common/tsfile_common_test.cc b/cpp/test/common/tsfile_common_test.cc
index dc9cebf..0b22b6a 100644
--- a/cpp/test/common/tsfile_common_test.cc
+++ b/cpp/test/common/tsfile_common_test.cc
@@ -23,446 +23,453 @@
 
 namespace storage {
 TEST(PageHeaderTest, DefaultConstructor) {
-  PageHeader header;
-  EXPECT_EQ(header.uncompressed_size_, 0);
-  EXPECT_EQ(header.compressed_size_, 0);
-  EXPECT_EQ(header.statistic_, nullptr);
+    PageHeader header;
+    EXPECT_EQ(header.uncompressed_size_, 0);
+    EXPECT_EQ(header.compressed_size_, 0);
+    EXPECT_EQ(header.statistic_, nullptr);
 }
 
 TEST(PageHeaderTest, Reset) {
-  PageHeader header;
-  header.uncompressed_size_ = 100;
-  header.compressed_size_ = 50;
-  header.statistic_ = StatisticFactory::alloc_statistic(common::BOOLEAN);
+    PageHeader header;
+    header.uncompressed_size_ = 100;
+    header.compressed_size_ = 50;
+    header.statistic_ = StatisticFactory::alloc_statistic(common::BOOLEAN);
 
-  header.reset();
-  EXPECT_EQ(header.uncompressed_size_, 0);
-  EXPECT_EQ(header.compressed_size_, 0);
+    header.reset();
+    EXPECT_EQ(header.uncompressed_size_, 0);
+    EXPECT_EQ(header.compressed_size_, 0);
 }
 
 TEST(ChunkHeaderTest, DefaultConstructor) {
-  ChunkHeader header;
-  EXPECT_EQ(header.measurement_name_, "");
-  EXPECT_EQ(header.data_size_, 0);
-  EXPECT_EQ(header.data_type_, common::INVALID_DATATYPE);
-  EXPECT_EQ(header.compression_type_, common::INVALID_COMPRESSION);
-  EXPECT_EQ(header.encoding_type_, common::INVALID_ENCODING);
-  EXPECT_EQ(header.num_of_pages_, 0);
-  EXPECT_EQ(header.serialized_size_, 0);
-  EXPECT_EQ(header.chunk_type_, 0);
+    ChunkHeader header;
+    EXPECT_EQ(header.measurement_name_, "");
+    EXPECT_EQ(header.data_size_, 0);
+    EXPECT_EQ(header.data_type_, common::INVALID_DATATYPE);
+    EXPECT_EQ(header.compression_type_, common::INVALID_COMPRESSION);
+    EXPECT_EQ(header.encoding_type_, common::INVALID_ENCODING);
+    EXPECT_EQ(header.num_of_pages_, 0);
+    EXPECT_EQ(header.serialized_size_, 0);
+    EXPECT_EQ(header.chunk_type_, 0);
 }
 
 TEST(ChunkHeaderTest, Reset) {
-  ChunkHeader header;
-  header.measurement_name_ = "test";
-  header.data_size_ = 100;
-  header.data_type_ = common::TSDataType::INT32;
-  header.compression_type_ = common::CompressionType::SNAPPY;
-  header.encoding_type_ = common::TSEncoding::PLAIN;
-  header.num_of_pages_ = 5;
-  header.serialized_size_ = 50;
-  header.chunk_type_ = 1;
+    ChunkHeader header;
+    header.measurement_name_ = "test";
+    header.data_size_ = 100;
+    header.data_type_ = common::TSDataType::INT32;
+    header.compression_type_ = common::CompressionType::SNAPPY;
+    header.encoding_type_ = common::TSEncoding::PLAIN;
+    header.num_of_pages_ = 5;
+    header.serialized_size_ = 50;
+    header.chunk_type_ = 1;
 
-  header.reset();
-  EXPECT_EQ(header.measurement_name_, "test");
-  EXPECT_EQ(header.data_size_, 0);
-  EXPECT_EQ(header.data_type_, common::INT32);
-  EXPECT_EQ(header.compression_type_, common::SNAPPY);
-  EXPECT_EQ(header.encoding_type_, common::PLAIN);
-  EXPECT_EQ(header.num_of_pages_, 0);
-  EXPECT_EQ(header.serialized_size_, 0);
-  EXPECT_EQ(header.chunk_type_, 0);
+    header.reset();
+    EXPECT_EQ(header.measurement_name_, "test");
+    EXPECT_EQ(header.data_size_, 0);
+    EXPECT_EQ(header.data_type_, common::INT32);
+    EXPECT_EQ(header.compression_type_, common::SNAPPY);
+    EXPECT_EQ(header.encoding_type_, common::PLAIN);
+    EXPECT_EQ(header.num_of_pages_, 0);
+    EXPECT_EQ(header.serialized_size_, 0);
+    EXPECT_EQ(header.chunk_type_, 0);
 }
 
 TEST(ChunkMetaTest, DefaultConstructor) {
-  ChunkMeta meta;
-  EXPECT_EQ(meta.offset_of_chunk_header_, 0);
-  EXPECT_EQ(meta.statistic_, nullptr);
-  EXPECT_EQ(meta.mask_, 0);
+    ChunkMeta meta;
+    EXPECT_EQ(meta.offset_of_chunk_header_, 0);
+    EXPECT_EQ(meta.statistic_, nullptr);
+    EXPECT_EQ(meta.mask_, 0);
 }
 
 TEST(ChunkMetaTest, Init) {
-  ChunkMeta meta;
-  char name[] = "test";
-  common::String measurement_name(name, sizeof(name));
-  Statistic stat;
-  common::TsID ts_id;
-  common::PageArena pa;
+    ChunkMeta meta;
+    char name[] = "test";
+    common::String measurement_name(name, sizeof(name));
+    Statistic stat;
+    common::TsID ts_id;
+    common::PageArena pa;
 
-  int ret = meta.init(measurement_name, common::TSDataType::INT32, 100, &stat, 1, common::PLAIN, common::UNCOMPRESSED, pa);
-  EXPECT_EQ(ret, common::E_OK);
-  EXPECT_EQ(meta.data_type_, common::TSDataType::INT32);
-  EXPECT_EQ(meta.offset_of_chunk_header_, 100);
-  EXPECT_EQ(meta.statistic_, &stat);
-  EXPECT_EQ(meta.mask_, 1);
+    int ret = meta.init(measurement_name, common::TSDataType::INT32, 100, &stat,
+                        1, common::PLAIN, common::UNCOMPRESSED, pa);
+    EXPECT_EQ(ret, common::E_OK);
+    EXPECT_EQ(meta.data_type_, common::TSDataType::INT32);
+    EXPECT_EQ(meta.offset_of_chunk_header_, 100);
+    EXPECT_EQ(meta.statistic_, &stat);
+    EXPECT_EQ(meta.mask_, 1);
 }
 
 TEST(ChunkGroupMetaTest, Constructor) {
-  common::PageArena pa;
-  ChunkGroupMeta group_meta(&pa);
-  EXPECT_EQ(group_meta.chunk_meta_list_.size(), 0);
+    common::PageArena pa;
+    ChunkGroupMeta group_meta(&pa);
+    EXPECT_EQ(group_meta.chunk_meta_list_.size(), 0);
 }
 
 TEST(ChunkGroupMetaTest, Init) {
-  common::PageArena pa;
-  ChunkGroupMeta group_meta(&pa);
-  int ret = group_meta.
-      init(std::make_shared<StringArrayDeviceID>("device_1"));
-  EXPECT_EQ(ret, common::E_OK);
+    common::PageArena pa;
+    ChunkGroupMeta group_meta(&pa);
+    int ret =
+        group_meta.init(std::make_shared<StringArrayDeviceID>("device_1"));
+    EXPECT_EQ(ret, common::E_OK);
 }
 
 TEST(ChunkGroupMetaTest, Push) {
-  common::PageArena pa;
-  ChunkGroupMeta group_meta(&pa);
-  ChunkMeta meta;
-  int ret = group_meta.push(&meta);
-  EXPECT_EQ(ret, common::E_OK);
-  EXPECT_EQ(group_meta.chunk_meta_list_.size(), 1);
+    common::PageArena pa;
+    ChunkGroupMeta group_meta(&pa);
+    ChunkMeta meta;
+    int ret = group_meta.push(&meta);
+    EXPECT_EQ(ret, common::E_OK);
+    EXPECT_EQ(group_meta.chunk_meta_list_.size(), 1);
 }
 
-class TimeseriesIndexTest : public ::testing::Test {
-};
+class TimeseriesIndexTest : public ::testing::Test {};
 
 TEST_F(TimeseriesIndexTest, ConstructorAndDestructor) {
-  TimeseriesIndex tsIndex;
-  EXPECT_EQ(tsIndex.get_data_type(), common::INVALID_DATATYPE);
-  EXPECT_EQ(tsIndex.get_statistic(), nullptr);
-  EXPECT_EQ(tsIndex.get_chunk_meta_list(), nullptr);
+    TimeseriesIndex tsIndex;
+    EXPECT_EQ(tsIndex.get_data_type(), common::INVALID_DATATYPE);
+    EXPECT_EQ(tsIndex.get_statistic(), nullptr);
+    EXPECT_EQ(tsIndex.get_chunk_meta_list(), nullptr);
 }
 
 TEST_F(TimeseriesIndexTest, ResetFunction) {
-  TimeseriesIndex tsIndex;
-  tsIndex.reset();
-  EXPECT_EQ(tsIndex.get_data_type(), common::VECTOR);
-  EXPECT_EQ(tsIndex.get_statistic(), nullptr);
-  EXPECT_EQ(tsIndex.get_chunk_meta_list(), nullptr);
+    TimeseriesIndex tsIndex;
+    tsIndex.reset();
+    EXPECT_EQ(tsIndex.get_data_type(), common::VECTOR);
+    EXPECT_EQ(tsIndex.get_statistic(), nullptr);
+    EXPECT_EQ(tsIndex.get_chunk_meta_list(), nullptr);
 }
 
 TEST_F(TimeseriesIndexTest, SerializeAndDeserialize) {
-  common::PageArena arena;
-  arena.init(1024, common::MOD_TIMESERIES_INDEX_OBJ);
-  TimeseriesIndex tsIndex;
-  common::ByteStream out(1024, common::MOD_TIMESERIES_INDEX_OBJ);
-  char name[] = "test_measurement";
-  common::String measurementName(name, sizeof(name));
-  tsIndex.set_measurement_name(measurementName);
-  tsIndex.set_ts_meta_type(1);
-  tsIndex.set_data_type(common::TSDataType::INT32);
-  tsIndex.init_statistic(common::TSDataType::INT32);
+    common::PageArena arena;
+    arena.init(1024, common::MOD_TIMESERIES_INDEX_OBJ);
+    TimeseriesIndex tsIndex;
+    common::ByteStream out(1024, common::MOD_TIMESERIES_INDEX_OBJ);
+    char name[] = "test_measurement";
+    common::String measurementName(name, sizeof(name));
+    tsIndex.set_measurement_name(measurementName);
+    tsIndex.set_ts_meta_type(1);
+    tsIndex.set_data_type(common::TSDataType::INT32);
+    tsIndex.init_statistic(common::TSDataType::INT32);
 
-  int ret = tsIndex.serialize_to(out);
-  EXPECT_EQ(ret, common::E_OK);
+    int ret = tsIndex.serialize_to(out);
+    EXPECT_EQ(ret, common::E_OK);
 
-  TimeseriesIndex tsIndexDeserialized;
-  ret = tsIndexDeserialized.deserialize_from(out, &arena);
-  EXPECT_EQ(ret, common::E_OK);
-  EXPECT_EQ(tsIndexDeserialized.get_data_type(), common::TSDataType::INT32);
+    TimeseriesIndex tsIndexDeserialized;
+    ret = tsIndexDeserialized.deserialize_from(out, &arena);
+    EXPECT_EQ(ret, common::E_OK);
+    EXPECT_EQ(tsIndexDeserialized.get_data_type(), common::TSDataType::INT32);
 }
 
 class TSMIteratorTest : public ::testing::Test {
-protected:
-  void SetUp() override {
-    arena.init(1024, common::MOD_DEFAULT);
-    chunk_group_meta_list_ =
-        new common::SimpleList<ChunkGroupMeta *>(&arena);
-    void *buf = arena.alloc(sizeof(ChunkGroupMeta));
-    auto chunk_group_meta = new(buf) ChunkGroupMeta(&arena);
-    chunk_group_meta->device_id_ = std::make_shared<StringArrayDeviceID>(
-        "device_1");
+   protected:
+    void SetUp() override {
+        arena.init(1024, common::MOD_DEFAULT);
+        chunk_group_meta_list_ =
+            new common::SimpleList<ChunkGroupMeta *>(&arena);
+        void *buf = arena.alloc(sizeof(ChunkGroupMeta));
+        auto chunk_group_meta = new (buf) ChunkGroupMeta(&arena);
+        chunk_group_meta->device_id_ =
+            std::make_shared<StringArrayDeviceID>("device_1");
 
-    buf = arena.alloc(sizeof(ChunkMeta));
-    auto chunk_meta = new(buf) ChunkMeta();
-    char measure_name[] = "measurement_1";
-    common::String measurement_name(measure_name, sizeof(measure_name));
-    stat_ = StatisticFactory::alloc_statistic(common::TSDataType::INT32);
-    chunk_meta->init(measurement_name, common::TSDataType::INT32, 100,
-                     stat_, 1, common::PLAIN, common::UNCOMPRESSED,
-                     arena);
+        buf = arena.alloc(sizeof(ChunkMeta));
+        auto chunk_meta = new (buf) ChunkMeta();
+        char measure_name[] = "measurement_1";
+        common::String measurement_name(measure_name, sizeof(measure_name));
+        stat_ = StatisticFactory::alloc_statistic(common::TSDataType::INT32);
+        chunk_meta->init(measurement_name, common::TSDataType::INT32, 100,
+                         stat_, 1, common::PLAIN, common::UNCOMPRESSED, arena);
 
-    chunk_group_meta->chunk_meta_list_.push_back(chunk_meta);
-    chunk_group_meta_list_->push_back(chunk_group_meta);
-  }
-
-  void TearDown() override {
-    for (auto iter = chunk_group_meta_list_->begin(); iter != chunk_group_meta_list_->end(); iter++) {
-      iter.get()->device_id_.reset();
+        chunk_group_meta->chunk_meta_list_.push_back(chunk_meta);
+        chunk_group_meta_list_->push_back(chunk_group_meta);
     }
-    delete chunk_group_meta_list_;
-    StatisticFactory::free(stat_);
-  }
 
-  common::PageArena arena;
-  Statistic *stat_;
-  common::SimpleList<ChunkGroupMeta *> *chunk_group_meta_list_;
+    void TearDown() override {
+        for (auto iter = chunk_group_meta_list_->begin();
+             iter != chunk_group_meta_list_->end(); iter++) {
+            iter.get()->device_id_.reset();
+        }
+        delete chunk_group_meta_list_;
+        StatisticFactory::free(stat_);
+    }
+
+    common::PageArena arena;
+    Statistic *stat_;
+    common::SimpleList<ChunkGroupMeta *> *chunk_group_meta_list_;
 };
 
 TEST_F(TSMIteratorTest, InitSuccess) {
-  TSMIterator iter(*chunk_group_meta_list_);
-  ASSERT_EQ(iter.init(), common::E_OK);
+    TSMIterator iter(*chunk_group_meta_list_);
+    ASSERT_EQ(iter.init(), common::E_OK);
 }
 
 TEST_F(TSMIteratorTest, InitEmptyList) {
-  common::PageArena arena;
-  common::SimpleList<ChunkGroupMeta *> empty_list(&arena);
-  TSMIterator iter(empty_list);
-  ASSERT_EQ(iter.init(), common::E_OK);
+    common::PageArena arena;
+    common::SimpleList<ChunkGroupMeta *> empty_list(&arena);
+    TSMIterator iter(empty_list);
+    ASSERT_EQ(iter.init(), common::E_OK);
 }
 
 TEST_F(TSMIteratorTest, HasNext) {
-  TSMIterator iter(*chunk_group_meta_list_);
-  iter.init();
-  ASSERT_TRUE(iter.has_next());
+    TSMIterator iter(*chunk_group_meta_list_);
+    iter.init();
+    ASSERT_TRUE(iter.has_next());
 }
 
 TEST_F(TSMIteratorTest, GetNext) {
-  TSMIterator iter(*chunk_group_meta_list_);
-  iter.init();
+    TSMIterator iter(*chunk_group_meta_list_);
+    iter.init();
 
-  std::shared_ptr<IDeviceID> ret_device_name;
-  common::String ret_measurement_name;
-  TimeseriesIndex ret_ts_index;
+    std::shared_ptr<IDeviceID> ret_device_name;
+    common::String ret_measurement_name;
+    TimeseriesIndex ret_ts_index;
 
-  ASSERT_TRUE(iter.has_next());
-  ASSERT_EQ(
-      iter.get_next(ret_device_name, ret_measurement_name, ret_ts_index),
-      common::E_OK);
-  common::PageArena arena;
-  char device_name[] = "device_1";
-  auto expect_str = std::make_shared<StringArrayDeviceID>(device_name);
+    ASSERT_TRUE(iter.has_next());
+    ASSERT_EQ(
+        iter.get_next(ret_device_name, ret_measurement_name, ret_ts_index),
+        common::E_OK);
+    common::PageArena arena;
+    char device_name[] = "device_1";
+    auto expect_str = std::make_shared<StringArrayDeviceID>(device_name);
 
-  ASSERT_TRUE(ret_device_name->operator==(*expect_str));
+    ASSERT_TRUE(ret_device_name->operator==(*expect_str));
 
-  ASSERT_EQ(
-      iter.get_next(ret_device_name, ret_measurement_name, ret_ts_index),
-      common::E_NO_MORE_DATA);
+    ASSERT_EQ(
+        iter.get_next(ret_device_name, ret_measurement_name, ret_ts_index),
+        common::E_NO_MORE_DATA);
 }
 
 class MetaIndexEntryTest : public ::testing::Test {
-protected:
-  common::PageArena pa_;
-  common::ByteStream *out_;
-  std::shared_ptr<MeasurementMetaIndexEntry> entry_;
+   protected:
+    common::PageArena pa_;
+    common::ByteStream *out_;
+    std::shared_ptr<MeasurementMetaIndexEntry> entry_;
 
-  void SetUp() override {
-    out_ = new common::ByteStream(1024, common::MOD_DEFAULT);
-    entry_ = std::make_shared<MeasurementMetaIndexEntry>();
-  }
+    void SetUp() override {
+        out_ = new common::ByteStream(1024, common::MOD_DEFAULT);
+        entry_ = std::make_shared<MeasurementMetaIndexEntry>();
+    }
 
-  void TearDown() override { delete out_; }
+    void TearDown() override { delete out_; }
 };
 
 TEST_F(MetaIndexEntryTest, InitSuccess) {
-  std::string name = "test_name";
-  int64_t offset = 123456;
-  ASSERT_EQ(entry_->init(name, offset, pa_), common::E_OK);
-  ASSERT_EQ(entry_->offset_, offset);
+    std::string name = "test_name";
+    int64_t offset = 123456;
+    ASSERT_EQ(entry_->init(name, offset, pa_), common::E_OK);
+    ASSERT_EQ(entry_->offset_, offset);
 }
 
 TEST_F(MetaIndexEntryTest, SerializeDeserialize) {
-  std::string name = "test_name";
-  int64_t offset = 123456;
-  entry_->init(name, offset, pa_);
+    std::string name = "test_name";
+    int64_t offset = 123456;
+    entry_->init(name, offset, pa_);
 
-  ASSERT_EQ(entry_->serialize_to(*out_), common::E_OK);
+    ASSERT_EQ(entry_->serialize_to(*out_), common::E_OK);
 
-  MeasurementMetaIndexEntry new_entry;
-  ASSERT_EQ(new_entry.deserialize_from(*out_, &pa_), common::E_OK);
-  ASSERT_EQ(new_entry.offset_, offset);
+    MeasurementMetaIndexEntry new_entry;
+    ASSERT_EQ(new_entry.deserialize_from(*out_, &pa_), common::E_OK);
+    ASSERT_EQ(new_entry.offset_, offset);
 }
 
 class MetaIndexNodeTest : public ::testing::Test {
-protected:
-  common::PageArena pa_;
-  common::ByteStream *out_;
-  MetaIndexNode node_;
+   protected:
+    common::PageArena pa_;
+    common::ByteStream *out_;
+    MetaIndexNode node_;
 
-  MetaIndexNodeTest()
-      : node_(&pa_) {
-  }
+    MetaIndexNodeTest() : node_(&pa_) {}
 
-  void SetUp() override {
-    out_ = new common::ByteStream(1024, common::MOD_DEFAULT);
-  }
+    void SetUp() override {
+        out_ = new common::ByteStream(1024, common::MOD_DEFAULT);
+    }
 
-  void TearDown() override { delete out_; }
+    void TearDown() override { delete out_; }
 };
 
 TEST_F(MetaIndexNodeTest, GetMeasurementFirstChild) {
-  ASSERT_EQ(node_.peek(), nullptr);
+    ASSERT_EQ(node_.peek(), nullptr);
 
-  auto entry = std::make_shared<MeasurementMetaIndexEntry>();
-  entry->init("child_name", 0, pa_);
-  node_.push_entry(entry);
+    auto entry = std::make_shared<MeasurementMetaIndexEntry>();
+    entry->init("child_name", 0, pa_);
+    node_.push_entry(entry);
 
-  ASSERT_EQ(node_.peek(), entry);
+    ASSERT_EQ(node_.peek(), entry);
 }
 
 TEST_F(MetaIndexNodeTest, GetDeviceFirstChild) {
-  ASSERT_EQ(node_.peek(), nullptr);
-  auto device_id = std::make_shared<StringArrayDeviceID>("device_1");
-  auto entry = std::make_shared<DeviceMetaIndexEntry>(device_id, 0);
-  node_.push_entry(entry);
+    ASSERT_EQ(node_.peek(), nullptr);
+    auto device_id = std::make_shared<StringArrayDeviceID>("device_1");
+    auto entry = std::make_shared<DeviceMetaIndexEntry>(device_id, 0);
+    node_.push_entry(entry);
 
-  ASSERT_EQ(node_.peek(), entry);
+    ASSERT_EQ(node_.peek(), entry);
 }
 
 TEST_F(MetaIndexNodeTest, MeasurementSerializeDeserialize) {
-  auto entry = std::make_shared<MeasurementMetaIndexEntry>();
-  entry->init("child_name", 123, pa_);
-  node_.push_entry(entry);
-  node_.end_offset_ = 456;
-  node_.node_type_ = LEAF_MEASUREMENT;
+    auto entry = std::make_shared<MeasurementMetaIndexEntry>();
+    entry->init("child_name", 123, pa_);
+    node_.push_entry(entry);
+    node_.end_offset_ = 456;
+    node_.node_type_ = LEAF_MEASUREMENT;
 
-  ASSERT_EQ(node_.serialize_to(*out_), common::E_OK);
+    ASSERT_EQ(node_.serialize_to(*out_), common::E_OK);
 
-  MetaIndexNode new_node(&pa_);
-  ASSERT_EQ(new_node.deserialize_from(*out_), common::E_OK);
-  ASSERT_EQ(new_node.end_offset_, 456);
-  ASSERT_EQ(new_node.node_type_, LEAF_MEASUREMENT);
+    MetaIndexNode new_node(&pa_);
+    ASSERT_EQ(new_node.deserialize_from(*out_), common::E_OK);
+    ASSERT_EQ(new_node.end_offset_, 456);
+    ASSERT_EQ(new_node.node_type_, LEAF_MEASUREMENT);
 
-  ASSERT_EQ(new_node.peek()->get_name(), entry->get_name());
-  ASSERT_EQ(new_node.peek()->get_offset(), entry->get_offset());
+    ASSERT_EQ(new_node.peek()->get_name(), entry->get_name());
+    ASSERT_EQ(new_node.peek()->get_offset(), entry->get_offset());
 }
 
 TEST_F(MetaIndexNodeTest, DeviceSerializeDeserialize) {
-  auto device_id = std::make_shared<StringArrayDeviceID>("device_1");
-  auto entry = std::make_shared<DeviceMetaIndexEntry>(device_id, 0);
-  node_.push_entry(entry);
-  node_.end_offset_ = 456;
-  node_.node_type_ = LEAF_DEVICE;
+    auto device_id = std::make_shared<StringArrayDeviceID>("device_1");
+    auto entry = std::make_shared<DeviceMetaIndexEntry>(device_id, 0);
+    node_.push_entry(entry);
+    node_.end_offset_ = 456;
+    node_.node_type_ = LEAF_DEVICE;
 
-  ASSERT_EQ(node_.serialize_to(*out_), common::E_OK);
+    ASSERT_EQ(node_.serialize_to(*out_), common::E_OK);
 
-  MetaIndexNode new_node(&pa_);
-  ASSERT_EQ(new_node.device_deserialize_from(*out_), common::E_OK);
-  ASSERT_EQ(new_node.end_offset_, 456);
-  ASSERT_EQ(new_node.node_type_, LEAF_DEVICE);
+    MetaIndexNode new_node(&pa_);
+    ASSERT_EQ(new_node.device_deserialize_from(*out_), common::E_OK);
+    ASSERT_EQ(new_node.end_offset_, 456);
+    ASSERT_EQ(new_node.node_type_, LEAF_DEVICE);
 
-  ASSERT_TRUE(new_node.peek()->get_device_id()->operator==(*entry->get_device_id()));
-  ASSERT_EQ(new_node.peek()->get_offset(), entry->get_offset());
+    ASSERT_TRUE(
+        new_node.peek()->get_device_id()->operator==(*entry->get_device_id()));
+    ASSERT_EQ(new_node.peek()->get_offset(), entry->get_offset());
 }
 
 class MetaIndexNodeSearchTest : public ::testing::Test {
-protected:
-  common::PageArena arena_;
-  MetaIndexNode node_;
-  std::shared_ptr<MeasurementMetaIndexEntry> entry1_ = std::make_shared<
-      MeasurementMetaIndexEntry>();
-  std::shared_ptr<MeasurementMetaIndexEntry> entry2_ = std::make_shared<
-      MeasurementMetaIndexEntry>();
-  std::shared_ptr<MeasurementMetaIndexEntry> entry3_ = std::make_shared<
-      MeasurementMetaIndexEntry>();
+   protected:
+    common::PageArena arena_;
+    MetaIndexNode node_;
+    std::shared_ptr<MeasurementMetaIndexEntry> entry1_ =
+        std::make_shared<MeasurementMetaIndexEntry>();
+    std::shared_ptr<MeasurementMetaIndexEntry> entry2_ =
+        std::make_shared<MeasurementMetaIndexEntry>();
+    std::shared_ptr<MeasurementMetaIndexEntry> entry3_ =
+        std::make_shared<MeasurementMetaIndexEntry>();
 
-  MetaIndexNodeSearchTest()
-      : node_(&arena_) {
-    entry1_->init("apple", 10, arena_);
-    entry2_->init("banana", 20, arena_);
-    entry3_->init("cherry", 30, arena_);
-    node_.children_.push_back(entry1_);
-    node_.children_.push_back(entry2_);
-    node_.children_.push_back(entry3_);
-    node_.end_offset_ = 40;
-    node_.pa_ = &arena_;
-  }
+    MetaIndexNodeSearchTest() : node_(&arena_) {
+        entry1_->init("apple", 10, arena_);
+        entry2_->init("banana", 20, arena_);
+        entry3_->init("cherry", 30, arena_);
+        node_.children_.push_back(entry1_);
+        node_.children_.push_back(entry2_);
+        node_.children_.push_back(entry3_);
+        node_.end_offset_ = 40;
+        node_.pa_ = &arena_;
+    }
 };
 
 TEST_F(MetaIndexNodeSearchTest, ExactSearchFound) {
-  const std::string ret_entry_name("");
-  std::shared_ptr<IMetaIndexEntry> ret_entry = std::make_shared<
-      MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
-  int64_t ret_offset = 0;
-  int result = node_.binary_search_children(
-      std::make_shared<StringComparable>("banana"),
-      true, ret_entry, ret_offset);
-  ASSERT_EQ(result, 0);
-  ASSERT_EQ(ret_offset, 30);
+    const std::string ret_entry_name("");
+    std::shared_ptr<IMetaIndexEntry> ret_entry =
+        std::make_shared<MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
+    int64_t ret_offset = 0;
+    int result = node_.binary_search_children(
+        std::make_shared<StringComparable>("banana"), true, ret_entry,
+        ret_offset);
+    ASSERT_EQ(result, 0);
+    ASSERT_EQ(ret_offset, 30);
 }
 
 TEST_F(MetaIndexNodeSearchTest, ExactSearchNotFound) {
-  const std::string ret_entry_name("");
-  std::shared_ptr<IMetaIndexEntry> ret_entry = std::make_shared<
-      MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
-  int64_t ret_offset = 0;
-  char search_name[] = "grape";
-  int result = node_.binary_search_children(
-      std::make_shared<StringComparable>(search_name),
-      true, ret_entry, ret_offset);
-  ASSERT_EQ(result, common::E_NOT_EXIST);
+    const std::string ret_entry_name("");
+    std::shared_ptr<IMetaIndexEntry> ret_entry =
+        std::make_shared<MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
+    int64_t ret_offset = 0;
+    char search_name[] = "grape";
+    int result = node_.binary_search_children(
+        std::make_shared<StringComparable>(search_name), true, ret_entry,
+        ret_offset);
+    ASSERT_EQ(result, common::E_NOT_EXIST);
 }
 
 TEST_F(MetaIndexNodeSearchTest, NonExactSearchFound) {
-  const std::string ret_entry_name("");
-  std::shared_ptr<IMetaIndexEntry> ret_entry = std::make_shared<
-      MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
-  int64_t ret_offset = 0;
-  char search_name[] = "blueberry";
-  int result = node_.binary_search_children(
-      std::make_shared<StringComparable>(search_name),
-      false, ret_entry, ret_offset);
-  ASSERT_EQ(result, 0);
-  ASSERT_EQ(ret_offset, 30);
+    const std::string ret_entry_name("");
+    std::shared_ptr<IMetaIndexEntry> ret_entry =
+        std::make_shared<MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
+    int64_t ret_offset = 0;
+    char search_name[] = "blueberry";
+    int result = node_.binary_search_children(
+        std::make_shared<StringComparable>(search_name), false, ret_entry,
+        ret_offset);
+    ASSERT_EQ(result, 0);
+    ASSERT_EQ(ret_offset, 30);
 }
 
 TEST_F(MetaIndexNodeSearchTest, NonExactSearchNotFound) {
-  const std::string ret_entry_name("");
-  std::shared_ptr<IMetaIndexEntry> ret_entry = std::make_shared<
-      MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
-  int64_t ret_offset = 0;
-  char search_name[] = "aardvark";
-  int result = node_.binary_search_children(
-      std::make_shared<StringComparable>(search_name),
-      false, ret_entry, ret_offset);
-  ASSERT_EQ(result, common::E_NOT_EXIST);
+    const std::string ret_entry_name("");
+    std::shared_ptr<IMetaIndexEntry> ret_entry =
+        std::make_shared<MeasurementMetaIndexEntry>(ret_entry_name, 0, arena_);
+    int64_t ret_offset = 0;
+    char search_name[] = "aardvark";
+    int result = node_.binary_search_children(
+        std::make_shared<StringComparable>(search_name), false, ret_entry,
+        ret_offset);
+    ASSERT_EQ(result, common::E_NOT_EXIST);
 }
 
 class TsFileMetaTest : public ::testing::Test {
-protected:
-  common::PageArena pa_;
-  common::ByteStream *out_;
-  TsFileMeta meta_;
+   protected:
+    common::PageArena pa_;
+    common::ByteStream *out_;
+    TsFileMeta meta_;
 
-  void SetUp() override {
-    out_ = new common::ByteStream(1024, common::MOD_DEFAULT);
-  }
+    void SetUp() override {
+        out_ = new common::ByteStream(1024, common::MOD_DEFAULT);
+    }
 
-  void TearDown() override {
-    delete out_;
-  }
+    void TearDown() override { delete out_; }
 };
 
 TEST_F(TsFileMetaTest, SerializeDeserialize) {
-  std::shared_ptr<IDeviceID> device_id = std::make_shared<StringArrayDeviceID>("device");
-  auto entry = std::make_shared<DeviceMetaIndexEntry>(device_id, 123);
-  auto index_node = std::make_shared<MetaIndexNode>(&pa_);
-  index_node->end_offset_ = 123456789;
-  index_node->children_.emplace_back(entry);
-  index_node->children_.emplace_back(entry);
-  std::string table_name = "table_name";
-  meta_.table_metadata_index_node_map_.insert(std::make_pair(table_name, index_node));
+    std::shared_ptr<IDeviceID> device_id =
+        std::make_shared<StringArrayDeviceID>("device");
+    auto entry = std::make_shared<DeviceMetaIndexEntry>(device_id, 123);
+    auto index_node = std::make_shared<MetaIndexNode>(&pa_);
+    index_node->end_offset_ = 123456789;
+    index_node->children_.emplace_back(entry);
+    index_node->children_.emplace_back(entry);
+    std::string table_name = "table_name";
+    meta_.table_metadata_index_node_map_.insert(
+        std::make_pair(table_name, index_node));
 
-  std::vector<MeasurementSchema* > column_schemas;
-  std::vector<common::ColumnCategory> column_categories;
-  column_categories.emplace_back(common::ColumnCategory::FIELD);
-  column_schemas.emplace_back(new MeasurementSchema());
+    std::vector<MeasurementSchema *> column_schemas;
+    std::vector<common::ColumnCategory> column_categories;
+    column_categories.emplace_back(common::ColumnCategory::FIELD);
+    column_schemas.emplace_back(new MeasurementSchema());
 
-  auto table_schema = std::make_shared<TableSchema>(table_name, column_schemas, column_categories);
+    auto table_schema = std::make_shared<TableSchema>(
+        table_name, column_schemas, column_categories);
 
-  meta_.table_schemas_.insert(std::make_pair(table_name, table_schema));
-  meta_.tsfile_properties_.insert(std::make_pair("key", "value"));
+    meta_.table_schemas_.insert(std::make_pair(table_name, table_schema));
+    meta_.tsfile_properties_.insert(
+        std::make_pair("key", new std::string("value")));
+    meta_.tsfile_properties_.insert(std::make_pair("null_key", nullptr));
 
-  meta_.meta_offset_ = 456;
-  void *buf = pa_.alloc(sizeof(BloomFilter));
-  meta_.bloom_filter_ = new(buf) BloomFilter();
-  meta_.bloom_filter_->init(0.1, 100);
+    meta_.meta_offset_ = 456;
+    void *buf = pa_.alloc(sizeof(BloomFilter));
+    meta_.bloom_filter_ = new (buf) BloomFilter();
+    meta_.bloom_filter_->init(0.1, 100);
 
-  meta_.serialize_to(*out_);
+    meta_.serialize_to(*out_);
 
-  TsFileMeta new_meta(&pa_);
-  new_meta.deserialize_from(*out_);
-  ASSERT_EQ(new_meta.meta_offset_, 456);
-  ASSERT_EQ(new_meta.table_metadata_index_node_map_.size(), 1);
-  ASSERT_EQ(new_meta.table_metadata_index_node_map_[table_name]->children_.size(), 2);
-  ASSERT_EQ(new_meta.table_schemas_.size(), 1);
-  ASSERT_EQ(new_meta.table_schemas_[table_name]->get_column_categories().size(), 1);
+    TsFileMeta new_meta(&pa_);
+    new_meta.deserialize_from(*out_);
+
+    ASSERT_EQ(new_meta.meta_offset_, 456);
+    ASSERT_EQ(new_meta.table_metadata_index_node_map_.size(), 1);
+    ASSERT_EQ(
+        new_meta.table_metadata_index_node_map_[table_name]->children_.size(),
+        2);
+    ASSERT_EQ(new_meta.table_schemas_.size(), 1);
+    ASSERT_EQ(
+        new_meta.table_schemas_[table_name]->get_column_categories().size(), 1);
+    ASSERT_EQ(*new_meta.tsfile_properties_["key"], std::string("value"));
+    ASSERT_EQ(new_meta.tsfile_properties_["null_key"], nullptr);
 }
-} // namespace storage
+}  // namespace storage
diff --git a/cpp/test/compress/gzip_compressor_test.cc b/cpp/test/compress/gzip_compressor_test.cc
index a8929f2..a6fb712 100644
--- a/cpp/test/compress/gzip_compressor_test.cc
+++ b/cpp/test/compress/gzip_compressor_test.cc
@@ -108,7 +108,7 @@
     compressor.compress(uncompressed.data(), uncompressed.size(),
                         compressed_buf, compressed_buf_len);
     compressor.after_compress(compressed_buf);
-    
+
     compressor.reset(true);
     compressor.compress(uncompressed.data(), uncompressed.size(),
                         compressed_buf, compressed_buf_len_new);
diff --git a/cpp/test/cwrapper/c_release_test.cc b/cpp/test/cwrapper/c_release_test.cc
index 0ccb0cd..692f104 100644
--- a/cpp/test/cwrapper/c_release_test.cc
+++ b/cpp/test/cwrapper/c_release_test.cc
@@ -69,7 +69,8 @@
     remove("test_empty_schema.tsfile");
 
     // Invalid schema with memory threshold
-    file = write_file_new("test_empty_schema_memory_threshold.tsfile", &error_code);
+    file = write_file_new("test_empty_schema_memory_threshold.tsfile",
+                          &error_code);
     ASSERT_EQ(RET_OK, error_code);
     // Invalid schema
     writer = tsfile_writer_new_with_memory_threshold(file, &test_schema, 100,
@@ -108,7 +109,6 @@
 
     free_table_schema(table_schema);
     free_table_schema(test_schema);
-
 }
 
 TEST_F(CReleaseTest, TsFileWriterWriteDataAbnormalColumn) {
@@ -219,8 +219,8 @@
 TEST_F(CReleaseTest, TsFileWriterMultiDataType) {
     ERRNO error_code = RET_OK;
     remove("TsFileWriterMultiDataType.tsfile");
-    WriteFile file = write_file_new(
-        "TsFileWriterMultiDataType.tsfile", &error_code);
+    WriteFile file =
+        write_file_new("TsFileWriterMultiDataType.tsfile", &error_code);
     ASSERT_EQ(RET_OK, error_code);
     TableSchema all_type_schema;
     all_type_schema.table_name = strdup("All_Datatype");
@@ -291,8 +291,8 @@
     ASSERT_EQ(RET_OK, tsfile_writer_close(writer));
     free_write_file(&file);
 
-    TsFileReader reader = tsfile_reader_new(
-        "TsFileWriterMultiDataType.tsfile", &error_code);
+    TsFileReader reader =
+        tsfile_reader_new("TsFileWriterMultiDataType.tsfile", &error_code);
     ASSERT_EQ(RET_OK, error_code);
     ResultSet result_set = tsfile_query_table(
         reader, "all_datatype", column_list, 6, 0, 1000, &error_code);
@@ -307,9 +307,9 @@
         ASSERT_EQ("device1", std::string(str_value));
         free(str_value);
         ASSERT_EQ(value, tsfile_result_set_get_value_by_name_int32_t(result_set,
-                                                                     "INT32"));
+                                                                     "int32"));
         ASSERT_EQ(value * 100, tsfile_result_set_get_value_by_name_int64_t(
-                                   result_set, "INT64"));
+                                   result_set, "int64"));
         ASSERT_EQ(value * 100.0, tsfile_result_set_get_value_by_name_float(
                                      result_set, "FLOAT"));
 
diff --git a/cpp/test/cwrapper/cwrapper_test.cc b/cpp/test/cwrapper/cwrapper_test.cc
index 60eeabd..90a93fb 100644
--- a/cpp/test/cwrapper/cwrapper_test.cc
+++ b/cpp/test/cwrapper/cwrapper_test.cc
@@ -116,7 +116,6 @@
     ASSERT_EQ(code, RET_OK);
     ASSERT_EQ(tsfile_writer_close(writer), 0);
 
-
     TsFileReader reader =
         tsfile_reader_new("cwrapper_write_flush_and_read.tsfile", &code);
     ASSERT_EQ(code, 0);
@@ -182,6 +181,5 @@
     free(column_names);
     free(data_types);
     free_write_file(&file);
-
 }
 }  // namespace cwrapper
\ No newline at end of file
diff --git a/cpp/test/encoding/dictionary_codec_test.cc b/cpp/test/encoding/dictionary_codec_test.cc
index f41fc2b..f9d814c 100644
--- a/cpp/test/encoding/dictionary_codec_test.cc
+++ b/cpp/test/encoding/dictionary_codec_test.cc
@@ -18,7 +18,9 @@
  */
 #include <gtest/gtest.h>
 
+#include <random>
 #include <string>
+#include <unordered_set>
 #include <vector>
 
 #include "encoding/dictionary_decoder.h"
@@ -80,21 +82,91 @@
     ASSERT_EQ(decoder.read_string(stream), "apple");
 }
 
-TEST_F(DictionaryTest, DictionaryEncoderAndDecoderLargeQuantities) {
+TEST_F(DictionaryTest, DictionaryEncoderAndDecoderOneItem) {
     DictionaryEncoder encoder;
     common::ByteStream stream(1024, common::MOD_DICENCODE_OBJ);
     encoder.init();
 
-    for (int64_t value = 1; value < 10000; value++) {
-        encoder.encode(std::to_string(value), stream);
+    encoder.encode("apple", stream);
+    encoder.flush(stream);
+
+    DictionaryDecoder decoder;
+    decoder.init();
+
+    ASSERT_TRUE(decoder.has_next(stream));
+    ASSERT_EQ(decoder.read_string(stream), "apple");
+
+    ASSERT_FALSE(decoder.has_next(stream));
+}
+
+TEST_F(DictionaryTest, DictionaryEncoderAndDecoderRepeatedItems) {
+    DictionaryEncoder encoder;
+    common::ByteStream stream(1024, common::MOD_DICENCODE_OBJ);
+    encoder.init();
+
+    for (char c = 'a'; c <= 'z'; c++) {
+        for (int i = 0; i < 100; i++) {
+            encoder.encode(std::string(c, 3), stream);
+        }
     }
     encoder.flush(stream);
 
     DictionaryDecoder decoder;
     decoder.init();
 
-    for (int64_t value = 1; value < 10000; value++) {
-        ASSERT_EQ(decoder.read_string(stream), std::to_string(value));
+    for (char c = 'a'; c <= 'z'; c++) {
+        for (int i = 0; i < 100; i++) {
+            ASSERT_EQ(decoder.read_string(stream), std::string(c, 3));
+        }
+    }
+}
+
+TEST_F(DictionaryTest,
+       DictionaryEncoderAndDecoderLargeQuantitiesWithRandomStrings) {
+    DictionaryEncoder encoder;
+    common::ByteStream stream(1024, common::MOD_DICENCODE_OBJ);
+    encoder.init();
+
+    // Prepare random string generator
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<> length_dist(5, 20);  // String length range
+    std::uniform_int_distribution<> char_dist(33,
+                                              126);  // Printable ASCII range
+
+    // Generate 10000 random strings
+    const int num_strings = 10000;
+    std::vector<std::string> test_strings;
+    std::unordered_set<std::string> string_set;  // For ensuring uniqueness
+
+    while (test_strings.size() < num_strings) {
+        int length = length_dist(gen);
+        std::string str;
+        str.reserve(length);
+
+        for (int i = 0; i < length; ++i) {
+            str.push_back(static_cast<char>(char_dist(gen)));
+        }
+
+        // Ensure string uniqueness
+        if (string_set.insert(str).second) {
+            test_strings.push_back(str);
+        }
+    }
+
+    // Encode all strings
+    for (const auto& str : test_strings) {
+        encoder.encode(str, stream);
+    }
+    encoder.flush(stream);
+
+    DictionaryDecoder decoder;
+    decoder.init();
+
+    // Decode and verify all strings
+    for (const auto& expected_str : test_strings) {
+        std::string decoded_str = decoder.read_string(stream);
+        ASSERT_EQ(decoded_str, expected_str);
     }
 }
 
diff --git a/cpp/test/encoding/gorilla_codec_test.cc b/cpp/test/encoding/gorilla_codec_test.cc
index 6f685b4..47056a6 100644
--- a/cpp/test/encoding/gorilla_codec_test.cc
+++ b/cpp/test/encoding/gorilla_codec_test.cc
@@ -18,6 +18,8 @@
  */
 #include <gtest/gtest.h>
 
+#include <limits>
+
 #include "encoding/gorilla_decoder.h"
 #include "encoding/gorilla_encoder.h"
 
@@ -107,4 +109,102 @@
     }
 }
 
+TEST_F(GorillaCodecTest, FloatEncodingDecodingBoundaryValues) {
+    storage::FloatGorillaEncoder float_encoder;
+    storage::FloatGorillaDecoder float_decoder;
+    common::ByteStream stream(1024, common::MOD_DEFAULT);
+
+    // Test values include important boundary cases and special floating-point
+    // values
+    std::vector<float> test_values = {
+        0.0f,   // Zero
+        -0.0f,  // Negative zero (distinct in IEEE 754)
+        1.0f,   // Positive one
+        -1.0f,  // Negative one
+        std::numeric_limits<float>::min(),     // Smallest positive normalized
+                                               // value
+        std::numeric_limits<float>::max(),     // Largest positive finite value
+        std::numeric_limits<float>::lowest(),  // Smallest (most negative)
+                                               // finite value
+        std::numeric_limits<float>::infinity(),   // Positive infinity
+        -std::numeric_limits<float>::infinity(),  // Negative infinity
+        std::numeric_limits<float>::
+            denorm_min(),  // Smallest positive subnormal (denormalized) value
+        std::numeric_limits<float>::epsilon(),  // Difference between 1 and the
+                                                // next representable value
+        std::nanf("")                           // Not-a-Number (NaN)
+    };
+
+    // Encode all test values into the stream
+    for (auto value : test_values) {
+        EXPECT_EQ(float_encoder.encode(value, stream), common::E_OK);
+    }
+    float_encoder.flush(stream);
+
+    // Decode values from the stream and verify correctness
+    for (auto expected : test_values) {
+        float decoded = float_decoder.decode(stream);
+        if (std::isnan(expected)) {
+            // NaN is unordered; must use isnan() to check
+            EXPECT_TRUE(std::isnan(decoded));
+        } else if (std::isinf(expected)) {
+            // Check if decoded value is infinite and has the same sign
+            EXPECT_TRUE(std::isinf(decoded));
+            EXPECT_EQ(std::signbit(expected), std::signbit(decoded));
+        } else {
+            // For finite floats, allow small precision differences
+            EXPECT_FLOAT_EQ(decoded, expected);
+        }
+    }
+}
+
+TEST_F(GorillaCodecTest, DoubleEncodingDecodingBoundaryValues) {
+    storage::DoubleGorillaEncoder double_encoder;
+    storage::DoubleGorillaDecoder double_decoder;
+    common::ByteStream stream(1024, common::MOD_DEFAULT);
+
+    // Test values include important boundary cases and special floating-point
+    // values for double precision
+    std::vector<double> test_values = {
+        0.0,   // Zero
+        -0.0,  // Negative zero (distinct in IEEE 754)
+        1.0,   // Positive one
+        -1.0,  // Negative one
+        std::numeric_limits<double>::min(),     // Smallest positive normalized
+                                                // value
+        std::numeric_limits<double>::max(),     // Largest positive finite value
+        std::numeric_limits<double>::lowest(),  // Smallest (most negative)
+                                                // finite value
+        std::numeric_limits<double>::infinity(),   // Positive infinity
+        -std::numeric_limits<double>::infinity(),  // Negative infinity
+        std::numeric_limits<double>::
+            denorm_min(),  // Smallest positive subnormal (denormalized) value
+        std::numeric_limits<double>::epsilon(),  // Difference between 1 and the
+                                                 // next representable value
+        std::nan("")                             // Not-a-Number (NaN)
+    };
+
+    // Encode all test values into the stream
+    for (auto value : test_values) {
+        EXPECT_EQ(double_encoder.encode(value, stream), common::E_OK);
+    }
+    double_encoder.flush(stream);
+
+    // Decode values from the stream and verify correctness
+    for (auto expected : test_values) {
+        double decoded = double_decoder.decode(stream);
+        if (std::isnan(expected)) {
+            // NaN is unordered; must use isnan() to check
+            EXPECT_TRUE(std::isnan(decoded));
+        } else if (std::isinf(expected)) {
+            // Check if decoded value is infinite and has the same sign
+            EXPECT_TRUE(std::isinf(decoded));
+            EXPECT_EQ(std::signbit(expected), std::signbit(decoded));
+        } else {
+            // For finite doubles, allow small precision differences
+            EXPECT_DOUBLE_EQ(decoded, expected);
+        }
+    }
+}
+
 }  // namespace storage
diff --git a/cpp/test/encoding/int32_packer_test.cc b/cpp/test/encoding/int32_packer_test.cc
new file mode 100644
index 0000000..1b07238
--- /dev/null
+++ b/cpp/test/encoding/int32_packer_test.cc
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "encoding/int32_packer.h"
+
+#include <gtest/gtest.h>
+
+#include <bitset>
+#include <random>
+
+namespace storage {
+
+TEST(IntPackerTest, SequentialValues) {
+    for (int width = 3; width < 32; ++width) {
+        int32_t arr[8];
+        for (int i = 0; i < 8; ++i) arr[i] = i;
+        Int32Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int32_t res[8] = {0};
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < 8; ++i) {
+            EXPECT_EQ(res[i], arr[i]) << "Width=" << width << " Index=" << i;
+        }
+    }
+}
+
+TEST(IntPackerStressTest, PackUnpackRandomPositiveValues) {
+    const int width = 31;
+    const int count = 100000;
+    const int total_values = count * 8;
+
+    Int32Packer packer(width);
+    std::vector<int32_t> pre_values;
+    std::vector<unsigned char> buffer;
+    pre_values.reserve(total_values);
+    buffer.resize(count * width);
+    int idx = 0;
+    std::srand(12345);  // Optional: deterministic seed
+    for (int i = 0; i < count; ++i) {
+        int32_t vs[8];
+        for (int j = 0; j < 8; ++j) {
+            vs[j] = std::rand() &
+                    0x7FFFFFFF;  // ensure non-negative (Java `nextInt`)
+            pre_values.push_back(vs[j]);
+        }
+
+        unsigned char temp_buf[32] = {0};
+        packer.pack_8values(vs, 0, temp_buf);
+        std::memcpy(buffer.data() + idx, temp_buf, width);
+        idx += width;
+    }
+
+    std::vector<int32_t> res(total_values);
+    packer.unpack_all_values(buffer.data(), static_cast<int>(buffer.size()),
+                             res.data());
+    std::string diff_msg;
+    for (int i = 0; i < total_values; ++i) {
+        if (res[i] != pre_values[i]) {
+            diff_msg += "\nMismatch at index " + std::to_string(i) +
+                        ": expected=" + std::to_string(pre_values[i]) +
+                        ", actual=" + std::to_string(res[i]);
+        }
+    }
+    ASSERT_TRUE(diff_msg.empty()) << diff_msg;
+}
+
+// Test all zeros for various widths
+TEST(Int32PackerTest, AllZeroValues) {
+    for (int width = 1; width <= 31; ++width) {
+        int32_t arr[NUM_OF_INTS] = {0};
+        Int32Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int32_t res[NUM_OF_INTS] = {
+            1};  // initialize non-zero to catch failures
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            EXPECT_EQ(res[i], 0) << "Width=" << width << " Index=" << i;
+        }
+    }
+}
+
+// Test boundary width = 1 with alternating bits
+TEST(Int32PackerTest, BoundaryWidthOneAlternating) {
+    const int width = 1;
+    int32_t arr[NUM_OF_INTS] = {0, 1, 0, 1, 0, 1, 0, 1};
+    Int32Packer packer(width);
+    const int bufSize = NUM_OF_INTS * width / 8;
+    std::vector<unsigned char> buf(bufSize, 0);
+    packer.pack_8values(arr, 0, buf.data());
+    int32_t res[NUM_OF_INTS] = {0};
+    packer.unpack_8values(buf.data(), 0, res);
+    for (int i = 0; i < NUM_OF_INTS; ++i) {
+        EXPECT_EQ(res[i], arr[i]) << "Index=" << i;
+    }
+}
+
+// Test maximum width (32 bits)
+TEST(Int32PackerTest, MaxWidth32Random) {
+    const int width = 32;
+    const int times = 100000;
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<int32_t> dist(INT32_MIN, INT32_MAX);
+    for (int t = 0; t < times; ++t) {
+        int32_t arr[NUM_OF_INTS];
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            arr[i] = dist(gen);
+        }
+        Int32Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int32_t res[NUM_OF_INTS] = {0};
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            EXPECT_EQ(res[i], arr[i]) << "Index=" << i;
+        }
+    }
+}
+
+TEST(Int32PackerTest, AllNegative32Random) {
+    const int width = 32;
+    const int times = 100000;
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<int32_t> dist(INT32_MIN, -1);
+    for (int t = 0; t < times; ++t) {
+        int32_t arr[NUM_OF_INTS];
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            arr[i] = dist(gen);
+        }
+        Int32Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int32_t res[NUM_OF_INTS] = {0};
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            EXPECT_EQ(res[i], arr[i]) << "Index=" << i;
+        }
+    }
+}
+
+// Test unpack_all_values for multiple blocks
+TEST(Int32PackerTest, UnpackAllValuesMultipleBlocks) {
+    const int width = 16;
+    // pack 10 blocks sequentially
+    const int blocks = 10;
+    Int32Packer packer(width);
+    std::vector<int32_t> orig(blocks * NUM_OF_INTS);
+    std::vector<unsigned char> buf(blocks * width);
+
+    // Fill orig with pattern: block * 16 + index
+    // Example: block 0 = [0,1,...,7], block 1 = [16,17,...,23], etc.
+    for (int b = 0; b < blocks; ++b) {
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            orig[b * NUM_OF_INTS + i] = (b << 4) | i;
+        }
+        packer.pack_8values(
+            orig.data() + b * NUM_OF_INTS, 0,
+            buf.data() + b * width);  // pack each block into buf
+    }
+
+    std::vector<int32_t> res(blocks * NUM_OF_INTS, 0);
+    // Unpack all blocks at once
+    packer.unpack_all_values(buf.data(), static_cast<int>(buf.size()),
+                             res.data());
+
+    // Verify each unpacked value matches the original sequence
+    for (size_t i = 0; i < orig.size(); ++i) {
+        EXPECT_EQ(res[i], orig[i]) << "Index=" << i;
+    }
+}
+
+}  // namespace storage
\ No newline at end of file
diff --git a/cpp/test/encoding/int32_rle_codec_test.cc b/cpp/test/encoding/int32_rle_codec_test.cc
new file mode 100644
index 0000000..c580a0e
--- /dev/null
+++ b/cpp/test/encoding/int32_rle_codec_test.cc
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <gtest/gtest.h>
+
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "encoding/int32_rle_decoder.h"
+#include "encoding/int32_rle_encoder.h"
+
+namespace storage {
+
+class Int32RleEncoderTest : public ::testing::Test {
+   protected:
+    void SetUp() override {
+        std::srand(static_cast<unsigned int>(std::time(nullptr)));
+    }
+
+    void encode_and_decode(const std::vector<int32_t>& input) {
+        // Encode
+        common::ByteStream stream(1024, common::MOD_ENCODER_OBJ);
+        Int32RleEncoder encoder;
+        for (int32_t v : input) {
+            encoder.encode(v, stream);
+        }
+        encoder.flush(stream);
+
+        // Decode
+        Int32RleDecoder decoder;
+        std::vector<int32_t> decoded;
+        while (decoder.has_next(stream)) {
+            int32_t v;
+            decoder.read_int32(v, stream);
+            decoded.push_back(v);
+        }
+
+        ASSERT_EQ(input.size(), decoded.size());
+        for (size_t i = 0; i < input.size(); ++i) {
+            EXPECT_EQ(input[i], decoded[i]);
+        }
+    }
+};
+
+// All-zero input
+TEST_F(Int32RleEncoderTest, EncodeAllZeros) {
+    std::vector<int32_t> data(64, 0);
+    encode_and_decode(data);
+}
+
+// All INT32_MAX
+TEST_F(Int32RleEncoderTest, EncodeAllMaxValues) {
+    std::vector<int32_t> data(64, std::numeric_limits<int32_t>::max());
+    encode_and_decode(data);
+}
+
+// All INT32_MIN
+TEST_F(Int32RleEncoderTest, EncodeAllMinValues) {
+    std::vector<int32_t> data(64, std::numeric_limits<int32_t>::min());
+    encode_and_decode(data);
+}
+
+// Repeating the same value
+TEST_F(Int32RleEncoderTest, EncodeRepeatingValue) {
+    std::vector<int32_t> data(128, 12345678);
+    encode_and_decode(data);
+}
+
+// Incremental values (0 to 127)
+TEST_F(Int32RleEncoderTest, EncodeIncrementalValues) {
+    std::vector<int32_t> data;
+    for (int i = 0; i < 128; ++i) {
+        data.push_back(i);
+    }
+    encode_and_decode(data);
+}
+
+// Alternating signs: 0, -1, 2, -3, ...
+TEST_F(Int32RleEncoderTest, EncodeAlternatingSigns) {
+    std::vector<int32_t> data;
+    for (int i = 0; i < 100; ++i) {
+        data.push_back(i % 2 == 0 ? i : -i);
+    }
+    encode_and_decode(data);
+}
+
+// Random positive numbers
+TEST_F(Int32RleEncoderTest, EncodeRandomPositiveValues) {
+    std::vector<int32_t> data;
+    for (int i = 0; i < 200; ++i) {
+        data.push_back(std::rand() & 0x7FFFFFFF);
+    }
+    encode_and_decode(data);
+}
+
+// Random negative numbers
+TEST_F(Int32RleEncoderTest, EncodeRandomNegativeValues) {
+    std::vector<int32_t> data;
+    for (int i = 0; i < 200; ++i) {
+        data.push_back(-(std::rand() & 0x7FFFFFFF));
+    }
+    encode_and_decode(data);
+}
+
+// INT32 boundary values
+TEST_F(Int32RleEncoderTest, EncodeBoundaryValues) {
+    std::vector<int32_t> data = {std::numeric_limits<int32_t>::min(), -1, 0, 1,
+                                 std::numeric_limits<int32_t>::max()};
+    encode_and_decode(data);
+}
+
+// Flush after every 8 values (simulate frequent flush)
+TEST_F(Int32RleEncoderTest, EncodeMultipleFlushes) {
+    common::ByteStream stream(1024, common::MOD_ENCODER_OBJ);
+    Int32RleEncoder encoder;
+    std::vector<int32_t> data;
+
+    for (int round = 0; round < 3; ++round) {
+        for (int i = 0; i < 8; ++i) {
+            int val = i + round * 10;
+            encoder.encode(val, stream);
+            data.push_back(val);
+        }
+        encoder.flush(stream);
+    }
+
+    // Decode
+    Int32RleDecoder decoder;
+    std::vector<int32_t> decoded;
+    while (decoder.has_next(stream)) {
+        int32_t v;
+        decoder.read_int32(v, stream);
+        decoded.push_back(v);
+    }
+
+    ASSERT_EQ(data.size(), decoded.size());
+    for (size_t i = 0; i < data.size(); ++i) {
+        EXPECT_EQ(data[i], decoded[i]);
+    }
+}
+
+// Flush with no values encoded
+TEST_F(Int32RleEncoderTest, EncodeFlushWithoutData) {
+    Int32RleEncoder encoder;
+    common::ByteStream stream(1024, common::MOD_ENCODER_OBJ);
+    encoder.flush(stream);  // No values encoded
+
+    EXPECT_EQ(stream.total_size(), 0u);
+}
+
+}  // namespace storage
diff --git a/cpp/test/encoding/int64_packer_test.cc b/cpp/test/encoding/int64_packer_test.cc
new file mode 100644
index 0000000..846f9a0
--- /dev/null
+++ b/cpp/test/encoding/int64_packer_test.cc
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "encoding/int64_packer.h"
+
+#include <gtest/gtest.h>
+
+#include <bitset>
+#include <cmath>
+#include <random>
+
+namespace storage {
+
+TEST(Int64PackerTest, SequentialValues) {
+    for (int width = 4; width < 63; ++width) {
+        int64_t arr[8];
+        for (int i = 0; i < 8; ++i) arr[i] = i;
+        Int64Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int64_t res[8] = {0};
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < 8; ++i) {
+            EXPECT_EQ(res[i], arr[i]) << "Width=" << width << " Index=" << i;
+        }
+    }
+}
+
+TEST(Int64PackerTest, PackUnpackSingleBatchRandomPositiveLongs) {
+    const int byte_count = 63;  // total bytes for 8 packed uint64_t values
+    const int count = 1;
+    const int total_values = count * 8;
+
+    Int64Packer packer(byte_count);
+    std::vector<uint64_t> pre_values;
+    std::vector<unsigned char> buffer(count * byte_count);
+    pre_values.reserve(total_values);
+
+    int idx = 0;
+    std::srand(12345);  // optional fixed seed
+
+    for (int i = 0; i < count; ++i) {
+        int64_t vs[8];
+        for (int j = 0; j < 8; ++j) {
+            // Emulate Java's nextLong() then Math.abs(): remove sign bit
+            uint64_t v = ((uint64_t)std::rand() << 32) | std::rand();
+            vs[j] = v & 0x7FFFFFFFFFFFFFFFULL;  // clear sign bit
+            pre_values.push_back(vs[j]);
+        }
+
+        unsigned char temp_buf[64] = {0};  // temp output buffer
+        packer.pack_8values(vs, 0, temp_buf);
+
+        std::memcpy(buffer.data() + idx, temp_buf, byte_count);
+        idx += byte_count;
+    }
+
+    std::vector<int64_t> result(total_values);
+    packer.unpack_all_values(buffer.data(), static_cast<int>(buffer.size()),
+                             result.data());
+
+    for (int i = 0; i < total_values; ++i) {
+        ASSERT_EQ(result[i], pre_values[i]) << "Mismatch at index " << i;
+    }
+}
+
+// Utility to compute the maximum bit width needed to store all values
+int get_long_max_bit_width(const std::vector<uint64_t>& values) {
+    uint64_t max_val = 0;
+    for (uint64_t v : values) {
+        max_val = std::max(max_val, v);
+    }
+    if (max_val == 0) return 1;
+    return static_cast<int>(std::floor(std::log2(max_val)) + 1);
+}
+
+TEST(Int64PackerTest, PackAllManualBitWidth) {
+    std::vector<uint64_t> bp_list;
+    int bp_count = 15;
+    uint64_t bp_start = 11;
+    for (int i = 0; i < bp_count; ++i) {
+        bp_list.push_back(bp_start);
+        bp_start *= 3;
+    }
+    bp_list.push_back(0);  // Add one zero
+    ASSERT_EQ(bp_list.size(), 16u);
+
+    // Calculate max bit width
+    int bp_bit_width = get_long_max_bit_width(bp_list);
+
+    Int64Packer packer(bp_bit_width);
+    std::ostringstream oss(std::ios::binary);
+
+    // Split into two blocks of 8
+    int64_t value1[8];
+    int64_t value2[8];
+    for (int i = 0; i < 8; ++i) {
+        value1[i] = bp_list[i];
+        value2[i] = bp_list[i + 8];
+    }
+
+    unsigned char bytes1[64] = {0};
+    unsigned char bytes2[64] = {0};
+    packer.pack_8values(value1, 0, bytes1);
+    packer.pack_8values(value2, 0, bytes2);
+    oss.write(reinterpret_cast<const char*>(bytes1), bp_bit_width);
+    oss.write(reinterpret_cast<const char*>(bytes2), bp_bit_width);
+
+    std::string packed_data = oss.str();
+    ASSERT_EQ(static_cast<int>(packed_data.size()), 2 * bp_bit_width);
+
+    // Decode
+    int64_t read_array[16] = {0};
+    packer.unpack_all_values(
+        reinterpret_cast<const unsigned char*>(packed_data.data()),
+        2 * bp_bit_width, read_array);
+
+    // Compare
+    for (int i = 0; i < 16; ++i) {
+        ASSERT_EQ(read_array[i], bp_list[i]) << "Mismatch at index " << i;
+    }
+}
+
+// Test all zeros for various widths
+TEST(Int64PackerTest, AllZeroValues) {
+    for (int width = 1; width <= 31; ++width) {
+        int64_t arr[NUM_OF_INTS] = {0};
+        Int64Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int64_t res[NUM_OF_INTS] = {
+            1};  // initialize non-zero to catch failures
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            EXPECT_EQ(res[i], 0) << "Width=" << width << " Index=" << i;
+        }
+    }
+}
+
+// Test boundary width = 1 with alternating bits
+TEST(Int64PackerTest, BoundaryWidthOneAlternating) {
+    const int width = 1;
+    int64_t arr[NUM_OF_INTS] = {0, 1, 0, 1, 0, 1, 0, 1};
+    Int64Packer packer(width);
+    const int bufSize = NUM_OF_INTS * width / 8;
+    std::vector<unsigned char> buf(bufSize, 0);
+    packer.pack_8values(arr, 0, buf.data());
+    int64_t res[NUM_OF_INTS] = {0};
+    packer.unpack_8values(buf.data(), 0, res);
+    for (int i = 0; i < NUM_OF_INTS; ++i) {
+        EXPECT_EQ(res[i], arr[i]) << "Index=" << i;
+    }
+}
+
+// Test maximum width (64 bits)
+TEST(Int64PackerTest, MaxWidth64Random) {
+    const int width = 64;
+    const int times = 100000;
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<int64_t> dist(INT64_MIN, INT64_MAX);
+    for (int t = 0; t < times; ++t) {
+        int64_t arr[NUM_OF_INTS];
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            arr[i] = dist(gen);
+        }
+        Int64Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int64_t res[NUM_OF_INTS] = {0};
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            EXPECT_EQ(res[i], arr[i]) << "Index=" << i;
+        }
+    }
+}
+
+TEST(Int64PackerTest, AllNegative64Random) {
+    const int width = 64;
+    const int times = 100000;
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<int64_t> dist(INT64_MIN, -1);
+    for (int t = 0; t < times; ++t) {
+        int64_t arr[NUM_OF_INTS];
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            arr[i] = dist(gen);
+        }
+        Int64Packer packer(width);
+        const int bufSize = NUM_OF_INTS * width / 8;
+        std::vector<unsigned char> buf(bufSize, 0);
+        packer.pack_8values(arr, 0, buf.data());
+        int64_t res[NUM_OF_INTS] = {0};
+        packer.unpack_8values(buf.data(), 0, res);
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            EXPECT_EQ(res[i], arr[i]) << "Index=" << i;
+        }
+    }
+}
+
+// Test unpack_all_values for multiple blocks
+TEST(Int64PackerTest, UnpackAllValuesMultipleBlocks) {
+    const int width = 16;
+    // pack 10 blocks sequentially
+    const int blocks = 10;
+    Int64Packer packer(width);
+    std::vector<int64_t> orig(blocks * NUM_OF_INTS);
+    std::vector<unsigned char> buf(blocks * width);
+
+    // Fill orig with pattern: block * 16 + index
+    // Example: block 0 = [0,1,...,7], block 1 = [16,17,...,23], etc.
+    for (int b = 0; b < blocks; ++b) {
+        for (int i = 0; i < NUM_OF_INTS; ++i) {
+            orig[b * NUM_OF_INTS + i] = (b << 4) | i;
+        }
+        packer.pack_8values(
+            orig.data() + b * NUM_OF_INTS, 0,
+            buf.data() + b * width);  // pack each block into buf
+    }
+
+    std::vector<int64_t> res(blocks * NUM_OF_INTS, 0);
+    // Unpack all blocks at once
+    packer.unpack_all_values(buf.data(), static_cast<int>(buf.size()),
+                             res.data());
+
+    // Verify each unpacked value matches the original sequence
+    for (size_t i = 0; i < orig.size(); ++i) {
+        EXPECT_EQ(res[i], orig[i]) << "Index=" << i;
+    }
+}
+
+}  // namespace storage
\ No newline at end of file
diff --git a/cpp/test/encoding/int64_rle_codec_test.cc b/cpp/test/encoding/int64_rle_codec_test.cc
new file mode 100644
index 0000000..7583faa
--- /dev/null
+++ b/cpp/test/encoding/int64_rle_codec_test.cc
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <gtest/gtest.h>
+
+#include <random>
+
+#include "encoding/int64_rle_decoder.h"
+#include "encoding/int64_rle_encoder.h"
+
+namespace storage {
+
+class Int64RleCodecTest : public ::testing::Test {
+   protected:
+    void SetUp() override {
+        std::srand(static_cast<unsigned int>(std::time(nullptr)));
+    }
+
+    void encode_and_decode_check(const std::vector<int64_t>& input) {
+        common::ByteStream stream(4096, common::MOD_ENCODER_OBJ);
+
+        // Encode
+        Int64RleEncoder encoder;
+        for (int64_t v : input) {
+            encoder.encode(v, stream);
+        }
+        encoder.flush(stream);
+
+        // Decode
+        Int64RleDecoder decoder;
+        for (size_t i = 0; i < input.size(); ++i) {
+            ASSERT_TRUE(decoder.has_next(stream));
+            int64_t value;
+            decoder.read_int64(value, stream);
+            EXPECT_EQ(value, input[i]) << "Mismatch at index " << i;
+        }
+
+        EXPECT_FALSE(decoder.has_next(stream));
+    }
+};
+
+// All-zero input
+TEST_F(Int64RleCodecTest, EncodeAllZeros) {
+    std::vector<int64_t> data(64, 0);
+    encode_and_decode_check(data);
+}
+
+// All INT64_MAX values
+TEST_F(Int64RleCodecTest, EncodeAllMaxValues) {
+    std::vector<int64_t> data(64, std::numeric_limits<int64_t>::max());
+    encode_and_decode_check(data);
+}
+
+// All INT64_MIN values
+TEST_F(Int64RleCodecTest, EncodeAllMinValues) {
+    std::vector<int64_t> data(64, std::numeric_limits<int64_t>::min());
+    encode_and_decode_check(data);
+}
+
+// Repeating a single constant value
+TEST_F(Int64RleCodecTest, EncodeRepeatingSingleValue) {
+    std::vector<int64_t> data(100, 123456789012345);
+    encode_and_decode_check(data);
+}
+
+// Strictly increasing sequence
+TEST_F(Int64RleCodecTest, EncodeIncrementalValues) {
+    std::vector<int64_t> data;
+    for (int64_t i = 0; i < 128; ++i) {
+        data.push_back(i);
+    }
+    encode_and_decode_check(data);
+}
+
+// Alternating positive and negative values
+TEST_F(Int64RleCodecTest, EncodeAlternatingSigns) {
+    std::vector<int64_t> data;
+    for (int64_t i = 0; i < 100; ++i) {
+        data.push_back(i % 2 == 0 ? i : -i);
+    }
+    encode_and_decode_check(data);
+}
+
+// Random positive int64 values
+TEST_F(Int64RleCodecTest, EncodeRandomPositiveValues) {
+    std::vector<int64_t> data;
+    for (int i = 0; i < 256; ++i) {
+        data.push_back(static_cast<int64_t>(std::rand()) << 31 | std::rand());
+    }
+    encode_and_decode_check(data);
+}
+
+// Random negative int64 values
+TEST_F(Int64RleCodecTest, EncodeRandomNegativeValues) {
+    std::vector<int64_t> data;
+    for (int i = 0; i < 256; ++i) {
+        int64_t value = static_cast<int64_t>(std::rand()) << 31 | std::rand();
+        data.push_back(-value);
+    }
+    encode_and_decode_check(data);
+}
+
+// Mixed boundary values
+TEST_F(Int64RleCodecTest, EncodeBoundaryValues) {
+    std::vector<int64_t> data = {std::numeric_limits<int64_t>::min(), -1, 0, 1,
+                                 std::numeric_limits<int64_t>::max()};
+    encode_and_decode_check(data);
+}
+
+// Flush without any encoded values
+TEST_F(Int64RleCodecTest, EncodeFlushWithoutData) {
+    Int64RleEncoder encoder;
+    common::ByteStream stream(1024, common::MOD_ENCODER_OBJ);
+    encoder.flush(stream);
+    EXPECT_EQ(stream.total_size(), 0u);
+}
+
+}  // namespace storage
diff --git a/cpp/test/encoding/sprintz_codec_test.cc b/cpp/test/encoding/sprintz_codec_test.cc
new file mode 100644
index 0000000..ec43ff4
--- /dev/null
+++ b/cpp/test/encoding/sprintz_codec_test.cc
@@ -0,0 +1,387 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <gtest/gtest.h>
+
+#include <cfloat>
+#include <climits>
+#include <cmath>
+
+#include "common/allocator/byte_stream.h"
+#include "encoding/double_sprintz_decoder.h"
+#include "encoding/double_sprintz_encoder.h"
+#include "encoding/float_sprintz_decoder.h"
+#include "encoding/float_sprintz_encoder.h"
+#include "encoding/int32_sprintz_decoder.h"
+#include "encoding/int32_sprintz_encoder.h"
+#include "encoding/int64_sprintz_decoder.h"
+#include "encoding/int64_sprintz_encoder.h"
+
+using namespace storage;
+using namespace common;
+
+namespace {
+
+constexpr int float_max_point_value = 10000;
+constexpr int64_t double_max_point_value = 1000000000000000LL;
+
+std::vector<int32_t> int_list;
+std::vector<int64_t> long_list;
+std::vector<float> float_list;
+std::vector<double> double_list;
+std::vector<int> iterations = {1, 3, 8, 16, 1000, 10000};
+
+void PrepareHybridData() {
+    int hybrid_count = 11;
+    int hybrid_num = 50;
+    int hybrid_start = 2000;
+    for (int i = 0; i < hybrid_num; i++) {
+        for (int j = 0; j < hybrid_count; j++) {
+            float_list.push_back(static_cast<float>(hybrid_start) /
+                                 float_max_point_value);
+            double_list.push_back(static_cast<double>(hybrid_start) /
+                                  double_max_point_value);
+            int_list.push_back(hybrid_start);
+            long_list.push_back(hybrid_start);
+        }
+        for (int j = 0; j < hybrid_count; j++) {
+            float_list.push_back(static_cast<float>(hybrid_start) /
+                                 float_max_point_value);
+            double_list.push_back(static_cast<double>(hybrid_start) /
+                                  double_max_point_value);
+            int_list.push_back(hybrid_start);
+            long_list.push_back(hybrid_start);
+            hybrid_start += 3;
+        }
+        hybrid_count += 2;
+    }
+}
+
+class SprintzCodecTest : public ::testing::Test {
+   protected:
+    void SetUp() override {
+        if (int_list.empty()) PrepareHybridData();
+    }
+};
+
+TEST_F(SprintzCodecTest, Int32SingleValue) {
+    Int32SprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    ASSERT_EQ(encoder.encode(777, stream), E_OK);
+    ASSERT_EQ(encoder.flush(stream), E_OK);
+
+    Int32SprintzDecoder decoder;
+    int32_t val;
+    ASSERT_TRUE(decoder.has_remaining(stream));
+    ASSERT_EQ(decoder.read_int32(val, stream), E_OK);
+    ASSERT_EQ(val, 777);
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, Int64SingleValue) {
+    Int64SprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    int64_t value = static_cast<int64_t>(INT32_MAX) + 10;
+    ASSERT_EQ(encoder.encode(value, stream), E_OK);
+    ASSERT_EQ(encoder.flush(stream), E_OK);
+
+    Int64SprintzDecoder decoder;
+    int64_t actual;
+    ASSERT_TRUE(decoder.has_remaining(stream));
+    ASSERT_EQ(decoder.read_int64(actual, stream), E_OK);
+    ASSERT_EQ(actual, value);
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, Int32EdgeValues) {
+    std::vector<int32_t> values = {INT32_MIN, -1, 0, 1, INT32_MAX};
+
+    Int32SprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    for (auto v : values) {
+        encoder.encode(v, stream);
+    }
+    encoder.flush(stream);
+
+    Int32SprintzDecoder decoder;
+    for (auto expected : values) {
+        int32_t actual;
+        ASSERT_TRUE(decoder.has_remaining(stream));
+        ASSERT_EQ(decoder.read_int32(actual, stream), E_OK);
+        ASSERT_EQ(actual, expected);
+    }
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, Int64EdgeValues) {
+    std::vector<int64_t> values = {INT64_MIN, -1, 0, 1, INT64_MAX};
+
+    Int64SprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    for (auto v : values) {
+        encoder.encode(v, stream);
+    }
+    encoder.flush(stream);
+
+    Int64SprintzDecoder decoder;
+    for (auto expected : values) {
+        int64_t actual;
+        ASSERT_TRUE(decoder.has_remaining(stream));
+        ASSERT_EQ(decoder.read_int64(actual, stream), E_OK);
+        ASSERT_EQ(actual, expected);
+    }
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, Int32ZeroNumber) {
+    Int32SprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    for (int i = 0; i < 3; ++i) encoder.encode(0, stream);
+    encoder.flush(stream);
+    for (int i = 0; i < 3; ++i) encoder.encode(0, stream);
+    encoder.flush(stream);
+
+    for (int round = 0; round < 2; ++round) {
+        Int32SprintzDecoder decoder;
+        for (int i = 0; i < 3; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            int32_t actual;
+            ASSERT_EQ(decoder.read_int32(actual, stream), E_OK);
+            ASSERT_EQ(actual, 0);
+        }
+    }
+}
+
+TEST_F(SprintzCodecTest, Int64ZeroNumber) {
+    Int64SprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    for (int i = 0; i < 3; ++i) encoder.encode(static_cast<int64_t>(0), stream);
+    encoder.flush(stream);
+    for (int i = 0; i < 3; ++i) encoder.encode(static_cast<int64_t>(0), stream);
+    encoder.flush(stream);
+
+    for (int round = 0; round < 2; ++round) {
+        Int64SprintzDecoder decoder;
+        for (int i = 0; i < 3; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            int64_t actual;
+            ASSERT_EQ(decoder.read_int64(actual, stream), E_OK);
+            ASSERT_EQ(actual, 0);
+        }
+    }
+}
+
+TEST_F(SprintzCodecTest, Int32Increasing) {
+    for (int num : iterations) {
+        Int32SprintzEncoder encoder;
+        ByteStream stream(1024, MOD_ENCODER_OBJ);
+        for (int i = 0; i < num; ++i) encoder.encode(7 + 2 * i, stream);
+        encoder.flush(stream);
+
+        Int32SprintzDecoder decoder;
+        for (int i = 0; i < num; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            int32_t actual;
+            ASSERT_EQ(decoder.read_int32(actual, stream), E_OK);
+            ASSERT_EQ(actual, 7 + 2 * i);
+        }
+        ASSERT_FALSE(decoder.has_remaining(stream));
+    }
+}
+
+TEST_F(SprintzCodecTest, Int64Increasing) {
+    for (int num : iterations) {
+        Int64SprintzEncoder encoder;
+        ByteStream stream(1024, MOD_ENCODER_OBJ);
+        for (int i = 0; i < num; ++i)
+            encoder.encode(static_cast<int64_t>(7) + 2 * i, stream);
+        encoder.flush(stream);
+
+        Int64SprintzDecoder decoder;
+        for (int i = 0; i < num; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            int64_t actual;
+            ASSERT_EQ(decoder.read_int64(actual, stream), E_OK);
+            ASSERT_EQ(actual, 7 + 2 * i);
+        }
+        ASSERT_FALSE(decoder.has_remaining(stream));
+    }
+}
+
+TEST_F(SprintzCodecTest, FloatSingleValue) {
+    FloatSprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    ASSERT_EQ(encoder.encode(FLT_MAX, stream), E_OK);
+    ASSERT_EQ(encoder.flush(stream), E_OK);
+
+    FloatSprintzDecoder decoder;
+    float actual;
+    ASSERT_TRUE(decoder.has_remaining(stream));
+    ASSERT_EQ(decoder.read_float(actual, stream), E_OK);
+    ASSERT_EQ(actual, FLT_MAX);
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, DoubleSingleValue) {
+    DoubleSprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    ASSERT_EQ(encoder.encode(DBL_MAX, stream), E_OK);
+    ASSERT_EQ(encoder.flush(stream), E_OK);
+
+    DoubleSprintzDecoder decoder;
+    double actual;
+    ASSERT_TRUE(decoder.has_remaining(stream));
+    ASSERT_EQ(decoder.read_double(actual, stream), E_OK);
+    ASSERT_EQ(actual, DBL_MAX);
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, FloatZeroNumber) {
+    FloatSprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    float value = 0.0f;
+    for (int i = 0; i < 3; ++i) encoder.encode(value, stream);
+    encoder.flush(stream);
+    for (int i = 0; i < 3; ++i) encoder.encode(value, stream);
+    encoder.flush(stream);
+
+    for (int round = 0; round < 2; ++round) {
+        FloatSprintzDecoder decoder;
+        for (int i = 0; i < 3; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            float actual;
+            ASSERT_EQ(decoder.read_float(actual, stream), E_OK);
+            ASSERT_EQ(actual, value);
+        }
+    }
+}
+
+TEST_F(SprintzCodecTest, DoubleZeroNumber) {
+    DoubleSprintzEncoder encoder;
+    ByteStream stream(128, MOD_ENCODER_OBJ);
+    double value = 0.0;
+    for (int i = 0; i < 3; ++i) encoder.encode(value, stream);
+    encoder.flush(stream);
+    for (int i = 0; i < 3; ++i) encoder.encode(value, stream);
+    encoder.flush(stream);
+
+    for (int round = 0; round < 2; ++round) {
+        DoubleSprintzDecoder decoder;
+        for (int i = 0; i < 3; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            double actual;
+            ASSERT_EQ(decoder.read_double(actual, stream), E_OK);
+            ASSERT_EQ(actual, value);
+        }
+    }
+}
+
+TEST_F(SprintzCodecTest, FloatIncreasing) {
+    for (int num : iterations) {
+        FloatSprintzEncoder encoder;
+        ByteStream stream(1024, MOD_ENCODER_OBJ);
+        float value = 7.101f;
+        for (int i = 0; i < num; ++i) encoder.encode(value + 2.0f * i, stream);
+        encoder.flush(stream);
+
+        FloatSprintzDecoder decoder;
+        for (int i = 0; i < num; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            float actual;
+            ASSERT_EQ(decoder.read_float(actual, stream), E_OK);
+            ASSERT_FLOAT_EQ(actual, value + 2 * i);
+        }
+        ASSERT_FALSE(decoder.has_remaining(stream));
+    }
+}
+
+TEST_F(SprintzCodecTest, DoubleIncreasing) {
+    for (int num : iterations) {
+        DoubleSprintzEncoder encoder;
+        ByteStream stream(1024, MOD_ENCODER_OBJ);
+        float f = 7.101f;
+        double value = static_cast<double>(f);
+        for (int i = 0; i < num; ++i) {
+            double input_val = value + 2.0 * i;
+            encoder.encode(input_val, stream);
+        }
+
+        encoder.flush(stream);
+
+        DoubleSprintzDecoder decoder;
+        for (int i = 0; i < num; ++i) {
+            ASSERT_TRUE(decoder.has_remaining(stream));
+            double actual;
+            ASSERT_EQ(decoder.read_double(actual, stream), E_OK);
+            ASSERT_DOUBLE_EQ(actual, value + 2 * i);
+        }
+        ASSERT_FALSE(decoder.has_remaining(stream));
+    }
+}
+
+TEST_F(SprintzCodecTest, FloatExtremeValues) {
+    std::vector<float> test_vals = {FLT_MIN, FLT_MAX, -FLT_MIN,      -FLT_MAX,
+                                    -0.0f,   0.0f,    std::nanf("1")};
+
+    FloatSprintzEncoder encoder;
+    ByteStream stream(256, MOD_ENCODER_OBJ);
+    for (auto v : test_vals) {
+        encoder.encode(v, stream);
+    }
+    encoder.flush(stream);
+
+    FloatSprintzDecoder decoder;
+    for (auto expected : test_vals) {
+        float actual;
+        ASSERT_TRUE(decoder.has_remaining(stream));
+        ASSERT_EQ(decoder.read_float(actual, stream), E_OK);
+        if (std::isnan(expected)) {
+            ASSERT_TRUE(std::isnan(actual));
+        } else {
+            ASSERT_FLOAT_EQ(actual, expected);
+        }
+    }
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+TEST_F(SprintzCodecTest, DoubleExtremeValues) {
+    std::vector<double> test_vals = {DBL_MIN, DBL_MAX, -DBL_MIN,    -DBL_MAX,
+                                     -0.0,    0.0,     std::nan("")};
+
+    DoubleSprintzEncoder encoder;
+    ByteStream stream(256, MOD_ENCODER_OBJ);
+    for (auto v : test_vals) {
+        encoder.encode(v, stream);
+    }
+    encoder.flush(stream);
+
+    DoubleSprintzDecoder decoder;
+    for (auto expected : test_vals) {
+        double actual;
+        ASSERT_TRUE(decoder.has_remaining(stream));
+        ASSERT_EQ(decoder.read_double(actual, stream), E_OK);
+        if (std::isnan(expected)) {
+            ASSERT_TRUE(std::isnan(actual));
+        } else {
+            ASSERT_DOUBLE_EQ(actual, expected);
+        }
+    }
+    ASSERT_FALSE(decoder.has_remaining(stream));
+}
+
+}  // namespace
diff --git a/cpp/test/encoding/ts2diff_codec_test.cc b/cpp/test/encoding/ts2diff_codec_test.cc
index 8698808..f0f5b1e 100644
--- a/cpp/test/encoding/ts2diff_codec_test.cc
+++ b/cpp/test/encoding/ts2diff_codec_test.cc
@@ -190,12 +190,17 @@
     std::vector<int32_t> decoded(row_num);
     auto start_decode = std::chrono::steady_clock::now();
     for (int i = 0; i < row_num; i++) {
-        EXPECT_EQ(decoder_int_->read_int32(decoded[i], out_stream), common::E_OK);
+        EXPECT_EQ(decoder_int_->read_int32(decoded[i], out_stream),
+                  common::E_OK);
     }
     auto end_decode = std::chrono::steady_clock::now();
 
-    auto encode_duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_encode - start_encode);
-    auto decode_duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_decode - start_decode);
+    auto encode_duration =
+        std::chrono::duration_cast<std::chrono::milliseconds>(end_encode -
+                                                              start_encode);
+    auto decode_duration =
+        std::chrono::duration_cast<std::chrono::milliseconds>(end_decode -
+                                                              start_decode);
 
     std::cout << "Encode time: " << encode_duration.count() << "ms\n";
     std::cout << "Decode time: " << decode_duration.count() << "ms\n";
diff --git a/cpp/test/file/write_file_test.cc b/cpp/test/file/write_file_test.cc
index e7ee54e..0ea9c0a 100644
--- a/cpp/test/file/write_file_test.cc
+++ b/cpp/test/file/write_file_test.cc
@@ -44,9 +44,9 @@
     remove(file_name.c_str());
 }
 
-#if defined(__GNUC__) && !defined(__clang__)  
-    #pragma GCC push_options
-    #pragma GCC optimize ("O0")  
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC push_options
+#pragma GCC optimize("O0")
 #endif
 TEST_F(WriteFileTest, WriteToFile) {
     WriteFile write_file;
@@ -73,7 +73,7 @@
     remove(file_name.c_str());
 }
 #if defined(__GNUC__) && !defined(__clang__)
-    #pragma GCC pop_options
+#pragma GCC pop_options
 #endif
 
 TEST_F(WriteFileTest, SyncFile) {
diff --git a/cpp/test/parser/path_name_test.cc b/cpp/test/parser/path_name_test.cc
index 3a8a758..2e91122 100644
--- a/cpp/test/parser/path_name_test.cc
+++ b/cpp/test/parser/path_name_test.cc
@@ -127,49 +127,40 @@
 }
 
 TEST_F(PathNameTest, TestIllegalPathName) {
-    EXPECT_THROW ({
-      Path("root.sg`", true);
-    } , std::runtime_error);
+    EXPECT_THROW({ Path("root.sg`", true); }, std::runtime_error);
 
-    EXPECT_THROW ({
-      Path("root.sg\na", true);
-    }, std::runtime_error);
+    EXPECT_THROW({ Path("root.sg\na", true); }, std::runtime_error);
 
-    EXPECT_THROW ({
-      Path("root.select`", true);
-    } , std::runtime_error);
+    EXPECT_THROW({ Path("root.select`", true); }, std::runtime_error);
 
-    EXPECT_THROW ({
-      // pure digits
-      Path("root.111", true);
-    } , std::runtime_error);
+    EXPECT_THROW(
+        {
+            // pure digits
+            Path("root.111", true);
+        },
+        std::runtime_error);
 
-    EXPECT_THROW ({
-      // single ` in quoted node
-      Path("root.`a``", true);
-    } , std::runtime_error);
+    EXPECT_THROW(
+        {
+            // single ` in quoted node
+            Path("root.`a``", true);
+        },
+        std::runtime_error);
 
-    EXPECT_THROW ({
-      // single ` in quoted node
-      Path("root.``a`", true);
-    } , std::runtime_error);
+    EXPECT_THROW(
+        {
+            // single ` in quoted node
+            Path("root.``a`", true);
+        },
+        std::runtime_error);
 
-    EXPECT_THROW ({
-      Path("root.a*%", true);
-    } , std::runtime_error);
+    EXPECT_THROW({ Path("root.a*%", true); }, std::runtime_error);
 
-    EXPECT_THROW ({
-      Path("root.a*b", true);
-    } , std::runtime_error);
+    EXPECT_THROW({ Path("root.a*b", true); }, std::runtime_error);
 
-    EXPECT_THROW ({
-      Path("root.0e38", true);
-    } , std::runtime_error);
+    EXPECT_THROW({ Path("root.0e38", true); }, std::runtime_error);
 
-    EXPECT_THROW ({
-      Path("root.0000", true);
-    }, std::runtime_error);
+    EXPECT_THROW({ Path("root.0000", true); }, std::runtime_error);
 }
- 
 
 }  // namespace storage
diff --git a/cpp/test/reader/bloom_filter_test.cc b/cpp/test/reader/bloom_filter_test.cc
new file mode 100644
index 0000000..7e754df
--- /dev/null
+++ b/cpp/test/reader/bloom_filter_test.cc
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License a
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "reader/bloom_filter.h"
+
+#include <gtest/gtest.h>
+
+#include <unordered_set>
+using namespace storage;
+TEST(BloomfilterTest, BloomFilter) {
+    BloomFilter filter;
+
+    std::unordered_set<uint8_t> my_set = {0, 0, 0,   0,  0, 0, 0, 0, 2,
+                                          0, 2, 128, 32, 0, 0, 1, 0, 4,
+                                          0, 0, 0,   16, 0, 0, 0, 0, 32};
+
+    filter.init(0.1, 10);
+    common::PageArena arena;
+    common::String device1 = common::String("test_table.test1.test", arena);
+    common::String sensor = common::String();
+    filter.add_path_entry(device1, sensor);
+    common::String sensor1 = common::String("value", arena);
+    filter.add_path_entry(device1, sensor1);
+    common::ByteStream out(1024, common::MOD_DEFAULT);
+    uint8_t *filter_data_bytes = nullptr;
+    int32_t filter_data_bytes_len = 0;
+    filter.get_bit_set()->to_bytes(filter_data_bytes, filter_data_bytes_len);
+    std::unordered_set<uint8_t> data;
+    for (int i = 0; i < filter_data_bytes_len; i++) {
+        data.insert(static_cast<int>(filter_data_bytes[i]));
+        ASSERT_TRUE(my_set.find(static_cast<int>(filter_data_bytes[i])) !=
+                    my_set.end());
+    }
+    filter.serialize_to(out);
+
+    BloomFilter filter2;
+    filter2.deserialize_from(out);
+    // ASSERT_EQ(filter, filter2);
+    uint8_t *filter_data_bytes2 = nullptr;
+    int32_t filter_data_bytes_len2 = 0;
+    filter2.get_bit_set()->to_bytes(filter_data_bytes2, filter_data_bytes_len2);
+    ASSERT_EQ(filter_data_bytes_len, filter_data_bytes_len2);
+    for (int i = 0; i < filter_data_bytes_len2; i++) {
+        ASSERT_TRUE(data.find(static_cast<int>(filter_data_bytes2[i])) !=
+                    data.end());
+        ASSERT_TRUE(my_set.find(static_cast<int>(filter_data_bytes2[i])) !=
+                    my_set.end());
+    }
+    common::mem_free(filter_data_bytes);
+    common::mem_free(filter_data_bytes2);
+}
diff --git a/cpp/test/reader/table_view/tsfile_reader_table_test.cc b/cpp/test/reader/table_view/tsfile_reader_table_test.cc
index d2f4a8a..72c29aa 100644
--- a/cpp/test/reader/table_view/tsfile_reader_table_test.cc
+++ b/cpp/test/reader/table_view/tsfile_reader_table_test.cc
@@ -47,9 +47,7 @@
         mode_t mode = 0666;
         write_file_.create(file_name_, flags, mode);
     }
-    void TearDown() override {
-        remove(file_name_.c_str());
-    }
+    void TearDown() override { remove(file_name_.c_str()); }
     std::string file_name_;
     WriteFile write_file_;
 
@@ -80,17 +78,17 @@
         int measurement_schema_num = 5;
         for (int i = 0; i < id_schema_num; i++) {
             measurement_schemas.emplace_back(new MeasurementSchema(
-                "id" + to_string(i), TSDataType::STRING, TSEncoding::PLAIN,
+                "id" + std::to_string(i), TSDataType::STRING, TSEncoding::PLAIN,
                 CompressionType::UNCOMPRESSED));
             column_categories.emplace_back(ColumnCategory::TAG);
         }
         for (int i = 0; i < measurement_schema_num; i++) {
             measurement_schemas.emplace_back(new MeasurementSchema(
-                "s" + to_string(i), TSDataType::INT64, TSEncoding::PLAIN,
+                "s" + std::to_string(i), TSDataType::INT64, TSEncoding::PLAIN,
                 CompressionType::UNCOMPRESSED));
             column_categories.emplace_back(ColumnCategory::FIELD);
         }
-        return new TableSchema("testTable" + to_string(table_num),
+        return new TableSchema("testTable" + std::to_string(table_num),
                                measurement_schemas, column_categories);
     }
 
@@ -133,12 +131,15 @@
         return tablet;
     }
 
-    void test_table_model_query(uint32_t points_per_device = 10, uint32_t device_num = 1, int64_t end_time = 1000000000000) {
+    void test_table_model_query(uint32_t points_per_device = 10,
+                                uint32_t device_num = 1,
+                                int64_t end_time = 1000000000000) {
         auto table_schema = gen_table_schema(0);
         auto tsfile_table_writer_ =
             std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
 
-        auto tablet = gen_tablet(table_schema, 0, device_num, points_per_device);
+        auto tablet =
+            gen_tablet(table_schema, 0, device_num, points_per_device);
         ASSERT_EQ(tsfile_table_writer_->write_table(tablet), common::E_OK);
         ASSERT_EQ(tsfile_table_writer_->flush(), common::E_OK);
         ASSERT_EQ(tsfile_table_writer_->close(), common::E_OK);
@@ -148,8 +149,8 @@
 
         ResultSet* tmp_result_set = nullptr;
         ret = reader.query(table_schema->get_table_name(),
-                           table_schema->get_measurement_names(), 0,
-                           end_time, tmp_result_set);
+                           table_schema->get_measurement_names(), 0, end_time,
+                           tmp_result_set);
         auto* table_result_set = (TableResultSet*)tmp_result_set;
         char* literal = new char[std::strlen("device_id") + 1];
         std::strcpy(literal, "device_id");
@@ -183,12 +184,15 @@
                     0);
             }
             for (int i = 7; i <= 11; i++) {
-                ASSERT_EQ(table_result_set->get_value<int64_t>(i),  (row_num / points_per_device) % device_num);
+                ASSERT_EQ(table_result_set->get_value<int64_t>(i),
+                          (row_num / points_per_device) % device_num);
             }
-            ASSERT_EQ(table_result_set->get_value<int64_t>(1), row_num % points_per_device);
+            ASSERT_EQ(table_result_set->get_value<int64_t>(1),
+                      row_num % points_per_device);
             row_num++;
         }
-        ASSERT_EQ(row_num, std::min<int64_t>(points_per_device * device_num, end_time + 1));
+        ASSERT_EQ(row_num, std::min<int64_t>(points_per_device * device_num,
+                                             end_time + 1));
         reader.destroy_query_data_set(table_result_set);
         delete[] literal;
         ASSERT_EQ(reader.close(), common::E_OK);
@@ -254,12 +258,12 @@
     ASSERT_EQ(result_set_metadata->get_column_type(1), INT64);
     for (int i = 2; i <= 6; i++) {
         ASSERT_EQ(result_set_metadata->get_column_name(i),
-                  "id" + to_string(i - 2));
+                  "id" + std::to_string(i - 2));
         ASSERT_EQ(result_set_metadata->get_column_type(i), TSDataType::STRING);
     }
     for (int i = 7; i <= 11; i++) {
         ASSERT_EQ(result_set_metadata->get_column_name(i),
-                  "s" + to_string(i - 7));
+                  "s" + std::to_string(i - 7));
         ASSERT_EQ(result_set_metadata->get_column_type(i), TSDataType::INT64);
     }
     reader.destroy_query_data_set(table_result_set);
@@ -296,7 +300,7 @@
     ASSERT_EQ(table_schemas.size(), 10);
     for (int i = 0; i < 10; i++) {
         ASSERT_EQ(table_schemas[i]->get_table_name(),
-                  "testtable" + to_string(i));
+                  "testtable" + std::to_string(i));
         for (int j = 0; j < 5; j++) {
             ASSERT_EQ(table_schemas[i]->get_data_types()[j],
                       TSDataType::STRING);
@@ -341,7 +345,6 @@
         auto tablet = gen_tablet(tmp_table_schema, cur_row, 1, tablet_size);
         ASSERT_EQ(tsfile_table_writer_->write_table(tablet), common::E_OK);
         cur_row += tablet_size;
-        std::cout << "finish writing " << cur_row << " rows" << std::endl;
     }
     ASSERT_EQ(tsfile_table_writer_->flush(), common::E_OK);
     ASSERT_EQ(tsfile_table_writer_->close(), common::E_OK);
@@ -350,10 +353,8 @@
     int ret = reader.open(file_name_);
     ASSERT_EQ(ret, common::E_OK);
     storage::ResultSet* tmp_result_set = nullptr;
-    ret = reader.query("testtable0",
-                       tmp_table_schema->get_measurement_names(), 0, 1000000000000,
-                       tmp_result_set);
-    std::cout << "begin to dump data from tsfile ---" << std::endl;
+    ret = reader.query("testtable0", tmp_table_schema->get_measurement_names(),
+                       0, 1000000000000, tmp_result_set);
     auto* table_result_set = (storage::TableResultSet*)tmp_result_set;
     bool has_next = false;
     char* literal = new char[std::strlen("device_id") + 1];
@@ -365,10 +366,14 @@
             for (int j = 0; j < column_schemas.size(); j++) {
                 switch (column_schemas[j]->data_type_) {
                     case TSDataType::INT64:
-                        ASSERT_EQ(table_result_set->get_value<int64_t>(j + 2), i);
+                        ASSERT_EQ(table_result_set->get_value<int64_t>(j + 2),
+                                  i);
                         break;
                     case TSDataType::STRING:
-                        ASSERT_EQ(table_result_set->get_value<common::String*>(j + 2)->compare(literal_str), 0);
+                        ASSERT_EQ(
+                            table_result_set->get_value<common::String*>(j + 2)
+                                ->compare(literal_str),
+                            0);
                         break;
                     default:
                         break;
@@ -380,3 +385,39 @@
     delete[] literal;
     delete tmp_table_schema;
 }
+
+TEST_F(TsFileTableReaderTest, ReadNonExistColumn) {
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+    measurement_schemas.resize(2);
+    measurement_schemas[0] = new MeasurementSchema("device", STRING);
+    measurement_schemas[1] = new MeasurementSchema("value", DOUBLE);
+    column_categories.emplace_back(ColumnCategory::TAG);
+    column_categories.emplace_back(ColumnCategory::FIELD);
+    TableSchema* table_schema =
+        new TableSchema("test_table", measurement_schemas, column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    Tablet tablet = Tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types());
+    tablet.set_table_name("test_table");
+    for (int i = 0; i < 100; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(i));
+        tablet.add_value(i, "device",
+                         std::string("device" + std::to_string(i)).c_str());
+        tablet.add_value(i, "value", i * 1.1);
+    }
+    tsfile_table_writer->write_table(tablet);
+    tsfile_table_writer->flush();
+    tsfile_table_writer->close();
+
+    TsFileReader reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    std::vector<std::string> column_names = {"non-exist-column"};
+    int ret_value = reader.query("test_table", column_names, 0, 50, ret);
+    ASSERT_NE(common::E_OK, ret_value);
+    ASSERT_EQ(ret, nullptr);
+    reader.close();
+    delete table_schema;
+}
diff --git a/cpp/test/reader/tsfile_reader_test.cc b/cpp/test/reader/tsfile_reader_test.cc
index a52eec7..c2dfe2c 100644
--- a/cpp/test/reader/tsfile_reader_test.cc
+++ b/cpp/test/reader/tsfile_reader_test.cc
@@ -35,7 +35,7 @@
 using namespace common;
 
 class TsFileReaderTest : public ::testing::Test {
-protected:
+   protected:
     void SetUp() override {
         tsfile_writer_ = new TsFileWriter();
         libtsfile_init();
@@ -58,7 +58,7 @@
     std::string file_name_;
     TsFileWriter* tsfile_writer_ = nullptr;
 
-public:
+   public:
     static std::string generate_random_string(int length) {
         std::mt19937 gen(static_cast<unsigned int>(
             std::chrono::system_clock::now().time_since_epoch().count()));
@@ -141,7 +141,8 @@
                        tmp_qds);
     auto* qds = (QDSWithoutTimeGenerator*)tmp_qds;
 
-    std::shared_ptr<ResultSetMetadata> result_set_metadata = qds->get_metadata();
+    std::shared_ptr<ResultSetMetadata> result_set_metadata =
+        qds->get_metadata();
     ASSERT_EQ(result_set_metadata->get_column_type(1), INT64);
     ASSERT_EQ(result_set_metadata->get_column_name(1), "time");
     ASSERT_EQ(result_set_metadata->get_column_type(2), data_type);
@@ -160,13 +161,13 @@
 
     for (size_t i = 0; i < 1024; i++) {
         tsfile_writer_->register_timeseries(
-            "device.ln" + to_string(i),
+            "device.ln" + std::to_string(i),
             storage::MeasurementSchema(measurement_name, data_type, encoding,
                                        compression_type));
     }
 
     for (size_t i = 0; i < 1024; i++) {
-        TsRecord record(1622505600000, "device.ln" + to_string(i));
+        TsRecord record(1622505600000, "device.ln" + std::to_string(i));
         record.add_point(measurement_name, (int32_t)0);
         ASSERT_EQ(tsfile_writer_->write_record(record), E_OK);
     }
@@ -180,12 +181,12 @@
     ASSERT_EQ(devices.size(), 1024);
     std::vector<std::shared_ptr<IDeviceID> > devices_name_expected;
     for (size_t i = 0; i < 1024; i++) {
-        devices_name_expected.push_back(
-            std::make_shared<StringArrayDeviceID>(
-                "device.ln" + std::to_string(i)));
+        devices_name_expected.push_back(std::make_shared<StringArrayDeviceID>(
+            "device.ln" + std::to_string(i)));
     }
     std::sort(devices_name_expected.begin(), devices_name_expected.end(),
-              [](const std::shared_ptr<IDeviceID>& left_str, const std::shared_ptr<IDeviceID>& right_str) {
+              [](const std::shared_ptr<IDeviceID>& left_str,
+                 const std::shared_ptr<IDeviceID>& right_str) {
                   return left_str->operator<(*right_str);
               });
 
diff --git a/cpp/test/utils/db_utils_test.cc b/cpp/test/utils/db_utils_test.cc
index ae0e837..fb455ee 100644
--- a/cpp/test/utils/db_utils_test.cc
+++ b/cpp/test/utils/db_utils_test.cc
@@ -117,56 +117,6 @@
     EXPECT_FALSE(ts_id2 < ts_id1);
 }
 
-TEST(DeviceIDTest, Constructor) {
-    DeviceID device_id;
-    EXPECT_EQ(device_id.db_nid_, 0);
-    EXPECT_EQ(device_id.device_nid_, 0);
-}
-
-TEST(DeviceIDTest, ParameterizedConstructor) {
-    DeviceID device_id(1, 2);
-    EXPECT_EQ(device_id.db_nid_, 1);
-    EXPECT_EQ(device_id.device_nid_, 2);
-}
-
-TEST(DeviceIDTest, TsIDConstructor) {
-    TsID ts_id(1, 2, 3);
-    DeviceID device_id(ts_id);
-    EXPECT_EQ(device_id.db_nid_, 1);
-    EXPECT_EQ(device_id.device_nid_, 2);
-}
-
-TEST(DeviceIDTest, OperatorEqual) {
-    DeviceID device_id1(1, 2);
-    DeviceID device_id2(1, 2);
-    EXPECT_TRUE(device_id1 == device_id2);
-    device_id2.db_nid_ = 3;
-    EXPECT_FALSE(device_id1 == device_id2);
-}
-
-TEST(DeviceIDTest, OperatorNotEqual) {
-    DeviceID device_id1(1, 2);
-    DeviceID device_id2(1, 2);
-    EXPECT_FALSE(device_id1 != device_id2);
-    device_id2.db_nid_ = 3;
-    EXPECT_TRUE(device_id1 != device_id2);
-}
-
-TEST(DatabaseDescTest, Constructor) {
-    DatabaseDesc db_desc;
-    EXPECT_EQ(db_desc.ttl_, INVALID_TTL);
-    EXPECT_EQ(db_desc.db_name_, "");
-    EXPECT_EQ(db_desc.ts_id_.db_nid_, 0);
-}
-
-TEST(DatabaseDescTest, ParameterizedConstructor) {
-    TsID ts_id(1, 2, 3);
-    DatabaseDesc db_desc(1000, "test_db", ts_id);
-    EXPECT_EQ(db_desc.ttl_, 1000);
-    EXPECT_EQ(db_desc.db_name_, "test_db");
-    EXPECT_EQ(db_desc.ts_id_, ts_id);
-}
-
 TEST(ColumnSchemaTest, Constructor) {
     ColumnSchema col_schema;
     EXPECT_EQ(col_schema.data_type_, INVALID_DATATYPE);
diff --git a/cpp/test/writer/table_view/tsfile_writer_table_test.cc b/cpp/test/writer/table_view/tsfile_writer_table_test.cc
index a13c20b..8c373a3 100644
--- a/cpp/test/writer/table_view/tsfile_writer_table_test.cc
+++ b/cpp/test/writer/table_view/tsfile_writer_table_test.cc
@@ -69,24 +69,25 @@
         return random_string;
     }
 
-    static TableSchema* gen_table_schema(int table_num) {
+    static TableSchema* gen_table_schema(int table_num, int id_col_num = 5,
+                                         int field_col_num = 5) {
         std::vector<MeasurementSchema*> measurement_schemas;
         std::vector<ColumnCategory> column_categories;
-        int id_schema_num = 5;
-        int measurement_schema_num = 5;
+        int id_schema_num = id_col_num;
+        int measurement_schema_num = field_col_num;
         for (int i = 0; i < id_schema_num; i++) {
             measurement_schemas.emplace_back(new MeasurementSchema(
-                "id" + to_string(i), TSDataType::STRING, TSEncoding::PLAIN,
+                "id" + std::to_string(i), TSDataType::STRING, TSEncoding::PLAIN,
                 CompressionType::UNCOMPRESSED));
             column_categories.emplace_back(ColumnCategory::TAG);
         }
         for (int i = 0; i < measurement_schema_num; i++) {
             measurement_schemas.emplace_back(new MeasurementSchema(
-                "s" + to_string(i), TSDataType::INT64, TSEncoding::PLAIN,
+                "s" + std::to_string(i), TSDataType::INT64, TSEncoding::PLAIN,
                 CompressionType::UNCOMPRESSED));
             column_categories.emplace_back(ColumnCategory::FIELD);
         }
-        return new TableSchema("testTable" + to_string(table_num),
+        return new TableSchema("testTable" + std::to_string(table_num),
                                measurement_schemas, column_categories);
     }
 
@@ -96,14 +97,16 @@
         storage::Tablet tablet(table_schema->get_measurement_names(),
                                table_schema->get_data_types(),
                                device_num * num_timestamp_per_device);
-
-        char* literal = new char[std::strlen("device_id") + 1];
-        std::strcpy(literal, "device_id");
-        String literal_str(literal, std::strlen("device_id"));
+        static int timestamp = 0;
         for (int i = 0; i < device_num; i++) {
+            PageArena pa;
+            pa.init(512, MOD_DEFAULT);
+            std::string device_str =
+                std::string("device_id_") + std::to_string(i);
+            String literal_str(device_str, pa);
             for (int l = 0; l < num_timestamp_per_device; l++) {
                 int row_index = i * num_timestamp_per_device + l;
-                tablet.add_timestamp(row_index, offset + l);
+                tablet.add_timestamp(row_index, timestamp++);
                 auto column_schemas = table_schema->get_measurement_schemas();
                 for (const auto& column_schema : column_schemas) {
                     switch (column_schema->data_type_) {
@@ -123,7 +126,6 @@
                 }
             }
         }
-        delete[] literal;
         return tablet;
     }
 };
@@ -187,6 +189,56 @@
     delete table_schema;
 }
 
+TEST_F(TsFileWriterTableTest, WriteDisorderTest) {
+    auto table_schema = gen_table_schema(0);
+    auto tsfile_table_writer_ =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+
+    int device_num = 1;
+    int num_timestamp_per_device = 10;
+    int offset = 0;
+    storage::Tablet tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types(),
+                           device_num * num_timestamp_per_device);
+
+    char* literal = new char[std::strlen("device_id") + 1];
+    std::strcpy(literal, "device_id");
+    String literal_str(literal, std::strlen("device_id"));
+    for (int i = 0; i < device_num; i++) {
+        for (int l = 0; l < num_timestamp_per_device; l++) {
+            int row_index = i * num_timestamp_per_device + l;
+            // disordered timestamp.
+            tablet.add_timestamp(row_index, l > num_timestamp_per_device / 2
+                                                ? l - num_timestamp_per_device
+                                                : offset + l);
+            auto column_schemas = table_schema->get_measurement_schemas();
+            for (const auto& column_schema : column_schemas) {
+                switch (column_schema->data_type_) {
+                    case TSDataType::INT64:
+                        tablet.add_value(row_index,
+                                         column_schema->measurement_name_,
+                                         static_cast<int64_t>(i));
+                        break;
+                    case TSDataType::STRING:
+                        tablet.add_value(row_index,
+                                         column_schema->measurement_name_,
+                                         literal_str);
+                        break;
+                    default:
+                        break;
+                }
+            }
+        }
+    }
+    delete[] literal;
+
+    ASSERT_EQ(tsfile_table_writer_->write_table(tablet),
+              common::E_OUT_OF_ORDER);
+    ASSERT_EQ(tsfile_table_writer_->flush(), common::E_OK);
+    ASSERT_EQ(tsfile_table_writer_->close(), common::E_OK);
+    delete table_schema;
+}
+
 TEST_F(TsFileWriterTableTest, WriteTableTestMultiFlush) {
     auto table_schema = gen_table_schema(0);
     auto tsfile_table_writer_ = std::make_shared<TsFileTableWriter>(
@@ -250,6 +302,62 @@
     delete table_schema;
 }
 
+TEST_F(TsFileWriterTableTest, EmptyTagWrite) {
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+    measurement_schemas.resize(3);
+    measurement_schemas[0] = new MeasurementSchema("device1", STRING);
+    measurement_schemas[1] = new MeasurementSchema("device2", STRING);
+    measurement_schemas[2] = new MeasurementSchema("value", DOUBLE);
+    column_categories.emplace_back(ColumnCategory::TAG);
+    column_categories.emplace_back(ColumnCategory::TAG);
+    column_categories.emplace_back(ColumnCategory::FIELD);
+    TableSchema* table_schema =
+        new TableSchema("test_table", measurement_schemas, column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    Tablet tablet = Tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types());
+    tablet.set_table_name("test_table");
+    for (int i = 0; i < 100; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(i));
+        tablet.add_value(i, "device1",
+                         std::string("device" + std::to_string(i)).c_str());
+        tablet.add_value(i, "device2", "");
+        tablet.add_value(i, "value", i * 1.1);
+    }
+    tsfile_table_writer->write_table(tablet);
+    tsfile_table_writer->flush();
+    tsfile_table_writer->close();
+
+    TsFileReader reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    int ret_value =
+        reader.query("test_table", {"device1", "device2", "value"}, 0, 50, ret);
+    ASSERT_EQ(common::E_OK, ret_value);
+
+    ASSERT_EQ(ret_value, 0);
+    auto* table_result_set = (TableResultSet*)ret;
+    bool has_next = false;
+    int cur_line = 0;
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        cur_line++;
+        int64_t timestamp = table_result_set->get_value<int64_t>("time");
+        ASSERT_EQ(table_result_set->get_value<common::String*>("device1")
+                      ->to_std_string(),
+                  "device" + std::to_string(timestamp));
+        ASSERT_EQ(table_result_set->get_value<double>("value"),
+                  timestamp * 1.1);
+    }
+    ASSERT_EQ(cur_line, 51);
+    table_result_set->close();
+    reader.destroy_query_data_set(table_result_set);
+
+    reader.close();
+    delete table_schema;
+}
+
 TEST_F(TsFileWriterTableTest, WritehDataTypeMisMatch) {
     auto table_schema = gen_table_schema(0);
     auto tsfile_table_writer_ = std::make_shared<TsFileTableWriter>(
@@ -329,11 +437,14 @@
     TsFileReader reader = TsFileReader();
     reader.open(write_file_.get_file_path());
     ResultSet* ret = nullptr;
-    int ret_value = reader.query("test_table", {"device", "value"}, 0, 50, ret);
+    std::vector<std::string> column_names = {"device", "VALUE"};
+    int ret_value = reader.query("test_table", column_names, 0, 50, ret);
     ASSERT_EQ(common::E_OK, ret_value);
 
     ASSERT_EQ(ret_value, 0);
     auto* table_result_set = (TableResultSet*)ret;
+    auto metadata = ret->get_metadata();
+    ASSERT_EQ(metadata->get_column_name(column_names.size() + 1), "VALUE");
     bool has_next = false;
     int cur_line = 0;
     while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
@@ -341,8 +452,8 @@
         int64_t timestamp = table_result_set->get_value<int64_t>("time");
         ASSERT_EQ(table_result_set->get_value<common::String*>("device")
                       ->to_std_string(),
-                  "device" + to_string(timestamp));
-        ASSERT_EQ(table_result_set->get_value<double>("value"),
+                  "device" + std::to_string(timestamp));
+        ASSERT_EQ(table_result_set->get_value<double>("VaLue"),
                   timestamp * 1.1);
     }
     ASSERT_EQ(cur_line, 51);
@@ -389,4 +500,602 @@
     ASSERT_EQ(E_INVALID_ARG, tsfile_table_writer->register_table(
                                  std::make_shared<TableSchema>(*table_schema)));
     delete table_schema;
+}
+
+TEST_F(TsFileWriterTableTest, WriteWithNullAndEmptyTag) {
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+    for (int i = 0; i < 3; i++) {
+        measurement_schemas.emplace_back(new MeasurementSchema(
+            "id" + std::to_string(i), TSDataType::STRING));
+        column_categories.emplace_back(ColumnCategory::TAG);
+    }
+    measurement_schemas.emplace_back(new MeasurementSchema("value", DOUBLE));
+    column_categories.emplace_back(ColumnCategory::FIELD);
+    auto table_schema =
+        new TableSchema("testTable", measurement_schemas, column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    int time = 0;
+    Tablet tablet = Tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types(), 10);
+
+    for (int i = 0; i < 10; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(time++));
+        tablet.add_value(i, "ID0", "tag1");
+        tablet.add_value(i, 1, "tag2");
+        tablet.add_value(i, 2, "tag3");
+        tablet.add_value(i, 3, 100.0f);
+    }
+
+    tsfile_table_writer->write_table(tablet);
+    Tablet tablet2 = Tablet(table_schema->get_measurement_names(),
+                            table_schema->get_data_types(), 10);
+
+    for (int i = 0; i < 10; i++) {
+        tablet2.add_timestamp(i, static_cast<int64_t>(time++));
+        tablet2.add_value(i, 0, i % 2 == 0 ? "" : "tag4");
+        tablet2.add_value(i, 1, i % 2 == 1 ? "" : "tag5");
+        tablet2.add_value(i, 2, i % 3 == 0 ? "" : "tag6");
+        tablet2.add_value(i, 3, 101.0f);
+    }
+    tsfile_table_writer->write_table(tablet2);
+
+    Tablet tablet3 = Tablet(table_schema->get_measurement_names(),
+                            table_schema->get_data_types(), 10);
+    for (int i = 0; i < 10; i++) {
+        tablet3.add_timestamp(i, static_cast<int64_t>(time++));
+        tablet3.add_value(i, 0, "tag7");
+        if (i % 2 == 0) {
+            tablet3.add_value(i, 1, "tag8\0ta");
+        } else {
+            tablet3.add_value(i, 2, "tag9");
+        }
+        tablet3.add_value(i, 3, 102.0f);
+    }
+
+    tsfile_table_writer->write_table(tablet3);
+    tsfile_table_writer->flush();
+    tsfile_table_writer->close();
+
+    delete table_schema;
+
+    auto reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    int ret_value =
+        reader.query("testTable", {"id0", "id1", "id2", "value"}, 0, 50, ret);
+    ASSERT_EQ(common::E_OK, ret_value);
+
+    auto table_result_set = (TableResultSet*)ret;
+    bool has_next = false;
+    auto schema = table_result_set->get_metadata();
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        int64_t timestamp = table_result_set->get_value<int64_t>(1);
+        switch (timestamp) {
+            case 0: {
+                // All tag fields have valid values.
+                ASSERT_EQ(common::String(std::string("tag1")),
+                          *table_result_set->get_value<common::String*>(2));
+                ASSERT_EQ(common::String(std::string("tag2")),
+                          *table_result_set->get_value<common::String*>(3));
+                ASSERT_EQ(common::String(std::string("tag3")),
+                          *table_result_set->get_value<common::String*>(4));
+                ASSERT_EQ(100.0f, table_result_set->get_value<double>(5));
+                break;
+            }
+            case 10: {
+                // The first and last tag fields are empty strings.
+                ASSERT_EQ(common::String(std::string("")),
+                          *table_result_set->get_value<common::String*>(2));
+                ASSERT_EQ(common::String(std::string("tag5")),
+                          *table_result_set->get_value<common::String*>(3));
+                ASSERT_EQ(common::String(std::string("")),
+                          *table_result_set->get_value<common::String*>(4));
+                ASSERT_EQ(101.0f, table_result_set->get_value<double>(5));
+                break;
+            }
+            case 11: {
+                // The middle tag field is an empty string.
+                ASSERT_EQ(common::String(std::string("tag4")),
+                          *table_result_set->get_value<common::String*>(2));
+                ASSERT_EQ(common::String(std::string("")),
+                          *table_result_set->get_value<common::String*>(3));
+                ASSERT_EQ(common::String(std::string("tag6")),
+                          *table_result_set->get_value<common::String*>(4));
+                ASSERT_EQ(101.0f, table_result_set->get_value<double>(5));
+                break;
+            }
+            case 20: {
+                // The last tag field is null.
+                ASSERT_EQ(common::String(std::string("tag7")),
+                          *table_result_set->get_value<common::String*>(2));
+                ASSERT_EQ(common::String(std::string("tag8\0ta")),
+                          *table_result_set->get_value<common::String*>(3));
+                ASSERT_TRUE(table_result_set->is_null(4));
+                ASSERT_EQ(102.0f, table_result_set->get_value<double>(5));
+                break;
+            }
+            case 21: {
+                // The middle tag field is null.
+                ASSERT_EQ(common::String(std::string("tag7")),
+                          *table_result_set->get_value<common::String*>(2));
+                ASSERT_EQ(common::String(std::string("tag9")),
+                          *table_result_set->get_value<common::String*>(4));
+                ASSERT_TRUE(table_result_set->is_null(3));
+                ASSERT_EQ(102.0f, table_result_set->get_value<double>(5));
+                break;
+            }
+            default:
+                break;
+        }
+    }
+    reader.destroy_query_data_set(table_result_set);
+    ASSERT_EQ(reader.close(), common::E_OK);
+}
+
+TEST_F(TsFileWriterTableTest, MultiDeviceMultiFields) {
+    common::config_set_max_degree_of_index_node(5);
+    auto table_schema = gen_table_schema(0, 1, 100);
+    auto tsfile_table_writer_ =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    int num_row_per_device = 10;
+    auto tablet = gen_tablet(table_schema, 0, 100, num_row_per_device);
+    ASSERT_EQ(tsfile_table_writer_->write_table(tablet), common::E_OK);
+    ASSERT_EQ(tsfile_table_writer_->flush(), common::E_OK);
+    ASSERT_EQ(tsfile_table_writer_->close(), common::E_OK);
+
+    storage::TsFileReader reader;
+    int ret = reader.open(file_name_);
+    ASSERT_EQ(ret, common::E_OK);
+
+    ResultSet* tmp_result_set = nullptr;
+    ret = reader.query(table_schema->get_table_name(),
+                       table_schema->get_measurement_names(), 0, INT32_MAX,
+                       tmp_result_set);
+    auto* table_result_set = (TableResultSet*)tmp_result_set;
+    bool has_next = false;
+    int64_t row_num = 0;
+    auto result_set_meta = table_result_set->get_metadata();
+    ASSERT_EQ(result_set_meta->get_column_count(),
+              table_schema->get_columns_num() + 1);  // +1: time column
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        auto column_schemas = table_schema->get_measurement_schemas();
+        std::string tag_col_val;  // "device_id_[num]"
+        std::string tag_col_val_prefix = "device_id_";
+        for (const auto& column_schema : column_schemas) {
+            switch (column_schema->data_type_) {
+                case TSDataType::INT64:
+                    if (!table_result_set->is_null(
+                            column_schema->measurement_name_)) {
+                        std::string num = tag_col_val.substr(
+                            tag_col_val_prefix.length(),
+                            tag_col_val.length() - tag_col_val_prefix.length());
+                        EXPECT_EQ(table_result_set->get_value<int64_t>(
+                                      column_schema->measurement_name_),
+                                  std::stoi(num));
+                    }
+                    break;
+                case TSDataType::STRING:
+                    tag_col_val = table_result_set
+                                      ->get_value<common::String*>(
+                                          column_schema->measurement_name_)
+                                      ->to_std_string();
+                default:
+                    break;
+            }
+        }
+        row_num++;
+    }
+    ASSERT_EQ(row_num, tablet.get_cur_row_size());
+    reader.destroy_query_data_set(table_result_set);
+    ASSERT_EQ(reader.close(), common::E_OK);
+    delete table_schema;
+}
+
+TEST_F(TsFileWriterTableTest, WriteDataWithEmptyField) {
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+    for (int i = 0; i < 3; i++) {
+        measurement_schemas.emplace_back(new MeasurementSchema(
+            "id" + std::to_string(i), TSDataType::STRING));
+        column_categories.emplace_back(ColumnCategory::TAG);
+    }
+    measurement_schemas.emplace_back(new MeasurementSchema("value", DOUBLE));
+    measurement_schemas.emplace_back(new MeasurementSchema("value1", INT32));
+    column_categories.emplace_back(ColumnCategory::FIELD);
+    column_categories.emplace_back(ColumnCategory::FIELD);
+    auto table_schema =
+        new TableSchema("testTable", measurement_schemas, column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    int time = 0;
+    Tablet tablet = Tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types(), 100);
+
+    for (int i = 0; i < 100; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(time++));
+        tablet.add_value(i, 0, "tag1");
+        tablet.add_value(i, 1, "tag2");
+        if (i % 3 == 0) {
+            // all device has no data
+            tablet.add_value(i, 2, "tag_null");
+        } else {
+            tablet.add_value(i, 2, "tag3");
+            tablet.add_value(i, 3, 100.0f);
+            if (i % 5 == 0) {
+                tablet.add_value(i, 4, 100);
+            }
+        }
+    }
+    tsfile_table_writer->write_table(tablet);
+    tsfile_table_writer->flush();
+    tsfile_table_writer->close();
+
+    delete table_schema;
+
+    auto reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    int ret_value = reader.query(
+        "testTable", {"id0", "id1", "id2", "value", "value1"}, 0, 100, ret);
+    ASSERT_EQ(common::E_OK, ret_value);
+
+    auto table_result_set = (TableResultSet*)ret;
+    bool has_next = false;
+    auto schema = table_result_set->get_metadata();
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        int64_t timestamp = table_result_set->get_value<int64_t>(1);
+        ASSERT_EQ(common::String("tag1"),
+                  *table_result_set->get_value<common::String*>(2));
+        ASSERT_EQ(common::String("tag2"),
+                  *table_result_set->get_value<common::String*>(3));
+        if (timestamp % 3 == 0) {
+            ASSERT_EQ(common::String("tag_null"),
+                      *table_result_set->get_value<common::String*>(4));
+            ASSERT_TRUE(table_result_set->is_null(5));
+            ASSERT_TRUE(table_result_set->is_null(6));
+        } else {
+            ASSERT_EQ(common::String("tag3"),
+                      *table_result_set->get_value<common::String*>(4));
+            ASSERT_EQ(100.0f, table_result_set->get_value<double>(5));
+            if (timestamp % 5 == 0) {
+                ASSERT_EQ(100, table_result_set->get_value<int32_t>(6));
+            } else {
+                ASSERT_TRUE(table_result_set->is_null(6));
+            }
+        }
+    }
+    reader.destroy_query_data_set(table_result_set);
+    ASSERT_EQ(reader.close(), common::E_OK);
+}
+
+TEST_F(TsFileWriterTableTest, MultiDatatypes) {
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+
+    std::vector<std::string> measurement_names = {
+        "level", "num", "bools", "double", "id", "ts", "text", "blob", "date"};
+    std::vector<common::TSDataType> data_types = {
+        FLOAT, INT64, BOOLEAN, DOUBLE, STRING, TIMESTAMP, TEXT, BLOB, DATE};
+
+    for (int i = 0; i < measurement_names.size(); i++) {
+        measurement_schemas.emplace_back(
+            new MeasurementSchema(measurement_names[i], data_types[i]));
+        column_categories.emplace_back(ColumnCategory::FIELD);
+    }
+    auto table_schema =
+        new TableSchema("testTable", measurement_schemas, column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    int time = 0;
+    Tablet tablet = Tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types(), 100);
+
+    char* literal = new char[std::strlen("device_id") + 1];
+    std::strcpy(literal, "device_id");
+    String literal_str(literal, std::strlen("device_id"));
+    std::time_t now = std::time(nullptr);
+    std::tm* local_time = std::localtime(&now);
+    std::tm today = {};
+    today.tm_year = local_time->tm_year;
+    today.tm_mon = local_time->tm_mon;
+    today.tm_mday = local_time->tm_mday;
+    for (int i = 0; i < 100; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(time++));
+        for (int j = 0; j < measurement_schemas.size(); j++) {
+            switch (data_types[j]) {
+                case BOOLEAN:
+                    ASSERT_EQ(tablet.add_value(i, j, true), E_OK);
+                    break;
+                case INT64:
+                    ASSERT_EQ(tablet.add_value(i, j, (int64_t)415412), E_OK);
+                    break;
+                case FLOAT:
+                    ASSERT_EQ(tablet.add_value(i, j, (float)1.0), E_OK);
+                    break;
+                case DOUBLE:
+                    ASSERT_EQ(tablet.add_value(i, j, (double)2.0), E_OK);
+                    break;
+                case STRING:
+                    ASSERT_EQ(tablet.add_value(i, j, literal_str), E_OK);
+                    break;
+                case TEXT:
+                    ASSERT_EQ(tablet.add_value(i, j, literal_str), E_OK);
+                    break;
+                case BLOB:
+                    ASSERT_EQ(tablet.add_value(i, j, literal_str), E_OK);
+                    break;
+                case TIMESTAMP:
+                    ASSERT_EQ(tablet.add_value(i, j, (int64_t)415412), E_OK);
+                    break;
+                case DATE:
+                    ASSERT_EQ(tablet.add_value(i, j, today), E_OK);
+                default:
+                    break;
+            }
+        }
+    }
+    ASSERT_EQ(tsfile_table_writer->write_table(tablet), E_OK);
+    ASSERT_EQ(tsfile_table_writer->flush(), E_OK);
+    ASSERT_EQ(tsfile_table_writer->close(), E_OK);
+
+    delete table_schema;
+
+    auto reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    int ret_value = reader.query("testTable", measurement_names, 0, 100, ret);
+    ASSERT_EQ(common::E_OK, ret_value);
+
+    auto table_result_set = (TableResultSet*)ret;
+    bool has_next = false;
+    int cur_line = 0;
+    auto schema = table_result_set->get_metadata();
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        int64_t timestamp = table_result_set->get_value<int64_t>(1);
+        ASSERT_EQ(table_result_set->get_value<float>(2), (float)1.0);
+        ASSERT_EQ(table_result_set->get_value<int64_t>(3), (int64_t)415412);
+        ASSERT_EQ(table_result_set->get_value<bool>(4), true);
+        ASSERT_EQ(table_result_set->get_value<double>(5), (double)2.0);
+        ASSERT_EQ(table_result_set->get_value<common::String*>(6)->compare(
+                      literal_str),
+                  0);
+        ASSERT_EQ(table_result_set->get_value<int64_t>(7), (int64_t)415412);
+        ASSERT_EQ(table_result_set->get_value<common::String*>(8)->compare(
+                      literal_str),
+                  0);
+        ASSERT_EQ(table_result_set->get_value<common::String*>(9)->compare(
+                      literal_str),
+                  0);
+        ASSERT_TRUE(DateConverter::is_tm_ymd_equal(
+            table_result_set->get_value<std::tm>(10), today));
+    }
+    reader.destroy_query_data_set(table_result_set);
+    ASSERT_EQ(reader.close(), common::E_OK);
+    delete[] literal;
+}
+
+TEST_F(TsFileWriterTableTest, DiffCodecTypes) {
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+
+    common::CompressionType compression_type =
+        common::CompressionType::UNCOMPRESSED;
+    std::vector<std::string> measurement_names = {
+        "int32_zigzag",  "int64_zigzag",   "string_dic",    "text_dic",
+        "float_gorilla", "double_gorilla", "int32_ts2diff", "int64_ts2diff",
+        "int32_rle",     "int64_rle",      "int32_sprintz", "int64_sprintz",
+        "float_sprintz", "double_sprintz",
+    };
+    std::vector<common::TSDataType> data_types = {
+        INT32, INT64, STRING, TEXT,  FLOAT, DOUBLE, INT32,
+        INT64, INT32, INT64,  INT32, INT64, FLOAT,  DOUBLE};
+    std::vector<common::TSEncoding> encodings = {
+        ZIGZAG,   ZIGZAG, DICTIONARY, DICTIONARY, GORILLA, GORILLA, TS_2DIFF,
+        TS_2DIFF, RLE,    RLE,        SPRINTZ,    SPRINTZ, SPRINTZ, SPRINTZ};
+
+    for (int i = 0; i < measurement_names.size(); i++) {
+        measurement_schemas.emplace_back(new MeasurementSchema(
+            measurement_names[i], data_types[i], encodings[i], UNCOMPRESSED));
+        column_categories.emplace_back(ColumnCategory::FIELD);
+    }
+    auto table_schema =
+        new TableSchema("testTable", measurement_schemas, column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+    int time = 0;
+    Tablet tablet = Tablet(table_schema->get_measurement_names(),
+                           table_schema->get_data_types(), 100);
+
+    char* literal = new char[std::strlen("device_id") + 1];
+    std::strcpy(literal, "device_id");
+    String literal_str(literal, std::strlen("device_id"));
+    for (int i = 0; i < 100; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(time++));
+        for (int j = 0; j < measurement_schemas.size(); j++) {
+            std::string measurement_name = measurement_names[j];
+            switch (data_types[j]) {
+                case BOOLEAN:
+                    ASSERT_EQ(tablet.add_value(i, j, true), E_OK);
+                    break;
+                case INT32:
+                    ASSERT_EQ(tablet.add_value(i, j, (int32_t)32), E_OK);
+                    break;
+                case INT64:
+                    ASSERT_EQ(tablet.add_value(i, j, (int64_t)64), E_OK);
+                    break;
+                case FLOAT:
+                    ASSERT_EQ(tablet.add_value(i, j, (float)1.0), E_OK);
+                    break;
+                case DOUBLE:
+                    ASSERT_EQ(tablet.add_value(i, j, (double)2.0), E_OK);
+                    break;
+                case TEXT:
+                case STRING:
+                case BLOB:
+                    ASSERT_EQ(tablet.add_value(i, j, literal_str), E_OK);
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+    ASSERT_EQ(tsfile_table_writer->write_table(tablet), E_OK);
+    ASSERT_EQ(tsfile_table_writer->flush(), E_OK);
+    ASSERT_EQ(tsfile_table_writer->close(), E_OK);
+
+    delete table_schema;
+
+    auto reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    int ret_value = reader.query("testTable", measurement_names, 0, 100, ret);
+    ASSERT_EQ(common::E_OK, ret_value);
+
+    auto table_result_set = (TableResultSet*)ret;
+    bool has_next = false;
+    int cur_line = 0;
+    auto schema = table_result_set->get_metadata();
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        int64_t timestamp = table_result_set->get_value<int64_t>(1);
+        ASSERT_EQ(table_result_set->get_value<int32_t>(2), 32);
+        ASSERT_EQ(table_result_set->get_value<int64_t>(3), 64);
+
+        ASSERT_EQ(table_result_set->get_value<common::String*>(4)->compare(
+                      literal_str),
+                  0);
+        ASSERT_EQ(table_result_set->get_value<common::String*>(5)->compare(
+                      literal_str),
+                  0);
+
+        ASSERT_EQ(table_result_set->get_value<float>(6), (float)1.0);
+        ASSERT_EQ(table_result_set->get_value<double>(7), (double)2.0);
+
+        ASSERT_EQ(table_result_set->get_value<int32_t>(8), 32);
+        ASSERT_EQ(table_result_set->get_value<int64_t>(9), 64);
+
+        ASSERT_EQ(table_result_set->get_value<int32_t>(10), 32);
+        ASSERT_EQ(table_result_set->get_value<int64_t>(11), 64);
+        // SPRINTZ
+        ASSERT_EQ(table_result_set->get_value<int32_t>(12), 32);
+        ASSERT_EQ(table_result_set->get_value<int64_t>(13), 64);
+        ASSERT_FLOAT_EQ(table_result_set->get_value<float>(14), (float)1.0);
+        ASSERT_DOUBLE_EQ(table_result_set->get_value<double>(15), (double)2.0);
+    }
+    reader.destroy_query_data_set(table_result_set);
+    ASSERT_EQ(reader.close(), common::E_OK);
+    delete[] literal;
+}
+
+TEST_F(TsFileWriterTableTest, EncodingConfigIntegration) {
+    // 1. Test setting global compression type
+    ASSERT_EQ(E_OK, set_global_compression(SNAPPY));
+
+    // 2. Test setting encoding types for different data types
+    ASSERT_EQ(E_OK, set_datatype_encoding(INT32, SPRINTZ));
+    ASSERT_EQ(E_OK, set_datatype_encoding(INT64, TS_2DIFF));
+    ASSERT_EQ(E_OK, set_datatype_encoding(FLOAT, GORILLA));
+    ASSERT_EQ(E_OK, set_datatype_encoding(DOUBLE, GORILLA));
+    ASSERT_EQ(E_OK, set_datatype_encoding(STRING, DICTIONARY));
+    ASSERT_EQ(E_OK, set_datatype_encoding(DATE, PLAIN));  // Added DATE support
+    ASSERT_EQ(E_OK,
+              set_datatype_encoding(TEXT, DICTIONARY));  // Added TEXT support
+
+    // 3. Create schema using these configurations
+    std::vector<MeasurementSchema*> measurement_schemas;
+    std::vector<ColumnCategory> column_categories;
+
+    std::vector<std::string> measurement_names = {
+        "int32_sprintz", "int64_ts2diff", "float_gorilla", "double_gorilla",
+        "string_dict",   "date_plain",    "text_dict"};
+
+    std::vector<common::TSDataType> data_types = {INT32,  INT64, FLOAT, DOUBLE,
+                                                  STRING, DATE,  TEXT};
+
+    std::vector<common::TSEncoding> encodings = {
+        SPRINTZ, TS_2DIFF, GORILLA, GORILLA, DICTIONARY, PLAIN, DICTIONARY};
+
+    // Create measurement schemas with configured encodings and compression
+    for (int i = 0; i < measurement_names.size(); i++) {
+        measurement_schemas.emplace_back(new MeasurementSchema(
+            measurement_names[i], data_types[i], encodings[i], SNAPPY));
+        column_categories.emplace_back(ColumnCategory::FIELD);
+    }
+
+    // 4. Write and verify data
+    auto table_schema = new TableSchema("configTestTable", measurement_schemas,
+                                        column_categories);
+    auto tsfile_table_writer =
+        std::make_shared<TsFileTableWriter>(&write_file_, table_schema);
+
+    // Create test data tablet
+    Tablet tablet(table_schema->get_measurement_names(),
+                  table_schema->get_data_types(), 10);
+    char* literal = new char[std::strlen("test_str") + 1];
+    std::strcpy(literal, "test_str");
+    String literal_str(literal, std::strlen("test_str"));
+
+    // Prepare DATE and TEXT values
+    std::time_t now = std::time(nullptr);
+    std::tm* local_time = std::localtime(&now);
+    std::tm today = {};
+    today.tm_year = local_time->tm_year;
+    today.tm_mon = local_time->tm_mon;
+    today.tm_mday = local_time->tm_mday;
+    char* text_literal = new char[std::strlen("sample_text") + 1];
+    std::strcpy(text_literal, "sample_text");
+    String text_str(text_literal, std::strlen("sample_text"));
+
+    // Fill tablet with test values
+    for (int i = 0; i < 10; i++) {
+        tablet.add_timestamp(i, static_cast<int64_t>(i));
+        tablet.add_value(i, 0, (int32_t)32);  // INT32 with SPRINTZ encoding
+        tablet.add_value(i, 1, (int64_t)64);  // INT64 with TS_2DIFF encoding
+        tablet.add_value(i, 2, (float)1.0);   // FLOAT with GORILLA encoding
+        tablet.add_value(i, 3, (double)2.0);  // DOUBLE with GORILLA encoding
+        tablet.add_value(i, 4, literal_str);  // STRING with DICTIONARY encoding
+        tablet.add_value(i, 5, today);  // DATE with PLAIN encoding (added)
+        tablet.add_value(i, 6,
+                         text_str);  // TEXT with DICTIONARY encoding (added)
+    }
+
+    // Write and flush data
+    ASSERT_EQ(tsfile_table_writer->write_table(tablet), E_OK);
+    ASSERT_EQ(tsfile_table_writer->flush(), E_OK);
+    ASSERT_EQ(tsfile_table_writer->close(), E_OK);
+
+    // 5. Verify read data matches what was written
+    auto reader = TsFileReader();
+    reader.open(write_file_.get_file_path());
+    ResultSet* ret = nullptr;
+    int ret_value =
+        reader.query("configTestTable", measurement_names, 0, 10, ret);
+    ASSERT_EQ(common::E_OK, ret_value);
+
+    auto table_result_set = (TableResultSet*)ret;
+    bool has_next = false;
+    while (IS_SUCC(table_result_set->next(has_next)) && has_next) {
+        // Verify all values were correctly encoded/decoded
+        ASSERT_EQ(table_result_set->get_value<int32_t>(2), 32);        // INT32
+        ASSERT_EQ(table_result_set->get_value<int64_t>(3), 64);        // INT64
+        ASSERT_FLOAT_EQ(table_result_set->get_value<float>(4), 1.0f);  // FLOAT
+        ASSERT_DOUBLE_EQ(table_result_set->get_value<double>(5),
+                         2.0);  // DOUBLE
+        ASSERT_EQ(table_result_set->get_value<common::String*>(6)->compare(
+                      literal_str),
+                  0);  // STRING
+        ASSERT_TRUE(DateConverter::is_tm_ymd_equal(
+            table_result_set->get_value<std::tm>(7), today));
+        ASSERT_EQ(
+            table_result_set->get_value<common::String*>(8)->compare(text_str),
+            0);  // TEXT (added)
+    }
+
+    // 6. Clean up resources
+    reader.destroy_query_data_set(table_result_set);
+    ASSERT_EQ(reader.close(), common::E_OK);
+    delete[] literal;
+    delete[] text_literal;
+    delete table_schema;
 }
\ No newline at end of file
diff --git a/cpp/test/writer/time_chunk_writer_test.cc b/cpp/test/writer/time_chunk_writer_test.cc
index 1f1910c..df79ab2 100644
--- a/cpp/test/writer/time_chunk_writer_test.cc
+++ b/cpp/test/writer/time_chunk_writer_test.cc
@@ -52,11 +52,6 @@
     writer.destroy();
 }
 
-TEST_F(TimeChunkWriterTest, WriteBoolean) {
-    EXPECT_EQ(time_chunk_writer.write(true), E_OK);
-    EXPECT_EQ(time_chunk_writer.write(false), E_OK);
-}
-
 TEST_F(TimeChunkWriterTest, WriteLargeDataSet) {
     for (int i = 0; i < 10000; ++i) {
         time_chunk_writer.write(i);
diff --git a/cpp/test/writer/tsfile_writer_test.cc b/cpp/test/writer/tsfile_writer_test.cc
index 6f58f0c..be8be1f 100644
--- a/cpp/test/writer/tsfile_writer_test.cc
+++ b/cpp/test/writer/tsfile_writer_test.cc
@@ -78,7 +78,8 @@
     }
 
     static std::string field_to_string(storage::Field *value) {
-        if (value->type_ == common::TEXT) {
+        if (value->type_ == common::TEXT || value->type_ == STRING ||
+            value->type_ == BLOB) {
             return std::string(value->value_.sval_);
         } else {
             std::stringstream ss;
@@ -90,6 +91,7 @@
                     ss << value->value_.ival_;
                     break;
                 case common::INT64:
+                case common::TIMESTAMP:
                     ss << value->value_.lval_;
                     break;
                 case common::FLOAT:
@@ -122,10 +124,10 @@
     common::TSEncoding encoding = common::TSEncoding::PLAIN;
     common::CompressionType compression_type =
         common::CompressionType::UNCOMPRESSED;
-    std::vector<std::string> measurement_names = {"level", "num", "bools",
-                                                  "double", "id"};
-    std::vector<common::TSDataType> data_types = {FLOAT, INT64, BOOLEAN, DOUBLE,
-                                                  STRING};
+    std::vector<std::string> measurement_names = {
+        "level", "num", "bools", "double", "id", "ts", "text", "blob", "date"};
+    std::vector<common::TSDataType> data_types = {
+        FLOAT, INT64, BOOLEAN, DOUBLE, STRING, TIMESTAMP, TEXT, BLOB, DATE};
     for (uint32_t i = 0; i < measurement_names.size(); i++) {
         std::string measurement_name = measurement_names[i];
         common::TSDataType data_type = data_types[i];
@@ -139,6 +141,13 @@
     std::strcpy(literal, "device_id");
     String literal_str(literal, std::strlen("device_id"));
 
+    std::time_t now = std::time(nullptr);
+    std::tm *local_time = std::localtime(&now);
+    std::tm today = {};
+    today.tm_year = local_time->tm_year;
+    today.tm_mon = local_time->tm_mon;
+    today.tm_mday = local_time->tm_mday;
+
     int row_num = 100000;
     for (int i = 0; i < row_num; ++i) {
         TsRecord record(1622505600000 + i * 100, device_name);
@@ -161,6 +170,17 @@
                 case STRING:
                     record.add_point(measurement_name, literal_str);
                     break;
+                case TEXT:
+                    record.add_point(measurement_name, literal_str);
+                    break;
+                case BLOB:
+                    record.add_point(measurement_name, literal_str);
+                    break;
+                case TIMESTAMP:
+                    record.add_point(measurement_name, (int64_t)415412);
+                    break;
+                case DATE:
+                    record.add_point(measurement_name, today);
                 default:
                     break;
             }
@@ -198,6 +218,11 @@
         ASSERT_EQ(qds->get_value<bool>(4), true);
         ASSERT_EQ(qds->get_value<double>(5), (double)2.0);
         ASSERT_EQ(qds->get_value<common::String *>(6)->compare(literal_str), 0);
+        ASSERT_EQ(qds->get_value<int64_t>(7), (int64_t)415412);
+        ASSERT_EQ(qds->get_value<common::String *>(8)->compare(literal_str), 0);
+        ASSERT_EQ(qds->get_value<common::String *>(9)->compare(literal_str), 0);
+        ASSERT_TRUE(
+            DateConverter::is_tm_ymd_equal(qds->get_value<std::tm>(10), today));
 
         ASSERT_EQ(qds->get_value<float>(measurement_names[0]), (float)1.0);
         ASSERT_EQ(qds->get_value<int64_t>(measurement_names[1]),
@@ -207,11 +232,21 @@
         ASSERT_EQ(qds->get_value<common::String *>(measurement_names[4])
                       ->compare(literal_str),
                   0);
+        ASSERT_EQ(qds->get_value<int64_t>(measurement_names[5]),
+                  (int64_t)415412);
+        ASSERT_EQ(qds->get_value<common::String *>(measurement_names[6])
+                      ->compare(literal_str),
+                  0);
+        ASSERT_EQ(qds->get_value<common::String *>(measurement_names[7])
+                      ->compare(literal_str),
+                  0);
+        ASSERT_TRUE(DateConverter::is_tm_ymd_equal(
+            qds->get_value<std::tm>(measurement_names[8]), today));
     } while (true);
     delete[] literal;
     EXPECT_EQ(cur_record_num, row_num);
     reader.destroy_query_data_set(qds);
-    reader.close();
+    ASSERT_EQ(reader.close(), E_OK);
 }
 
 TEST_F(TsFileWriterTest, RegisterTimeSeries) {
@@ -304,6 +339,7 @@
 }
 
 TEST_F(TsFileWriterTest, WriteMultipleTabletsMultiFlush) {
+    common::config_set_max_degree_of_index_node(3);
     const int device_num = 20;
     const int measurement_num = 20;
     int max_tablet_num = 100;
@@ -386,6 +422,7 @@
 }
 
 TEST_F(TsFileWriterTest, WriteMultipleTabletsAlignedMultiFlush) {
+    common::config_set_max_degree_of_index_node(3);
     const int device_num = 20;
     const int measurement_num = 20;
     int max_tablet_num = 100;
@@ -706,7 +743,7 @@
     std::string device_name = "device";
     std::vector<std::string> measurement_names;
     for (int i = 0; i < measurement_num; i++) {
-        measurement_names.emplace_back("temperature" + to_string(i));
+        measurement_names.emplace_back("temperature" + std::to_string(i));
     }
 
     common::TSDataType data_type = common::TSDataType::INT32;
@@ -735,7 +772,7 @@
 
     std::vector<storage::Path> select_list;
     for (int i = 0; i < measurement_num; ++i) {
-        std::string measurement_name = "temperature" + to_string(i);
+        std::string measurement_name = "temperature" + std::to_string(i);
         storage::Path path(device_name, measurement_name);
         select_list.push_back(path);
     }
@@ -776,7 +813,7 @@
     std::string device_name = "device";
     std::vector<std::string> measurement_names;
     for (int i = 0; i < measurement_num; i++) {
-        measurement_names.emplace_back("temperature" + to_string(i));
+        measurement_names.emplace_back("temperature" + std::to_string(i));
     }
 
     common::TSDataType data_type = common::TSDataType::INT32;
@@ -805,7 +842,7 @@
 
     std::vector<storage::Path> select_list;
     for (int i = 0; i < measurement_num; ++i) {
-        std::string measurement_name = "temperature" + to_string(i);
+        std::string measurement_name = "temperature" + std::to_string(i);
         storage::Path path(device_name, measurement_name);
         select_list.push_back(path);
     }
@@ -846,7 +883,7 @@
     std::string device_name = "device";
     std::vector<std::string> measurement_names;
     for (int i = 0; i < measurement_num; i++) {
-        measurement_names.emplace_back("temperature" + to_string(i));
+        measurement_names.emplace_back("temperature" + std::to_string(i));
     }
 
     common::TSDataType data_type = common::TSDataType::INT32;
@@ -874,7 +911,7 @@
 
     std::vector<storage::Path> select_list;
     for (int i = 0; i < measurement_num; ++i) {
-        std::string measurement_name = "temperature" + to_string(i);
+        std::string measurement_name = "temperature" + std::to_string(i);
         storage::Path path(device_name, measurement_name);
         select_list.push_back(path);
     }
diff --git a/doap_tsfile.rdf b/doap_tsfile.rdf
index 1b12ab2..f74f45b 100644
--- a/doap_tsfile.rdf
+++ b/doap_tsfile.rdf
@@ -37,6 +37,7 @@
 
     <programming-language>Java</programming-language>
     <programming-language>C++</programming-language>
+    <programming-language>C</programming-language>
     <programming-language>Python</programming-language>
 
     <category rdf:resource="https://projects.apache.org/category/big-data" />
@@ -44,6 +45,23 @@
     <category rdf:resource="http://projects.apache.org/category/java"/>
     <category rdf:resource="http://projects.apache.org/category/python"/>
     <category rdf:resource="http://projects.apache.org/category/c++"/>
+    <category rdf:resource="http://projects.apache.org/category/c"/>
+
+    <release>
+      <Version>
+        <name>Apache TsFile</name>
+        <created>2025-08-18</created>
+        <revision>2.1.1</revision>
+      </Version>
+    </release>
+
+    <release>
+      <Version>
+        <name>Apache TsFile</name>
+        <created>2025-07-11</created>
+        <revision>2.1.0</revision>
+      </Version>
+    </release>
 
     <release>
       <Version>
diff --git a/docs/pnpm-lock.yaml b/docs/pnpm-lock.yaml
new file mode 100644
index 0000000..cbd478c
--- /dev/null
+++ b/docs/pnpm-lock.yaml
@@ -0,0 +1,3737 @@
+lockfileVersion: '6.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+devDependencies:
+  '@docsearch/css':
+    specifier: ^3.5.2
+    version: 3.6.0
+  '@docsearch/js':
+    specifier: ^3.5.2
+    version: 3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+  '@docsearch/react':
+    specifier: ^3.5.2
+    version: 3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+  '@vuepress/client':
+    specifier: 2.0.0-rc.0
+    version: 2.0.0-rc.0
+  '@vuepress/plugin-docsearch':
+    specifier: 2.0.0-rc.0
+    version: 2.0.0-rc.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+  '@vuepress/shared':
+    specifier: 2.0.0-rc.0
+    version: 2.0.0-rc.0
+  '@vuepress/utils':
+    specifier: 2.0.0-rc.0
+    version: 2.0.0-rc.0
+  gh-pages:
+    specifier: ^6.1.1
+    version: 6.1.1
+  ts-debounce:
+    specifier: ^4.0.0
+    version: 4.0.0
+  vue:
+    specifier: ^3.4.3
+    version: 3.4.30
+  vue-router:
+    specifier: ^4.2.5
+    version: 4.4.0(vue@3.4.30)
+  vuepress:
+    specifier: 2.0.0-rc.0
+    version: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+  vuepress-theme-hope:
+    specifier: 2.0.0-rc.11
+    version: 2.0.0-rc.11(@vuepress/plugin-docsearch@2.0.0-rc.0)(markdown-it@13.0.2)(vuepress@2.0.0-rc.0)
+
+packages:
+
+  /@algolia/autocomplete-core@1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)(search-insights@2.14.0):
+    resolution: {integrity: sha512-009HdfugtGCdC4JdXUbVJClA0q0zh24yyePn+KUGk3rP7j8FEe/m5Yo/z65gn6nP/cM39PxpzqKrL7A6fP6PPw==}
+    dependencies:
+      '@algolia/autocomplete-plugin-algolia-insights': 1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)(search-insights@2.14.0)
+      '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)
+    transitivePeerDependencies:
+      - '@algolia/client-search'
+      - algoliasearch
+      - search-insights
+    dev: true
+
+  /@algolia/autocomplete-plugin-algolia-insights@1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)(search-insights@2.14.0):
+    resolution: {integrity: sha512-a/yTUkcO/Vyy+JffmAnTWbr4/90cLzw+CC3bRbhnULr/EM0fGNvM13oQQ14f2moLMcVDyAx/leczLlAOovhSZg==}
+    peerDependencies:
+      search-insights: '>= 1 < 3'
+    dependencies:
+      '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)
+      search-insights: 2.14.0
+    transitivePeerDependencies:
+      - '@algolia/client-search'
+      - algoliasearch
+    dev: true
+
+  /@algolia/autocomplete-preset-algolia@1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3):
+    resolution: {integrity: sha512-d4qlt6YmrLMYy95n5TB52wtNDr6EgAIPH81dvvvW8UmuWRgxEtY0NJiPwl/h95JtG2vmRM804M0DSwMCNZlzRA==}
+    peerDependencies:
+      '@algolia/client-search': '>= 4.9.1 < 6'
+      algoliasearch: '>= 4.9.1 < 6'
+    dependencies:
+      '@algolia/autocomplete-shared': 1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)
+      '@algolia/client-search': 4.23.3
+      algoliasearch: 4.23.3
+    dev: true
+
+  /@algolia/autocomplete-shared@1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3):
+    resolution: {integrity: sha512-Wnm9E4Ye6Rl6sTTqjoymD+l8DjSTHsHboVRYrKgEt8Q7UHm9nYbqhN/i0fhUYA3OAEH7WA8x3jfpnmJm3rKvaQ==}
+    peerDependencies:
+      '@algolia/client-search': '>= 4.9.1 < 6'
+      algoliasearch: '>= 4.9.1 < 6'
+    dependencies:
+      '@algolia/client-search': 4.23.3
+      algoliasearch: 4.23.3
+    dev: true
+
+  /@algolia/cache-browser-local-storage@4.23.3:
+    resolution: {integrity: sha512-vRHXYCpPlTDE7i6UOy2xE03zHF2C8MEFjPN2v7fRbqVpcOvAUQK81x3Kc21xyb5aSIpYCjWCZbYZuz8Glyzyyg==}
+    dependencies:
+      '@algolia/cache-common': 4.23.3
+    dev: true
+
+  /@algolia/cache-common@4.23.3:
+    resolution: {integrity: sha512-h9XcNI6lxYStaw32pHpB1TMm0RuxphF+Ik4o7tcQiodEdpKK+wKufY6QXtba7t3k8eseirEMVB83uFFF3Nu54A==}
+    dev: true
+
+  /@algolia/cache-in-memory@4.23.3:
+    resolution: {integrity: sha512-yvpbuUXg/+0rbcagxNT7un0eo3czx2Uf0y4eiR4z4SD7SiptwYTpbuS0IHxcLHG3lq22ukx1T6Kjtk/rT+mqNg==}
+    dependencies:
+      '@algolia/cache-common': 4.23.3
+    dev: true
+
+  /@algolia/client-account@4.23.3:
+    resolution: {integrity: sha512-hpa6S5d7iQmretHHF40QGq6hz0anWEHGlULcTIT9tbUssWUriN9AUXIFQ8Ei4w9azD0hc1rUok9/DeQQobhQMA==}
+    dependencies:
+      '@algolia/client-common': 4.23.3
+      '@algolia/client-search': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /@algolia/client-analytics@4.23.3:
+    resolution: {integrity: sha512-LBsEARGS9cj8VkTAVEZphjxTjMVCci+zIIiRhpFun9jGDUlS1XmhCW7CTrnaWeIuCQS/2iPyRqSy1nXPjcBLRA==}
+    dependencies:
+      '@algolia/client-common': 4.23.3
+      '@algolia/client-search': 4.23.3
+      '@algolia/requester-common': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /@algolia/client-common@4.23.3:
+    resolution: {integrity: sha512-l6EiPxdAlg8CYhroqS5ybfIczsGUIAC47slLPOMDeKSVXYG1n0qGiz4RjAHLw2aD0xzh2EXZ7aRguPfz7UKDKw==}
+    dependencies:
+      '@algolia/requester-common': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /@algolia/client-personalization@4.23.3:
+    resolution: {integrity: sha512-3E3yF3Ocr1tB/xOZiuC3doHQBQ2zu2MPTYZ0d4lpfWads2WTKG7ZzmGnsHmm63RflvDeLK/UVx7j2b3QuwKQ2g==}
+    dependencies:
+      '@algolia/client-common': 4.23.3
+      '@algolia/requester-common': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /@algolia/client-search@4.23.3:
+    resolution: {integrity: sha512-P4VAKFHqU0wx9O+q29Q8YVuaowaZ5EM77rxfmGnkHUJggh28useXQdopokgwMeYw2XUht49WX5RcTQ40rZIabw==}
+    dependencies:
+      '@algolia/client-common': 4.23.3
+      '@algolia/requester-common': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /@algolia/logger-common@4.23.3:
+    resolution: {integrity: sha512-y9kBtmJwiZ9ZZ+1Ek66P0M68mHQzKRxkW5kAAXYN/rdzgDN0d2COsViEFufxJ0pb45K4FRcfC7+33YB4BLrZ+g==}
+    dev: true
+
+  /@algolia/logger-console@4.23.3:
+    resolution: {integrity: sha512-8xoiseoWDKuCVnWP8jHthgaeobDLolh00KJAdMe9XPrWPuf1by732jSpgy2BlsLTaT9m32pHI8CRfrOqQzHv3A==}
+    dependencies:
+      '@algolia/logger-common': 4.23.3
+    dev: true
+
+  /@algolia/recommend@4.23.3:
+    resolution: {integrity: sha512-9fK4nXZF0bFkdcLBRDexsnGzVmu4TSYZqxdpgBW2tEyfuSSY54D4qSRkLmNkrrz4YFvdh2GM1gA8vSsnZPR73w==}
+    dependencies:
+      '@algolia/cache-browser-local-storage': 4.23.3
+      '@algolia/cache-common': 4.23.3
+      '@algolia/cache-in-memory': 4.23.3
+      '@algolia/client-common': 4.23.3
+      '@algolia/client-search': 4.23.3
+      '@algolia/logger-common': 4.23.3
+      '@algolia/logger-console': 4.23.3
+      '@algolia/requester-browser-xhr': 4.23.3
+      '@algolia/requester-common': 4.23.3
+      '@algolia/requester-node-http': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /@algolia/requester-browser-xhr@4.23.3:
+    resolution: {integrity: sha512-jDWGIQ96BhXbmONAQsasIpTYWslyjkiGu0Quydjlowe+ciqySpiDUrJHERIRfELE5+wFc7hc1Q5hqjGoV7yghw==}
+    dependencies:
+      '@algolia/requester-common': 4.23.3
+    dev: true
+
+  /@algolia/requester-common@4.23.3:
+    resolution: {integrity: sha512-xloIdr/bedtYEGcXCiF2muajyvRhwop4cMZo+K2qzNht0CMzlRkm8YsDdj5IaBhshqfgmBb3rTg4sL4/PpvLYw==}
+    dev: true
+
+  /@algolia/requester-node-http@4.23.3:
+    resolution: {integrity: sha512-zgu++8Uj03IWDEJM3fuNl34s746JnZOWn1Uz5taV1dFyJhVM/kTNw9Ik7YJWiUNHJQXcaD8IXD1eCb0nq/aByA==}
+    dependencies:
+      '@algolia/requester-common': 4.23.3
+    dev: true
+
+  /@algolia/transporter@4.23.3:
+    resolution: {integrity: sha512-Wjl5gttqnf/gQKJA+dafnD0Y6Yw97yvfY8R9h0dQltX1GXTgNs1zWgvtWW0tHl1EgMdhAyw189uWiZMnL3QebQ==}
+    dependencies:
+      '@algolia/cache-common': 4.23.3
+      '@algolia/logger-common': 4.23.3
+      '@algolia/requester-common': 4.23.3
+    dev: true
+
+  /@babel/helper-string-parser@7.24.7:
+    resolution: {integrity: sha512-7MbVt6xrwFQbunH2DNQsAP5sTGxfqQtErvBIvIMi6EQnbgUOuVYanvREcmFrOPhoXBrTtjhhP+lW+o5UfK+tDg==}
+    engines: {node: '>=6.9.0'}
+    dev: true
+
+  /@babel/helper-validator-identifier@7.24.7:
+    resolution: {integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==}
+    engines: {node: '>=6.9.0'}
+    dev: true
+
+  /@babel/parser@7.24.7:
+    resolution: {integrity: sha512-9uUYRm6OqQrCqQdG1iCBwBPZgN8ciDBro2nIOFaiRz1/BCxaI7CNvQbDHvsArAC7Tw9Hda/B3U+6ui9u4HWXPw==}
+    engines: {node: '>=6.0.0'}
+    hasBin: true
+    dependencies:
+      '@babel/types': 7.24.7
+    dev: true
+
+  /@babel/types@7.24.7:
+    resolution: {integrity: sha512-XEFXSlxiG5td2EJRe8vOmRbaXVgfcBlszKujvVmWIK/UpywWljQCfzAv3RQCGujWQ1RD4YYWEAqDXfuJiy8f5Q==}
+    engines: {node: '>=6.9.0'}
+    dependencies:
+      '@babel/helper-string-parser': 7.24.7
+      '@babel/helper-validator-identifier': 7.24.7
+      to-fast-properties: 2.0.0
+    dev: true
+
+  /@docsearch/css@3.6.0:
+    resolution: {integrity: sha512-+sbxb71sWre+PwDK7X2T8+bhS6clcVMLwBPznX45Qu6opJcgRjAp7gYSDzVFp187J+feSj5dNBN1mJoi6ckkUQ==}
+    dev: true
+
+  /@docsearch/js@3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0):
+    resolution: {integrity: sha512-QujhqINEElrkIfKwyyyTfbsfMAYCkylInLYMRqHy7PHc8xTBQCow73tlo/Kc7oIwBrCLf0P3YhjlOeV4v8hevQ==}
+    dependencies:
+      '@docsearch/react': 3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+      preact: 10.22.0
+    transitivePeerDependencies:
+      - '@algolia/client-search'
+      - '@types/react'
+      - react
+      - react-dom
+      - search-insights
+    dev: true
+
+  /@docsearch/react@3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0):
+    resolution: {integrity: sha512-HUFut4ztcVNmqy9gp/wxNbC7pTOHhgVVkHVGCACTuLhUKUhKAF9KYHJtMiLUJxEqiFLQiuri1fWF8zqwM/cu1w==}
+    peerDependencies:
+      '@types/react': '>= 16.8.0 < 19.0.0'
+      react: '>= 16.8.0 < 19.0.0'
+      react-dom: '>= 16.8.0 < 19.0.0'
+      search-insights: '>= 1 < 3'
+    peerDependenciesMeta:
+      '@types/react':
+        optional: true
+      react:
+        optional: true
+      react-dom:
+        optional: true
+      search-insights:
+        optional: true
+    dependencies:
+      '@algolia/autocomplete-core': 1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)(search-insights@2.14.0)
+      '@algolia/autocomplete-preset-algolia': 1.9.3(@algolia/client-search@4.23.3)(algoliasearch@4.23.3)
+      '@docsearch/css': 3.6.0
+      algoliasearch: 4.23.3
+      search-insights: 2.14.0
+    transitivePeerDependencies:
+      - '@algolia/client-search'
+    dev: true
+
+  /@esbuild/aix-ppc64@0.19.12:
+    resolution: {integrity: sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==}
+    engines: {node: '>=12'}
+    cpu: [ppc64]
+    os: [aix]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-arm64@0.19.12:
+    resolution: {integrity: sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-arm@0.19.12:
+    resolution: {integrity: sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-x64@0.19.12:
+    resolution: {integrity: sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/darwin-arm64@0.19.12:
+    resolution: {integrity: sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/darwin-x64@0.19.12:
+    resolution: {integrity: sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/freebsd-arm64@0.19.12:
+    resolution: {integrity: sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/freebsd-x64@0.19.12:
+    resolution: {integrity: sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-arm64@0.19.12:
+    resolution: {integrity: sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-arm@0.19.12:
+    resolution: {integrity: sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-ia32@0.19.12:
+    resolution: {integrity: sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-loong64@0.19.12:
+    resolution: {integrity: sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==}
+    engines: {node: '>=12'}
+    cpu: [loong64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-mips64el@0.19.12:
+    resolution: {integrity: sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==}
+    engines: {node: '>=12'}
+    cpu: [mips64el]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-ppc64@0.19.12:
+    resolution: {integrity: sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==}
+    engines: {node: '>=12'}
+    cpu: [ppc64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-riscv64@0.19.12:
+    resolution: {integrity: sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==}
+    engines: {node: '>=12'}
+    cpu: [riscv64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-s390x@0.19.12:
+    resolution: {integrity: sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==}
+    engines: {node: '>=12'}
+    cpu: [s390x]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-x64@0.19.12:
+    resolution: {integrity: sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/netbsd-x64@0.19.12:
+    resolution: {integrity: sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [netbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/openbsd-x64@0.19.12:
+    resolution: {integrity: sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [openbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/sunos-x64@0.19.12:
+    resolution: {integrity: sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [sunos]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-arm64@0.19.12:
+    resolution: {integrity: sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-ia32@0.19.12:
+    resolution: {integrity: sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-x64@0.19.12:
+    resolution: {integrity: sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@jridgewell/sourcemap-codec@1.4.15:
+    resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==}
+    dev: true
+
+  /@lit-labs/ssr-dom-shim@1.2.0:
+    resolution: {integrity: sha512-yWJKmpGE6lUURKAaIltoPIE/wrbY3TEkqQt+X0m+7fQNnAv0keydnYvbiJFP1PnMhizmIWRWOG5KLhYyc/xl+g==}
+    dev: true
+
+  /@lit/reactive-element@2.0.4:
+    resolution: {integrity: sha512-GFn91inaUa2oHLak8awSIigYz0cU0Payr1rcFsrkf5OJ5eSPxElyZfKh0f2p9FsTiZWXQdWGJeXZICEfXXYSXQ==}
+    dependencies:
+      '@lit-labs/ssr-dom-shim': 1.2.0
+    dev: true
+
+  /@mdit-vue/plugin-component@1.0.0:
+    resolution: {integrity: sha512-ZXsJwxkG5yyTHARIYbR74cT4AZ0SfMokFFjiHYCbypHIeYWgJhso4+CZ8+3V9EWFG3EHlGoKNGqKp9chHnqntQ==}
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/plugin-frontmatter@1.0.0:
+    resolution: {integrity: sha512-MMA7Ny+YPZA7eDOY1t4E+rKuEWO39mzDdP/M68fKdXJU6VfcGkPr7gnpnJfW2QBJ5qIvMrK/3lDAA2JBy5TfpA==}
+    dependencies:
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      gray-matter: 4.0.3
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/plugin-headers@1.0.0:
+    resolution: {integrity: sha512-0rK/iKy6x13d/Pp5XxdLBshTD0+YjZvtHIaIV+JO+/H2WnOv7oaRgs48G5d44z3XJVUE2u6fNnTlI169fef0/A==}
+    dependencies:
+      '@mdit-vue/shared': 1.0.0
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/plugin-sfc@1.0.0:
+    resolution: {integrity: sha512-agMUe0fY4YHxsZivSvplBwRwrFvsIf/JNUJCAYq1+2Sg9+2hviTBZwjZDxYqHDHOVLtiNr+wuo68tE24mAx3AQ==}
+    dependencies:
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/plugin-title@1.0.0:
+    resolution: {integrity: sha512-8yC60fCZ95xcJ/cvJH4Lv43Rs4k+33UGyKrRWj5J8TNyMwUyGcwur0XyPM+ffJH4/Bzq4myZLsj/TTFSkXRxvw==}
+    dependencies:
+      '@mdit-vue/shared': 1.0.0
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/plugin-toc@1.0.0:
+    resolution: {integrity: sha512-WN8blfX0X/5Nolic0ClDWP7eVo9IB+U4g0jbycX3lolIZX5Bai1UpsD3QYZr5VVsPbQJMKMGvTrCEtCNTGvyWQ==}
+    dependencies:
+      '@mdit-vue/shared': 1.0.0
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/shared@1.0.0:
+    resolution: {integrity: sha512-nbYBfmEi+pR2Lm0Z6TMVX2/iBjfr/kGEsHW8CC0rQw+3+sG5dY6VG094HuFAkiAmmvZx9DZZb+7ZMWp9vkwCRw==}
+    dependencies:
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit-vue/types@1.0.0:
+    resolution: {integrity: sha512-xeF5+sHLzRNF7plbksywKCph4qli20l72of2fMlZQQ7RECvXYrRkE9+bjRFQCyULC7B8ydUYbpbkux5xJlVWyw==}
+    dev: true
+
+  /@mdit/plugin-alert@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-Z+/bHBDniCz/Q+TMa3M6f47KG4tUKvJI8FHXhDwgbKLzDLn045ZBHcOTeqvuWrrjCIKBEo4fVAlYszYcehxmfg==}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-align@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-NYGrsnX1c84dtY1tugDVX71zxxfcGSIjWANzQ0/od4B0+N31eXkq3SXdAjCXOWUUHSa6phfvtok+x4V9ExQwHA==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@mdit/plugin-container': 0.7.6(markdown-it@13.0.2)
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-attrs@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-vTSsqZUXglZRQ4cLNou6N2cTLudHS01Tir+HPtrWkN+VB4VAIRlCKV3hf0vzKRM+HR3DSe+vQMrWzfnQdD0o4A==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-container@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-egEYoJLkar4hxrBfFf6tO3IfoLzeUHYChGRI3FA2fxiMwwyclPvBMMQTtG2rY3sjPy497Z86QiqYwjRM0qA8Mw==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-demo@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-pybxLVpIKYlxt7fgjHK4Zd6f/IMCjACB6eZmIIlow0eOnijRnikHdRDVkoyDOxcFuQvP0yvT6LWcQlhHF+CGwQ==}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-figure@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-ysH5O3WWuDrfxLWQO4wYXYGdo8oi+EbMQFgbaSTxhoPKTFf3HTovCn3RANn7qATBqmGP26zf0hY55mX9BFUu5A==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-footnote@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-fQvbi3+/Hm+k4GJptXCc5i0n9/+ZpQx4yqpjOTGGSUz1k22XU07YaK3wpL9w+nPAfcBfzD06D72Y+eDIG5wi8w==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-img-lazyload@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-BBgxlXCOO7+9TMHJXtUyvi48jlH4ZYQtC9lNfgu1rvmq56iblZ7etOzg61/CmXmLgMHNvCbb/Kx7gRLkOBhv6A==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-img-mark@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-IhbkmTLbxr+c04ZQcSV2IFq8n1GeRFO08qQagkONUijI1O2G/RE2y6QvCVBUy0gB1Hc8c1i9vEyK1F0e4GpheQ==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-img-size@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-GZGEbuR0l4+ENXDG1Y2HsrNO2JuEmjI6PPe7pgsolk5yveWOiqzcEV4ushrWnpvwNal3Acuj+dpFDmZFOtm42g==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-include@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-4Pu0SdD2IOONLor/3GtQOYOBDv1xZ1LWByXLhht0kqEioX5D+aDZ5KD5MZ0AxH0xoNUGMynMEn/ak4/D9LLI7Q==}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+      upath: 2.0.1
+    dev: true
+
+  /@mdit/plugin-katex@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-ZbPIks1SlgzS5R6YOL5s0J5vk20ROl5hF+Yj9o6CiEljodK2ln0ewpX36qM6POVrS/cu6E4Lx4X0fc5JTI6nAQ==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      katex: ^0.16.9
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      katex:
+        optional: true
+      markdown-it:
+        optional: true
+    dependencies:
+      '@mdit/plugin-tex': 0.7.6(markdown-it@13.0.2)
+      '@types/katex': 0.16.7
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-mark@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-D9gv+ebVORa4r69t0JeJg3NW9gCR/NOGYa1DKYDEQOJoZ1WwjZVuhdxd3wCpLKtqDLnyHTFWd3cnV/HHrmca3w==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-mathjax@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-Sn3nYbkPftAF5tgemIJ1aClxgU4NnElHPV5PIgkMxwusaSsN4RB+GZ1NmTKrPUqVoIZi0pO1oYxy08TlIHcrPg==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+      mathjax-full: ^3.2.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+      mathjax-full:
+        optional: true
+    dependencies:
+      '@mdit/plugin-tex': 0.7.6(markdown-it@13.0.2)
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+      upath: 2.0.1
+    dev: true
+
+  /@mdit/plugin-stylize@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-dhhYxo4KdnB66g1080qeuz8X/80q3h4Cpmwnwi2rCbQfl29Nv26H5tz5pp15NKQfdfVgrZnXXLsDskJeg5IcaQ==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-sub@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-jo60gUC2KwnG4SqtyrbyI16hOcxb+Y1LwUKxXKfZRbZbcPcOfrzjE8q7XEq4MhmU51mfqY6EvCoB0yo49Zh2QA==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-sup@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-bCR1DxNuPAyYOaTtl3VkrRc7dMsJjrqt9HnM9T1ZiprW08uciaT37fLXF7DeUHWhGpcklI9dFtaU5cQkjUosTg==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-tab@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-kWwWmhv+PeeA9aC5InGyY4eJeIsCDDMhi1tbzyKW/wJ1eeFp+rpWpSfWwUe6QyTy/ZOhQ1nGXz0/uXI4xWz4Xw==}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-tasklist@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-ZsPHqQv/Cd9TUG3JfmrPOMRFR/SOG3/menWTz2kwE1HtJ1CUfBmoCRtfq2Sm7Rlqg/P6ZfWAd1t9bOwGkxD/5w==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-tex@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-TZfIJp98n7NI0TxDSxPs4Il1fqyJ/1GE3v8UZHA1DbfAdiVMxno/Dun9381ZxoJYibl+dnX3Kz7Ej3BDOceGGA==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@mdit/plugin-uml@0.7.6(markdown-it@13.0.2):
+    resolution: {integrity: sha512-P/aRntMnMfvtAEcLCkg6vhzNFEidj6jIno7VXr3HZNLitonr9ihnksM6jgrqG1rrMo4okBWUGFFGqsR8hHJk6g==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      markdown-it: ^13.0.2
+    peerDependenciesMeta:
+      markdown-it:
+        optional: true
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /@nodelib/fs.scandir@2.1.5:
+    resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
+    engines: {node: '>= 8'}
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      run-parallel: 1.2.0
+    dev: true
+
+  /@nodelib/fs.stat@2.0.5:
+    resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==}
+    engines: {node: '>= 8'}
+    dev: true
+
+  /@nodelib/fs.walk@1.2.8:
+    resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
+    engines: {node: '>= 8'}
+    dependencies:
+      '@nodelib/fs.scandir': 2.1.5
+      fastq: 1.17.1
+    dev: true
+
+  /@rollup/rollup-android-arm-eabi@4.18.0:
+    resolution: {integrity: sha512-Tya6xypR10giZV1XzxmH5wr25VcZSncG0pZIjfePT0OVBvqNEurzValetGNarVrGiq66EBVAFn15iYX4w6FKgQ==}
+    cpu: [arm]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-android-arm64@4.18.0:
+    resolution: {integrity: sha512-avCea0RAP03lTsDhEyfy+hpfr85KfyTctMADqHVhLAF3MlIkq83CP8UfAHUssgXTYd+6er6PaAhx/QGv4L1EiA==}
+    cpu: [arm64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-darwin-arm64@4.18.0:
+    resolution: {integrity: sha512-IWfdwU7KDSm07Ty0PuA/W2JYoZ4iTj3TUQjkVsO/6U+4I1jN5lcR71ZEvRh52sDOERdnNhhHU57UITXz5jC1/w==}
+    cpu: [arm64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-darwin-x64@4.18.0:
+    resolution: {integrity: sha512-n2LMsUz7Ynu7DoQrSQkBf8iNrjOGyPLrdSg802vk6XT3FtsgX6JbE8IHRvposskFm9SNxzkLYGSq9QdpLYpRNA==}
+    cpu: [x64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-arm-gnueabihf@4.18.0:
+    resolution: {integrity: sha512-C/zbRYRXFjWvz9Z4haRxcTdnkPt1BtCkz+7RtBSuNmKzMzp3ZxdM28Mpccn6pt28/UWUCTXa+b0Mx1k3g6NOMA==}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-arm-musleabihf@4.18.0:
+    resolution: {integrity: sha512-l3m9ewPgjQSXrUMHg93vt0hYCGnrMOcUpTz6FLtbwljo2HluS4zTXFy2571YQbisTnfTKPZ01u/ukJdQTLGh9A==}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-arm64-gnu@4.18.0:
+    resolution: {integrity: sha512-rJ5D47d8WD7J+7STKdCUAgmQk49xuFrRi9pZkWoRD1UeSMakbcepWXPF8ycChBoAqs1pb2wzvbY6Q33WmN2ftw==}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-arm64-musl@4.18.0:
+    resolution: {integrity: sha512-be6Yx37b24ZwxQ+wOQXXLZqpq4jTckJhtGlWGZs68TgdKXJgw54lUUoFYrg6Zs/kjzAQwEwYbp8JxZVzZLRepQ==}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-powerpc64le-gnu@4.18.0:
+    resolution: {integrity: sha512-hNVMQK+qrA9Todu9+wqrXOHxFiD5YmdEi3paj6vP02Kx1hjd2LLYR2eaN7DsEshg09+9uzWi2W18MJDlG0cxJA==}
+    cpu: [ppc64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-riscv64-gnu@4.18.0:
+    resolution: {integrity: sha512-ROCM7i+m1NfdrsmvwSzoxp9HFtmKGHEqu5NNDiZWQtXLA8S5HBCkVvKAxJ8U+CVctHwV2Gb5VUaK7UAkzhDjlg==}
+    cpu: [riscv64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-s390x-gnu@4.18.0:
+    resolution: {integrity: sha512-0UyyRHyDN42QL+NbqevXIIUnKA47A+45WyasO+y2bGJ1mhQrfrtXUpTxCOrfxCR4esV3/RLYyucGVPiUsO8xjg==}
+    cpu: [s390x]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-x64-gnu@4.18.0:
+    resolution: {integrity: sha512-xuglR2rBVHA5UsI8h8UbX4VJ470PtGCf5Vpswh7p2ukaqBGFTnsfzxUBetoWBWymHMxbIG0Cmx7Y9qDZzr648w==}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-linux-x64-musl@4.18.0:
+    resolution: {integrity: sha512-LKaqQL9osY/ir2geuLVvRRs+utWUNilzdE90TpyoX0eNqPzWjRm14oMEE+YLve4k/NAqCdPkGYDaDF5Sw+xBfg==}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-win32-arm64-msvc@4.18.0:
+    resolution: {integrity: sha512-7J6TkZQFGo9qBKH0pk2cEVSRhJbL6MtfWxth7Y5YmZs57Pi+4x6c2dStAUvaQkHQLnEQv1jzBUW43GvZW8OFqA==}
+    cpu: [arm64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-win32-ia32-msvc@4.18.0:
+    resolution: {integrity: sha512-Txjh+IxBPbkUB9+SXZMpv+b/vnTEtFyfWZgJ6iyCmt2tdx0OF5WhFowLmnh8ENGNpfUlUZkdI//4IEmhwPieNg==}
+    cpu: [ia32]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@rollup/rollup-win32-x64-msvc@4.18.0:
+    resolution: {integrity: sha512-UOo5FdvOL0+eIVTgS4tIdbW+TtnBLWg1YBCcU2KWM7nuNwRz9bksDX1bekJJCpu25N1DVWaCwnT39dVQxzqS8g==}
+    cpu: [x64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@sindresorhus/merge-streams@2.3.0:
+    resolution: {integrity: sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg==}
+    engines: {node: '>=18'}
+    dev: true
+
+  /@stackblitz/sdk@1.10.0:
+    resolution: {integrity: sha512-IcvE9Xifo2c4/f+yNqjFM/OW5VTBPLed3TxsQ+n8n81Py358IqD5w0IYfFgV5gbDjp2g5H5YK2/Shls/kQNTWQ==}
+    dev: true
+
+  /@types/debug@4.1.12:
+    resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==}
+    dependencies:
+      '@types/ms': 0.7.34
+    dev: true
+
+  /@types/estree@1.0.5:
+    resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==}
+    dev: true
+
+  /@types/fs-extra@11.0.4:
+    resolution: {integrity: sha512-yTbItCNreRooED33qjunPthRcSjERP1r4MqCZc7wv0u2sUkzTFp45tgUfS5+r7FrZPdmCCNflLhVSP/o+SemsQ==}
+    dependencies:
+      '@types/jsonfile': 6.1.4
+      '@types/node': 20.14.8
+    dev: true
+
+  /@types/hash-sum@1.0.2:
+    resolution: {integrity: sha512-UP28RddqY8xcU0SCEp9YKutQICXpaAq9N8U2klqF5hegGha7KzTOL8EdhIIV3bOSGBzjEpN9bU/d+nNZBdJYVw==}
+    dev: true
+
+  /@types/jsonfile@6.1.4:
+    resolution: {integrity: sha512-D5qGUYwjvnNNextdU59/+fI+spnwtTFmyQP0h+PfIOSkNfpU6AOICUOkm4i0OnSk+NyjdPJrxCDro0sJsWlRpQ==}
+    dependencies:
+      '@types/node': 20.14.8
+    dev: true
+
+  /@types/katex@0.16.7:
+    resolution: {integrity: sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==}
+    dev: true
+
+  /@types/linkify-it@3.0.5:
+    resolution: {integrity: sha512-yg6E+u0/+Zjva+buc3EIb+29XEg4wltq7cSmd4Uc2EE/1nUVmxyzpX6gUXD0V8jIrG0r7YeOGVIbYRkxeooCtw==}
+    dev: true
+
+  /@types/markdown-it-emoji@2.0.5:
+    resolution: {integrity: sha512-iJLsmCNpSWKtV6Ia3mLSjcXJPEt7ubGG342z+hGvYx++TpM19oTUrJcI7XjbOqRQ+W2UQ323E7B0eCLwlgT/9g==}
+    dependencies:
+      '@types/markdown-it': 13.0.8
+    dev: true
+
+  /@types/markdown-it@13.0.8:
+    resolution: {integrity: sha512-V+KmpgiipS+zoypeUSS9ojesWtY/0k4XfqcK2fnVrX/qInJhX7rsCxZ/rygiPH2zxlPPrhfuW0I6ddMcWTKLsg==}
+    dependencies:
+      '@types/linkify-it': 3.0.5
+      '@types/mdurl': 1.0.5
+    dev: true
+
+  /@types/mdurl@1.0.5:
+    resolution: {integrity: sha512-6L6VymKTzYSrEf4Nev4Xa1LCHKrlTlYCBMTlQKFuddo1CvQcE52I0mwfOJayueUC7MJuXOeHTcIU683lzd0cUA==}
+    dev: true
+
+  /@types/ms@0.7.34:
+    resolution: {integrity: sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==}
+    dev: true
+
+  /@types/node@17.0.45:
+    resolution: {integrity: sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==}
+    dev: true
+
+  /@types/node@20.14.8:
+    resolution: {integrity: sha512-DO+2/jZinXfROG7j7WKFn/3C6nFwxy2lLpgLjEXJz+0XKphZlTLJ14mo8Vfg8X5BWN6XjyESXq+LcYdT7tR3bA==}
+    dependencies:
+      undici-types: 5.26.5
+    dev: true
+
+  /@types/sax@1.2.7:
+    resolution: {integrity: sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==}
+    dependencies:
+      '@types/node': 17.0.45
+    dev: true
+
+  /@types/trusted-types@2.0.7:
+    resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==}
+    dev: true
+
+  /@types/web-bluetooth@0.0.20:
+    resolution: {integrity: sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==}
+    dev: true
+
+  /@vitejs/plugin-vue@4.6.2(vite@5.0.13)(vue@3.4.30):
+    resolution: {integrity: sha512-kqf7SGFoG+80aZG6Pf+gsZIVvGSCKE98JbiWqcCV9cThtg91Jav0yvYFC9Zb+jKetNGF6ZKeoaxgZfND21fWKw==}
+    engines: {node: ^14.18.0 || >=16.0.0}
+    peerDependencies:
+      vite: ^4.0.0 || ^5.0.0
+      vue: ^3.2.25
+    dependencies:
+      vite: 5.0.13
+      vue: 3.4.30
+    dev: true
+
+  /@vue/compiler-core@3.4.30:
+    resolution: {integrity: sha512-ZL8y4Xxdh8O6PSwfdZ1IpQ24PjTAieOz3jXb/MDTfDtANcKBMxg1KLm6OX2jofsaQGYfIVzd3BAG22i56/cF1w==}
+    dependencies:
+      '@babel/parser': 7.24.7
+      '@vue/shared': 3.4.30
+      entities: 4.5.0
+      estree-walker: 2.0.2
+      source-map-js: 1.2.0
+    dev: true
+
+  /@vue/compiler-dom@3.4.30:
+    resolution: {integrity: sha512-+16Sd8lYr5j/owCbr9dowcNfrHd+pz+w2/b5Lt26Oz/kB90C9yNbxQ3bYOvt7rI2bxk0nqda39hVcwDFw85c2Q==}
+    dependencies:
+      '@vue/compiler-core': 3.4.30
+      '@vue/shared': 3.4.30
+    dev: true
+
+  /@vue/compiler-sfc@3.4.30:
+    resolution: {integrity: sha512-8vElKklHn/UY8+FgUFlQrYAPbtiSB2zcgeRKW7HkpSRn/JjMRmZvuOtwDx036D1aqKNSTtXkWRfqx53Qb+HmMg==}
+    dependencies:
+      '@babel/parser': 7.24.7
+      '@vue/compiler-core': 3.4.30
+      '@vue/compiler-dom': 3.4.30
+      '@vue/compiler-ssr': 3.4.30
+      '@vue/shared': 3.4.30
+      estree-walker: 2.0.2
+      magic-string: 0.30.10
+      postcss: 8.4.38
+      source-map-js: 1.2.0
+    dev: true
+
+  /@vue/compiler-ssr@3.4.30:
+    resolution: {integrity: sha512-ZJ56YZGXJDd6jky4mmM0rNaNP6kIbQu9LTKZDhcpddGe/3QIalB1WHHmZ6iZfFNyj5mSypTa4+qDJa5VIuxMSg==}
+    dependencies:
+      '@vue/compiler-dom': 3.4.30
+      '@vue/shared': 3.4.30
+    dev: true
+
+  /@vue/devtools-api@6.6.3:
+    resolution: {integrity: sha512-0MiMsFma/HqA6g3KLKn+AGpL1kgKhFWszC9U29NfpWK5LE7bjeXxySWJrOJ77hBz+TBrBQ7o4QJqbPbqbs8rJw==}
+    dev: true
+
+  /@vue/reactivity@3.4.30:
+    resolution: {integrity: sha512-bVJurnCe3LS0JII8PPoAA63Zd2MBzcKrEzwdQl92eHCcxtIbxD2fhNwJpa+KkM3Y/A4T5FUnmdhgKwOf6BfbcA==}
+    dependencies:
+      '@vue/shared': 3.4.30
+    dev: true
+
+  /@vue/runtime-core@3.4.30:
+    resolution: {integrity: sha512-qaFEbnNpGz+tlnkaualomogzN8vBLkgzK55uuWjYXbYn039eOBZrWxyXWq/7qh9Bz2FPifZqGjVDl/FXiq9L2g==}
+    dependencies:
+      '@vue/reactivity': 3.4.30
+      '@vue/shared': 3.4.30
+    dev: true
+
+  /@vue/runtime-dom@3.4.30:
+    resolution: {integrity: sha512-tV6B4YiZRj5QsaJgw2THCy5C1H+2UeywO9tqgWEc21tn85qHEERndHN/CxlyXvSBFrpmlexCIdnqPuR9RM9thw==}
+    dependencies:
+      '@vue/reactivity': 3.4.30
+      '@vue/runtime-core': 3.4.30
+      '@vue/shared': 3.4.30
+      csstype: 3.1.3
+    dev: true
+
+  /@vue/server-renderer@3.4.30(vue@3.4.30):
+    resolution: {integrity: sha512-TBD3eqR1DeDc0cMrXS/vEs/PWzq1uXxnvjoqQuDGFIEHFIwuDTX/KWAQKIBjyMWLFHEeTDGYVsYci85z2UbTDg==}
+    peerDependencies:
+      vue: 3.4.30
+    dependencies:
+      '@vue/compiler-ssr': 3.4.30
+      '@vue/shared': 3.4.30
+      vue: 3.4.30
+    dev: true
+
+  /@vue/shared@3.4.30:
+    resolution: {integrity: sha512-CLg+f8RQCHQnKvuHY9adMsMaQOcqclh6Z5V9TaoMgy0ut0tz848joZ7/CYFFyF/yZ5i2yaw7Fn498C+CNZVHIg==}
+    dev: true
+
+  /@vuepress/bundler-vite@2.0.0-rc.0:
+    resolution: {integrity: sha512-rX8S8IYpqqlJfNPstS/joorpxXx/4WuE7+gDM31i2HUrxOKGZVzq8ZsRRRU2UdoTwHZSd3LpUS4sMtxE5xLK1A==}
+    dependencies:
+      '@vitejs/plugin-vue': 4.6.2(vite@5.0.13)(vue@3.4.30)
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      autoprefixer: 10.4.19(postcss@8.4.38)
+      connect-history-api-fallback: 2.0.0
+      postcss: 8.4.38
+      postcss-load-config: 4.0.2(postcss@8.4.38)
+      rollup: 4.18.0
+      vite: 5.0.13
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@types/node'
+      - '@vue/composition-api'
+      - less
+      - lightningcss
+      - sass
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+      - ts-node
+      - typescript
+    dev: true
+
+  /@vuepress/cli@2.0.0-rc.0:
+    resolution: {integrity: sha512-XWSIFO9iOR7N4O2lXIwS5vZuLjU9WU/aGAtmhMWEMxrdMx7TQaJbgrfpTUEbHMf+cPI1DXBbUbtmkqIvtfOV0w==}
+    hasBin: true
+    dependencies:
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      cac: 6.7.14
+      chokidar: 3.6.0
+      envinfo: 7.13.0
+      esbuild: 0.19.12
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/client@2.0.0-rc.0:
+    resolution: {integrity: sha512-TwQx8hJgYONYxX+QltZ2aw9O5Ym6SKelfiUduuIRb555B1gece/jSVap3H/ZwyBhpgJMtG4+/Mrmf8nlDSHjvw==}
+    dependencies:
+      '@vue/devtools-api': 6.6.3
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - typescript
+    dev: true
+
+  /@vuepress/core@2.0.0-rc.0:
+    resolution: {integrity: sha512-uoOaZP1MdxZYJIAJcRcmYKKeCIVnxZeOuLMOOB9CPuAKSalT1RvJ1lztw6RX3q9SPnlqtSZPQXDncPAZivw4pA==}
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/markdown': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vue: 3.4.30
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/markdown@2.0.0-rc.0:
+    resolution: {integrity: sha512-USmqdKKMT6ZFHYRztTjKUlO8qgGfnEygMAAq4AzC/uYXiEfrbMBLAWJhteyGS56P3rGLj0OPAhksE681bX/wOg==}
+    dependencies:
+      '@mdit-vue/plugin-component': 1.0.0
+      '@mdit-vue/plugin-frontmatter': 1.0.0
+      '@mdit-vue/plugin-headers': 1.0.0
+      '@mdit-vue/plugin-sfc': 1.0.0
+      '@mdit-vue/plugin-title': 1.0.0
+      '@mdit-vue/plugin-toc': 1.0.0
+      '@mdit-vue/shared': 1.0.0
+      '@mdit-vue/types': 1.0.0
+      '@types/markdown-it': 13.0.8
+      '@types/markdown-it-emoji': 2.0.5
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      markdown-it: 13.0.2
+      markdown-it-anchor: 8.6.7(@types/markdown-it@13.0.8)(markdown-it@13.0.2)
+      markdown-it-emoji: 2.0.2
+      mdurl: 1.0.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@vuepress/plugin-active-header-links@2.0.0-rc.0:
+    resolution: {integrity: sha512-UJdXLYNGL5Wjy5YGY8M2QgqT75bZ95EHebbqGi8twBdIJE9O+bM+dPJyYtAk2PIVqFORiw3Hj+PchsNSxdn9+g==}
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      ts-debounce: 4.0.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-back-to-top@2.0.0-rc.0:
+    resolution: {integrity: sha512-6GPfuzV5lkAnR00BxRUhqMXwMWt741alkq2R6bln4N8BneSOwEpX/7vi19MGf232aKdS/Va4pF5p0/nJ8Sed/g==}
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      ts-debounce: 4.0.0
+      vue: 3.4.30
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-container@2.0.0-rc.0:
+    resolution: {integrity: sha512-b7vrLN11YE7qiUDPfA3N9P7Z8fupe9Wbcr9KAE/bmfZ9VT4d6kzpVyoU7XHi99XngitsmnkaXP4aBvBF1c2AnA==}
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/markdown': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      markdown-it: 13.0.2
+      markdown-it-container: 3.0.0
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-docsearch@2.0.0-rc.0(@algolia/client-search@4.23.3)(search-insights@2.14.0):
+    resolution: {integrity: sha512-bFbb+RxNyoLVbojv3Fh3UNfMmx9tszdae5ni9nG2xa05giCRwGKT0wFG3Q6n0a9kIQ6V7z3PjCj9x1k4SALPEA==}
+    dependencies:
+      '@docsearch/css': 3.6.0
+      '@docsearch/js': 3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+      '@docsearch/react': 3.6.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      ts-debounce: 4.0.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@algolia/client-search'
+      - '@types/react'
+      - '@vue/composition-api'
+      - react
+      - react-dom
+      - search-insights
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-external-link-icon@2.0.0-rc.0:
+    resolution: {integrity: sha512-o8bk0oIlj/BkKc02mq91XLDloq1VOz/8iNcRwKAeqBE6svXzdYiyoTGet0J/4iPuAetsCn75S57W6RioDJHMnQ==}
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/markdown': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vue: 3.4.30
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-git@2.0.0-rc.0:
+    resolution: {integrity: sha512-r7UF77vZxaYeJQLygzodKv+15z3/dTLuGp4VcYO21W6BlJZvd4u9zqgiV7A//bZQvK4+3Hprylr0G3KgXqMewA==}
+    dependencies:
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      execa: 8.0.1
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-medium-zoom@2.0.0-rc.0:
+    resolution: {integrity: sha512-peU1lYKsmKikIe/0pkJuHzD/k6xW2TuqdvKVhV4I//aOE1WxsREKJ4ACcldmoIsnysoDydAUqKT6xDPGyDsH2g==}
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      medium-zoom: 1.1.0
+      vue: 3.4.30
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-nprogress@2.0.0-rc.0:
+    resolution: {integrity: sha512-rI+eK0Pg1KiZE+7hGmDUeSbgdWCid8Vnw0hFKNmjinDzGVmx4m03M6qfvclsI0SryH+lR7itZGLaR4gbTlrz/w==}
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-palette@2.0.0-rc.0:
+    resolution: {integrity: sha512-wW70SCp3/K7s1lln5YQsBGTog2WXaQv5piva5zhXcQ47YGf4aAJpThDa5C/ot4HhkPOKn8Iz5s0ckxXZzW8DIg==}
+    dependencies:
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      chokidar: 3.6.0
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-prismjs@2.0.0-rc.0:
+    resolution: {integrity: sha512-c5WRI7+FhVjdbymOKQ8F2KY/Bnv7aQtWScVk8vCMUimNi7v7Wff/A/i3KSFNz/tge3LxiAeH/Dc2WS/OnQXwCg==}
+    dependencies:
+      '@vuepress/core': 2.0.0-rc.0
+      prismjs: 1.29.0
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/plugin-theme-data@2.0.0-rc.0:
+    resolution: {integrity: sha512-FXY3/Ml+rM6gNKvwdBF6vKAcwnSvtXCzKgQwJAw3ppQTKUkLcbOxqM+h4d8bzHWAAvdnEvQFug5uEZgWllBQbA==}
+    dependencies:
+      '@vue/devtools-api': 6.6.3
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vue: 3.4.30
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/shared@2.0.0-rc.0:
+    resolution: {integrity: sha512-ikdSfjRv5LGM1iv4HHwF9P6gqTjaFCXKPK+hzlkHFHNZO1GLqk7/BPc4F51tAG1s8TcLhUZc+54LrfgS7PkXXA==}
+    dependencies:
+      '@mdit-vue/types': 1.0.0
+      '@vue/shared': 3.4.30
+    dev: true
+
+  /@vuepress/theme-default@2.0.0-rc.0:
+    resolution: {integrity: sha512-I8Y08evDmMuD1jh3NftPpFFSlCWOizQDJLjN7EQwcg7jiAP4A7c2REo6nBN2EmP24Mi7UrRM+RnytHR5V+pElA==}
+    peerDependencies:
+      sass-loader: ^13.3.2
+    peerDependenciesMeta:
+      sass-loader:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/plugin-active-header-links': 2.0.0-rc.0
+      '@vuepress/plugin-back-to-top': 2.0.0-rc.0
+      '@vuepress/plugin-container': 2.0.0-rc.0
+      '@vuepress/plugin-external-link-icon': 2.0.0-rc.0
+      '@vuepress/plugin-git': 2.0.0-rc.0
+      '@vuepress/plugin-medium-zoom': 2.0.0-rc.0
+      '@vuepress/plugin-nprogress': 2.0.0-rc.0
+      '@vuepress/plugin-palette': 2.0.0-rc.0
+      '@vuepress/plugin-prismjs': 2.0.0-rc.0
+      '@vuepress/plugin-theme-data': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      sass: 1.77.6
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /@vuepress/utils@2.0.0-rc.0:
+    resolution: {integrity: sha512-Q1ay/woClDHcW0Qe91KsnHoupdNN0tp/vhjvVLuAYxlv/1Obii7hz9WFcajyyGEhmsYxdvG2sGmcxFA02tuKkw==}
+    dependencies:
+      '@types/debug': 4.1.12
+      '@types/fs-extra': 11.0.4
+      '@types/hash-sum': 1.0.2
+      '@vuepress/shared': 2.0.0-rc.0
+      debug: 4.3.5
+      fs-extra: 11.2.0
+      globby: 14.0.1
+      hash-sum: 2.0.0
+      ora: 7.0.1
+      picocolors: 1.0.1
+      upath: 2.0.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@vueuse/core@10.11.0(vue@3.4.30):
+    resolution: {integrity: sha512-x3sD4Mkm7PJ+pcq3HX8PLPBadXCAlSDR/waK87dz0gQE+qJnaaFhc/dZVfJz+IUYzTMVGum2QlR7ImiJQN4s6g==}
+    dependencies:
+      '@types/web-bluetooth': 0.0.20
+      '@vueuse/metadata': 10.11.0
+      '@vueuse/shared': 10.11.0(vue@3.4.30)
+      vue-demi: 0.14.8(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - vue
+    dev: true
+
+  /@vueuse/metadata@10.11.0:
+    resolution: {integrity: sha512-kQX7l6l8dVWNqlqyN3ePW3KmjCQO3ZMgXuBMddIu83CmucrsBfXlH+JoviYyRBws/yLTQO8g3Pbw+bdIoVm4oQ==}
+    dev: true
+
+  /@vueuse/shared@10.11.0(vue@3.4.30):
+    resolution: {integrity: sha512-fyNoIXEq3PfX1L3NkNhtVQUSRtqYwJtJg+Bp9rIzculIZWHTkKSysujrOk2J+NrRulLTQH9+3gGSfYLWSEWU1A==}
+    dependencies:
+      vue-demi: 0.14.8(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - vue
+    dev: true
+
+  /algoliasearch@4.23.3:
+    resolution: {integrity: sha512-Le/3YgNvjW9zxIQMRhUHuhiUjAlKY/zsdZpfq4dlLqg6mEm0nL6yk+7f2hDOtLpxsgE4jSzDmvHL7nXdBp5feg==}
+    dependencies:
+      '@algolia/cache-browser-local-storage': 4.23.3
+      '@algolia/cache-common': 4.23.3
+      '@algolia/cache-in-memory': 4.23.3
+      '@algolia/client-account': 4.23.3
+      '@algolia/client-analytics': 4.23.3
+      '@algolia/client-common': 4.23.3
+      '@algolia/client-personalization': 4.23.3
+      '@algolia/client-search': 4.23.3
+      '@algolia/logger-common': 4.23.3
+      '@algolia/logger-console': 4.23.3
+      '@algolia/recommend': 4.23.3
+      '@algolia/requester-browser-xhr': 4.23.3
+      '@algolia/requester-common': 4.23.3
+      '@algolia/requester-node-http': 4.23.3
+      '@algolia/transporter': 4.23.3
+    dev: true
+
+  /ansi-regex@5.0.1:
+    resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /ansi-regex@6.0.1:
+    resolution: {integrity: sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /ansi-styles@4.3.0:
+    resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
+    engines: {node: '>=8'}
+    dependencies:
+      color-convert: 2.0.1
+    dev: true
+
+  /anymatch@3.1.3:
+    resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
+    engines: {node: '>= 8'}
+    dependencies:
+      normalize-path: 3.0.0
+      picomatch: 2.3.1
+    dev: true
+
+  /arg@5.0.2:
+    resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==}
+    dev: true
+
+  /argparse@1.0.10:
+    resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==}
+    dependencies:
+      sprintf-js: 1.0.3
+    dev: true
+
+  /argparse@2.0.1:
+    resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
+    dev: true
+
+  /array-union@1.0.2:
+    resolution: {integrity: sha512-Dxr6QJj/RdU/hCaBjOfxW+q6lyuVE6JFWIrAUpuOOhoJJoQ99cUn3igRaHVB5P9WrgFVN0FfArM3x0cueOU8ng==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      array-uniq: 1.0.3
+    dev: true
+
+  /array-uniq@1.0.3:
+    resolution: {integrity: sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /async@3.2.5:
+    resolution: {integrity: sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==}
+    dev: true
+
+  /autoprefixer@10.4.19(postcss@8.4.38):
+    resolution: {integrity: sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==}
+    engines: {node: ^10 || ^12 || >=14}
+    hasBin: true
+    peerDependencies:
+      postcss: ^8.1.0
+    dependencies:
+      browserslist: 4.23.1
+      caniuse-lite: 1.0.30001636
+      fraction.js: 4.3.7
+      normalize-range: 0.1.2
+      picocolors: 1.0.1
+      postcss: 8.4.38
+      postcss-value-parser: 4.2.0
+    dev: true
+
+  /balanced-match@1.0.2:
+    resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
+    dev: true
+
+  /balloon-css@1.2.0:
+    resolution: {integrity: sha512-urXwkHgwp6GsXVF+it01485Z2Cj4pnW02ICnM0TemOlkKmCNnDLmyy+ZZiRXBpwldUXO+aRNr7Hdia4CBvXJ5A==}
+    dev: true
+
+  /base64-js@1.5.1:
+    resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
+    dev: true
+
+  /bcrypt-ts@5.0.2:
+    resolution: {integrity: sha512-gDwQ5784AkkfhHACh3jGcg1hUubyZyeq9AtVd5gXkcyHGVOC+mORjRIHSj+fHfqwY5vxwyBLXQpcfk8MpK0ROg==}
+    engines: {node: '>=18'}
+    dev: true
+
+  /binary-extensions@2.3.0:
+    resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /bl@5.1.0:
+    resolution: {integrity: sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==}
+    dependencies:
+      buffer: 6.0.3
+      inherits: 2.0.4
+      readable-stream: 3.6.2
+    dev: true
+
+  /boolbase@1.0.0:
+    resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==}
+    dev: true
+
+  /brace-expansion@1.1.11:
+    resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==}
+    dependencies:
+      balanced-match: 1.0.2
+      concat-map: 0.0.1
+    dev: true
+
+  /braces@3.0.3:
+    resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
+    engines: {node: '>=8'}
+    dependencies:
+      fill-range: 7.1.1
+    dev: true
+
+  /browserslist@4.23.1:
+    resolution: {integrity: sha512-TUfofFo/KsK/bWZ9TWQ5O26tsWW4Uhmt8IYklbnUa70udB6P2wA7w7o4PY4muaEPBQaAX+CEnmmIA41NVHtPVw==}
+    engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
+    hasBin: true
+    dependencies:
+      caniuse-lite: 1.0.30001636
+      electron-to-chromium: 1.4.811
+      node-releases: 2.0.14
+      update-browserslist-db: 1.0.16(browserslist@4.23.1)
+    dev: true
+
+  /buffer@6.0.3:
+    resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==}
+    dependencies:
+      base64-js: 1.5.1
+      ieee754: 1.2.1
+    dev: true
+
+  /cac@6.7.14:
+    resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /camelcase@5.3.1:
+    resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /caniuse-lite@1.0.30001636:
+    resolution: {integrity: sha512-bMg2vmr8XBsbL6Lr0UHXy/21m84FTxDLWn2FSqMd5PrlbMxwJlQnC2YWYxVgp66PZE+BBNF2jYQUBKCo1FDeZg==}
+    dev: true
+
+  /chalk@5.3.0:
+    resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==}
+    engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+    dev: true
+
+  /cheerio-select@2.1.0:
+    resolution: {integrity: sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==}
+    dependencies:
+      boolbase: 1.0.0
+      css-select: 5.1.0
+      css-what: 6.1.0
+      domelementtype: 2.3.0
+      domhandler: 5.0.3
+      domutils: 3.1.0
+    dev: true
+
+  /cheerio@1.0.0-rc.12:
+    resolution: {integrity: sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==}
+    engines: {node: '>= 6'}
+    dependencies:
+      cheerio-select: 2.1.0
+      dom-serializer: 2.0.0
+      domhandler: 5.0.3
+      domutils: 3.1.0
+      htmlparser2: 8.0.2
+      parse5: 7.1.2
+      parse5-htmlparser2-tree-adapter: 7.0.0
+    dev: true
+
+  /chokidar@3.6.0:
+    resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==}
+    engines: {node: '>= 8.10.0'}
+    dependencies:
+      anymatch: 3.1.3
+      braces: 3.0.3
+      glob-parent: 5.1.2
+      is-binary-path: 2.1.0
+      is-glob: 4.0.3
+      normalize-path: 3.0.0
+      readdirp: 3.6.0
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /cli-cursor@4.0.0:
+    resolution: {integrity: sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      restore-cursor: 4.0.0
+    dev: true
+
+  /cli-spinners@2.9.2:
+    resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /cliui@6.0.0:
+    resolution: {integrity: sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==}
+    dependencies:
+      string-width: 4.2.3
+      strip-ansi: 6.0.1
+      wrap-ansi: 6.2.0
+    dev: true
+
+  /color-convert@2.0.1:
+    resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
+    engines: {node: '>=7.0.0'}
+    dependencies:
+      color-name: 1.1.4
+    dev: true
+
+  /color-name@1.1.4:
+    resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==}
+    dev: true
+
+  /commander@11.1.0:
+    resolution: {integrity: sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==}
+    engines: {node: '>=16'}
+    dev: true
+
+  /commondir@1.0.1:
+    resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==}
+    dev: true
+
+  /concat-map@0.0.1:
+    resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
+    dev: true
+
+  /connect-history-api-fallback@2.0.0:
+    resolution: {integrity: sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==}
+    engines: {node: '>=0.8'}
+    dev: true
+
+  /create-codepen@1.0.1:
+    resolution: {integrity: sha512-XzSWwGCFNeOnNGp3KdCDGaKq4Cp1SvjzpPGQqO0tj1HT3BhksLdl/xQ2ZEY4+0MQ3m1I/K1Fvpm4GGMthtamyA==}
+    dev: true
+
+  /cross-spawn@7.0.3:
+    resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==}
+    engines: {node: '>= 8'}
+    dependencies:
+      path-key: 3.1.1
+      shebang-command: 2.0.0
+      which: 2.0.2
+    dev: true
+
+  /css-select@5.1.0:
+    resolution: {integrity: sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==}
+    dependencies:
+      boolbase: 1.0.0
+      css-what: 6.1.0
+      domhandler: 5.0.3
+      domutils: 3.1.0
+      nth-check: 2.1.1
+    dev: true
+
+  /css-what@6.1.0:
+    resolution: {integrity: sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /csstype@3.1.3:
+    resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==}
+    dev: true
+
+  /dayjs@1.11.11:
+    resolution: {integrity: sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==}
+    dev: true
+
+  /debug@4.3.5:
+    resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==}
+    engines: {node: '>=6.0'}
+    peerDependencies:
+      supports-color: '*'
+    peerDependenciesMeta:
+      supports-color:
+        optional: true
+    dependencies:
+      ms: 2.1.2
+    dev: true
+
+  /decamelize@1.2.0:
+    resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /dijkstrajs@1.0.3:
+    resolution: {integrity: sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA==}
+    dev: true
+
+  /dom-serializer@2.0.0:
+    resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==}
+    dependencies:
+      domelementtype: 2.3.0
+      domhandler: 5.0.3
+      entities: 4.5.0
+    dev: true
+
+  /domelementtype@2.3.0:
+    resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==}
+    dev: true
+
+  /domhandler@5.0.3:
+    resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==}
+    engines: {node: '>= 4'}
+    dependencies:
+      domelementtype: 2.3.0
+    dev: true
+
+  /domutils@3.1.0:
+    resolution: {integrity: sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==}
+    dependencies:
+      dom-serializer: 2.0.0
+      domelementtype: 2.3.0
+      domhandler: 5.0.3
+    dev: true
+
+  /eastasianwidth@0.2.0:
+    resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
+    dev: true
+
+  /electron-to-chromium@1.4.811:
+    resolution: {integrity: sha512-CDyzcJ5XW78SHzsIOdn27z8J4ist8eaFLhdto2hSMSJQgsiwvbv2fbizcKUICryw1Wii1TI/FEkvzvJsR3awrA==}
+    dev: true
+
+  /email-addresses@5.0.0:
+    resolution: {integrity: sha512-4OIPYlA6JXqtVn8zpHpGiI7vE6EQOAg16aGnDMIAlZVinnoZ8208tW1hAbjWydgN/4PLTT9q+O1K6AH/vALJGw==}
+    dev: true
+
+  /emoji-regex@10.3.0:
+    resolution: {integrity: sha512-QpLs9D9v9kArv4lfDEgg1X/gN5XLnf/A6l9cs8SPZLRZR3ZkY9+kwIQTxm+fsSej5UMYGE8fdoaZVIBlqG0XTw==}
+    dev: true
+
+  /emoji-regex@8.0.0:
+    resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
+    dev: true
+
+  /encode-utf8@1.0.3:
+    resolution: {integrity: sha512-ucAnuBEhUK4boH2HjVYG5Q2mQyPorvv0u/ocS+zhdw0S8AlHYY+GOFhP1Gio5z4icpP2ivFSvhtFjQi8+T9ppw==}
+    dev: true
+
+  /entities@3.0.1:
+    resolution: {integrity: sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==}
+    engines: {node: '>=0.12'}
+    dev: true
+
+  /entities@4.5.0:
+    resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==}
+    engines: {node: '>=0.12'}
+    dev: true
+
+  /envinfo@7.13.0:
+    resolution: {integrity: sha512-cvcaMr7KqXVh4nyzGTVqTum+gAiL265x5jUWQIDLq//zOGbW+gSW/C+OWLleY/rs9Qole6AZLMXPbtIFQbqu+Q==}
+    engines: {node: '>=4'}
+    hasBin: true
+    dev: true
+
+  /esbuild@0.19.12:
+    resolution: {integrity: sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==}
+    engines: {node: '>=12'}
+    hasBin: true
+    requiresBuild: true
+    optionalDependencies:
+      '@esbuild/aix-ppc64': 0.19.12
+      '@esbuild/android-arm': 0.19.12
+      '@esbuild/android-arm64': 0.19.12
+      '@esbuild/android-x64': 0.19.12
+      '@esbuild/darwin-arm64': 0.19.12
+      '@esbuild/darwin-x64': 0.19.12
+      '@esbuild/freebsd-arm64': 0.19.12
+      '@esbuild/freebsd-x64': 0.19.12
+      '@esbuild/linux-arm': 0.19.12
+      '@esbuild/linux-arm64': 0.19.12
+      '@esbuild/linux-ia32': 0.19.12
+      '@esbuild/linux-loong64': 0.19.12
+      '@esbuild/linux-mips64el': 0.19.12
+      '@esbuild/linux-ppc64': 0.19.12
+      '@esbuild/linux-riscv64': 0.19.12
+      '@esbuild/linux-s390x': 0.19.12
+      '@esbuild/linux-x64': 0.19.12
+      '@esbuild/netbsd-x64': 0.19.12
+      '@esbuild/openbsd-x64': 0.19.12
+      '@esbuild/sunos-x64': 0.19.12
+      '@esbuild/win32-arm64': 0.19.12
+      '@esbuild/win32-ia32': 0.19.12
+      '@esbuild/win32-x64': 0.19.12
+    dev: true
+
+  /escalade@3.1.2:
+    resolution: {integrity: sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /escape-string-regexp@1.0.5:
+    resolution: {integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==}
+    engines: {node: '>=0.8.0'}
+    dev: true
+
+  /esprima@4.0.1:
+    resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==}
+    engines: {node: '>=4'}
+    hasBin: true
+    dev: true
+
+  /estree-walker@2.0.2:
+    resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==}
+    dev: true
+
+  /execa@8.0.1:
+    resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==}
+    engines: {node: '>=16.17'}
+    dependencies:
+      cross-spawn: 7.0.3
+      get-stream: 8.0.1
+      human-signals: 5.0.0
+      is-stream: 3.0.0
+      merge-stream: 2.0.0
+      npm-run-path: 5.3.0
+      onetime: 6.0.0
+      signal-exit: 4.1.0
+      strip-final-newline: 3.0.0
+    dev: true
+
+  /extend-shallow@2.0.1:
+    resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      is-extendable: 0.1.1
+    dev: true
+
+  /fast-glob@3.3.2:
+    resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==}
+    engines: {node: '>=8.6.0'}
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      '@nodelib/fs.walk': 1.2.8
+      glob-parent: 5.1.2
+      merge2: 1.4.1
+      micromatch: 4.0.7
+    dev: true
+
+  /fastq@1.17.1:
+    resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==}
+    dependencies:
+      reusify: 1.0.4
+    dev: true
+
+  /fflate@0.8.2:
+    resolution: {integrity: sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==}
+    dev: true
+
+  /filename-reserved-regex@2.0.0:
+    resolution: {integrity: sha512-lc1bnsSr4L4Bdif8Xb/qrtokGbq5zlsms/CYH8PP+WtCkGNF65DPiQY8vG3SakEdRn8Dlnm+gW/qWKKjS5sZzQ==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /filenamify@4.3.0:
+    resolution: {integrity: sha512-hcFKyUG57yWGAzu1CMt/dPzYZuv+jAJUT85bL8mrXvNe6hWj6yEHEc4EdcgiA6Z3oi1/9wXJdZPXF2dZNgwgOg==}
+    engines: {node: '>=8'}
+    dependencies:
+      filename-reserved-regex: 2.0.0
+      strip-outer: 1.0.1
+      trim-repeated: 1.0.0
+    dev: true
+
+  /fill-range@7.1.1:
+    resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==}
+    engines: {node: '>=8'}
+    dependencies:
+      to-regex-range: 5.0.1
+    dev: true
+
+  /find-cache-dir@3.3.2:
+    resolution: {integrity: sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==}
+    engines: {node: '>=8'}
+    dependencies:
+      commondir: 1.0.1
+      make-dir: 3.1.0
+      pkg-dir: 4.2.0
+    dev: true
+
+  /find-up@4.1.0:
+    resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==}
+    engines: {node: '>=8'}
+    dependencies:
+      locate-path: 5.0.0
+      path-exists: 4.0.0
+    dev: true
+
+  /fraction.js@4.3.7:
+    resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==}
+    dev: true
+
+  /fs-extra@11.2.0:
+    resolution: {integrity: sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==}
+    engines: {node: '>=14.14'}
+    dependencies:
+      graceful-fs: 4.2.11
+      jsonfile: 6.1.0
+      universalify: 2.0.1
+    dev: true
+
+  /fs.realpath@1.0.0:
+    resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
+    dev: true
+
+  /fsevents@2.3.3:
+    resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
+    engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /get-caller-file@2.0.5:
+    resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==}
+    engines: {node: 6.* || 8.* || >= 10.*}
+    dev: true
+
+  /get-stream@8.0.1:
+    resolution: {integrity: sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==}
+    engines: {node: '>=16'}
+    dev: true
+
+  /gh-pages@6.1.1:
+    resolution: {integrity: sha512-upnohfjBwN5hBP9w2dPE7HO5JJTHzSGMV1JrLrHvNuqmjoYHg6TBrCcnEoorjG/e0ejbuvnwyKMdTyM40PEByw==}
+    engines: {node: '>=10'}
+    hasBin: true
+    dependencies:
+      async: 3.2.5
+      commander: 11.1.0
+      email-addresses: 5.0.0
+      filenamify: 4.3.0
+      find-cache-dir: 3.3.2
+      fs-extra: 11.2.0
+      globby: 6.1.0
+    dev: true
+
+  /giscus@1.5.0:
+    resolution: {integrity: sha512-t3LL0qbSO3JXq3uyQeKpF5CegstGfKX/0gI6eDe1cmnI7D56R7j52yLdzw4pdKrg3VnufwCgCM3FDz7G1Qr6lg==}
+    dependencies:
+      lit: 3.1.4
+    dev: true
+
+  /glob-parent@5.1.2:
+    resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
+    engines: {node: '>= 6'}
+    dependencies:
+      is-glob: 4.0.3
+    dev: true
+
+  /glob@7.2.3:
+    resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
+    deprecated: Glob versions prior to v9 are no longer supported
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+    dev: true
+
+  /globby@14.0.1:
+    resolution: {integrity: sha512-jOMLD2Z7MAhyG8aJpNOpmziMOP4rPLcc95oQPKXBazW82z+CEgPFBQvEpRUa1KeIMUJo4Wsm+q6uzO/Q/4BksQ==}
+    engines: {node: '>=18'}
+    dependencies:
+      '@sindresorhus/merge-streams': 2.3.0
+      fast-glob: 3.3.2
+      ignore: 5.3.1
+      path-type: 5.0.0
+      slash: 5.1.0
+      unicorn-magic: 0.1.0
+    dev: true
+
+  /globby@6.1.0:
+    resolution: {integrity: sha512-KVbFv2TQtbzCoxAnfD6JcHZTYCzyliEaaeM/gH8qQdkKr5s0OP9scEgvdcngyk7AVdY6YVW/TJHd+lQ/Df3Daw==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      array-union: 1.0.2
+      glob: 7.2.3
+      object-assign: 4.1.1
+      pify: 2.3.0
+      pinkie-promise: 2.0.1
+    dev: true
+
+  /graceful-fs@4.2.11:
+    resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
+    dev: true
+
+  /gray-matter@4.0.3:
+    resolution: {integrity: sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==}
+    engines: {node: '>=6.0'}
+    dependencies:
+      js-yaml: 3.14.1
+      kind-of: 6.0.3
+      section-matter: 1.0.0
+      strip-bom-string: 1.0.0
+    dev: true
+
+  /hash-sum@2.0.0:
+    resolution: {integrity: sha512-WdZTbAByD+pHfl/g9QSsBIIwy8IT+EsPiKDs0KNX+zSHhdDLFKdZu0BQHljvO+0QI/BasbMSUa8wYNCZTvhslg==}
+    dev: true
+
+  /htmlparser2@8.0.2:
+    resolution: {integrity: sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==}
+    dependencies:
+      domelementtype: 2.3.0
+      domhandler: 5.0.3
+      domutils: 3.1.0
+      entities: 4.5.0
+    dev: true
+
+  /human-signals@5.0.0:
+    resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==}
+    engines: {node: '>=16.17.0'}
+    dev: true
+
+  /ieee754@1.2.1:
+    resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==}
+    dev: true
+
+  /ignore@5.3.1:
+    resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==}
+    engines: {node: '>= 4'}
+    dev: true
+
+  /immutable@4.3.6:
+    resolution: {integrity: sha512-Ju0+lEMyzMVZarkTn/gqRpdqd5dOPaz1mCZ0SH3JV6iFw81PldE/PEB1hWVEA288HPt4WXW8O7AWxB10M+03QQ==}
+    dev: true
+
+  /inflight@1.0.6:
+    resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
+    deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.
+    dependencies:
+      once: 1.4.0
+      wrappy: 1.0.2
+    dev: true
+
+  /inherits@2.0.4:
+    resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
+    dev: true
+
+  /is-binary-path@2.1.0:
+    resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
+    engines: {node: '>=8'}
+    dependencies:
+      binary-extensions: 2.3.0
+    dev: true
+
+  /is-extendable@0.1.1:
+    resolution: {integrity: sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /is-extglob@2.1.1:
+    resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /is-fullwidth-code-point@3.0.0:
+    resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /is-glob@4.0.3:
+    resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      is-extglob: 2.1.1
+    dev: true
+
+  /is-interactive@2.0.0:
+    resolution: {integrity: sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /is-number@7.0.0:
+    resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
+    engines: {node: '>=0.12.0'}
+    dev: true
+
+  /is-stream@3.0.0:
+    resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dev: true
+
+  /is-unicode-supported@1.3.0:
+    resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /isexe@2.0.0:
+    resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
+    dev: true
+
+  /js-yaml@3.14.1:
+    resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==}
+    hasBin: true
+    dependencies:
+      argparse: 1.0.10
+      esprima: 4.0.1
+    dev: true
+
+  /js-yaml@4.1.0:
+    resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==}
+    hasBin: true
+    dependencies:
+      argparse: 2.0.1
+    dev: true
+
+  /jsonfile@6.1.0:
+    resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==}
+    dependencies:
+      universalify: 2.0.1
+    optionalDependencies:
+      graceful-fs: 4.2.11
+    dev: true
+
+  /kind-of@6.0.3:
+    resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /lilconfig@3.1.2:
+    resolution: {integrity: sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==}
+    engines: {node: '>=14'}
+    dev: true
+
+  /linkify-it@4.0.1:
+    resolution: {integrity: sha512-C7bfi1UZmoj8+PQx22XyeXCuBlokoyWQL5pWSP+EI6nzRylyThouddufc2c1NDIcP9k5agmN9fLpA7VNJfIiqw==}
+    dependencies:
+      uc.micro: 1.0.6
+    dev: true
+
+  /lit-element@4.0.6:
+    resolution: {integrity: sha512-U4sdJ3CSQip7sLGZ/uJskO5hGiqtlpxndsLr6mt3IQIjheg93UKYeGQjWMRql1s/cXNOaRrCzC2FQwjIwSUqkg==}
+    dependencies:
+      '@lit-labs/ssr-dom-shim': 1.2.0
+      '@lit/reactive-element': 2.0.4
+      lit-html: 3.1.4
+    dev: true
+
+  /lit-html@3.1.4:
+    resolution: {integrity: sha512-yKKO2uVv7zYFHlWMfZmqc+4hkmSbFp8jgjdZY9vvR9jr4J8fH6FUMXhr+ljfELgmjpvlF7Z1SJ5n5/Jeqtc9YA==}
+    dependencies:
+      '@types/trusted-types': 2.0.7
+    dev: true
+
+  /lit@3.1.4:
+    resolution: {integrity: sha512-q6qKnKXHy2g1kjBaNfcoLlgbI3+aSOZ9Q4tiGa9bGYXq5RBXxkVTqTIVmP2VWMp29L4GyvCFm8ZQ2o56eUAMyA==}
+    dependencies:
+      '@lit/reactive-element': 2.0.4
+      lit-element: 4.0.6
+      lit-html: 3.1.4
+    dev: true
+
+  /locate-path@5.0.0:
+    resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==}
+    engines: {node: '>=8'}
+    dependencies:
+      p-locate: 4.1.0
+    dev: true
+
+  /log-symbols@5.1.0:
+    resolution: {integrity: sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA==}
+    engines: {node: '>=12'}
+    dependencies:
+      chalk: 5.3.0
+      is-unicode-supported: 1.3.0
+    dev: true
+
+  /magic-string@0.30.10:
+    resolution: {integrity: sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==}
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+    dev: true
+
+  /make-dir@3.1.0:
+    resolution: {integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==}
+    engines: {node: '>=8'}
+    dependencies:
+      semver: 6.3.1
+    dev: true
+
+  /markdown-it-anchor@8.6.7(@types/markdown-it@13.0.8)(markdown-it@13.0.2):
+    resolution: {integrity: sha512-FlCHFwNnutLgVTflOYHPW2pPcl2AACqVzExlkGQNsi4CJgqOHN7YTgDd4LuhgN1BFO3TS0vLAruV1Td6dwWPJA==}
+    peerDependencies:
+      '@types/markdown-it': '*'
+      markdown-it: '*'
+    dependencies:
+      '@types/markdown-it': 13.0.8
+      markdown-it: 13.0.2
+    dev: true
+
+  /markdown-it-container@3.0.0:
+    resolution: {integrity: sha512-y6oKTq4BB9OQuY/KLfk/O3ysFhB3IMYoIWhGJEidXt1NQFocFK2sA2t0NYZAMyMShAGL6x5OPIbrmXPIqaN9rw==}
+    dev: true
+
+  /markdown-it-emoji@2.0.2:
+    resolution: {integrity: sha512-zLftSaNrKuYl0kR5zm4gxXjHaOI3FAOEaloKmRA5hijmJZvSjmxcokOLlzycb/HXlUFWzXqpIEoyEMCE4i9MvQ==}
+    dev: true
+
+  /markdown-it@13.0.2:
+    resolution: {integrity: sha512-FtwnEuuK+2yVU7goGn/MJ0WBZMM9ZPgU9spqlFs7/A/pDIUNSOQZhUgOqYCficIuR2QaFnrt8LHqBWsbTAoI5w==}
+    hasBin: true
+    dependencies:
+      argparse: 2.0.1
+      entities: 3.0.1
+      linkify-it: 4.0.1
+      mdurl: 1.0.1
+      uc.micro: 1.0.6
+    dev: true
+
+  /mdurl@1.0.1:
+    resolution: {integrity: sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==}
+    dev: true
+
+  /medium-zoom@1.1.0:
+    resolution: {integrity: sha512-ewyDsp7k4InCUp3jRmwHBRFGyjBimKps/AJLjRSox+2q/2H4p/PNpQf+pwONWlJiOudkBXtbdmVbFjqyybfTmQ==}
+    dev: true
+
+  /merge-stream@2.0.0:
+    resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==}
+    dev: true
+
+  /merge2@1.4.1:
+    resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
+    engines: {node: '>= 8'}
+    dev: true
+
+  /micromatch@4.0.7:
+    resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==}
+    engines: {node: '>=8.6'}
+    dependencies:
+      braces: 3.0.3
+      picomatch: 2.3.1
+    dev: true
+
+  /mimic-fn@2.1.0:
+    resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /mimic-fn@4.0.0:
+    resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /minimatch@3.1.2:
+    resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
+    dependencies:
+      brace-expansion: 1.1.11
+    dev: true
+
+  /ms@2.1.2:
+    resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==}
+    dev: true
+
+  /nanoid@3.3.7:
+    resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==}
+    engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
+    hasBin: true
+    dev: true
+
+  /node-releases@2.0.14:
+    resolution: {integrity: sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==}
+    dev: true
+
+  /normalize-path@3.0.0:
+    resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /normalize-range@0.1.2:
+    resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /npm-run-path@5.3.0:
+    resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      path-key: 4.0.0
+    dev: true
+
+  /nth-check@2.1.1:
+    resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==}
+    dependencies:
+      boolbase: 1.0.0
+    dev: true
+
+  /object-assign@4.1.1:
+    resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /once@1.4.0:
+    resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
+    dependencies:
+      wrappy: 1.0.2
+    dev: true
+
+  /onetime@5.1.2:
+    resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
+    engines: {node: '>=6'}
+    dependencies:
+      mimic-fn: 2.1.0
+    dev: true
+
+  /onetime@6.0.0:
+    resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      mimic-fn: 4.0.0
+    dev: true
+
+  /ora@7.0.1:
+    resolution: {integrity: sha512-0TUxTiFJWv+JnjWm4o9yvuskpEJLXTcng8MJuKd+SzAzp2o+OP3HWqNhB4OdJRt1Vsd9/mR0oyaEYlOnL7XIRw==}
+    engines: {node: '>=16'}
+    dependencies:
+      chalk: 5.3.0
+      cli-cursor: 4.0.0
+      cli-spinners: 2.9.2
+      is-interactive: 2.0.0
+      is-unicode-supported: 1.3.0
+      log-symbols: 5.1.0
+      stdin-discarder: 0.1.0
+      string-width: 6.1.0
+      strip-ansi: 7.1.0
+    dev: true
+
+  /p-limit@2.3.0:
+    resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==}
+    engines: {node: '>=6'}
+    dependencies:
+      p-try: 2.2.0
+    dev: true
+
+  /p-locate@4.1.0:
+    resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==}
+    engines: {node: '>=8'}
+    dependencies:
+      p-limit: 2.3.0
+    dev: true
+
+  /p-try@2.2.0:
+    resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /parse5-htmlparser2-tree-adapter@7.0.0:
+    resolution: {integrity: sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==}
+    dependencies:
+      domhandler: 5.0.3
+      parse5: 7.1.2
+    dev: true
+
+  /parse5@7.1.2:
+    resolution: {integrity: sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==}
+    dependencies:
+      entities: 4.5.0
+    dev: true
+
+  /path-exists@4.0.0:
+    resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /path-is-absolute@1.0.1:
+    resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /path-key@3.1.1:
+    resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /path-key@4.0.0:
+    resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /path-type@5.0.0:
+    resolution: {integrity: sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /photoswipe@5.4.4:
+    resolution: {integrity: sha512-WNFHoKrkZNnvFFhbHL93WDkW3ifwVOXSW3w1UuZZelSmgXpIGiZSNlZJq37rR8YejqME2rHs9EhH9ZvlvFH2NA==}
+    engines: {node: '>= 0.12.0'}
+    dev: true
+
+  /picocolors@1.0.1:
+    resolution: {integrity: sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==}
+    dev: true
+
+  /picomatch@2.3.1:
+    resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
+    engines: {node: '>=8.6'}
+    dev: true
+
+  /pify@2.3.0:
+    resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /pinkie-promise@2.0.1:
+    resolution: {integrity: sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      pinkie: 2.0.4
+    dev: true
+
+  /pinkie@2.0.4:
+    resolution: {integrity: sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /pkg-dir@4.2.0:
+    resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==}
+    engines: {node: '>=8'}
+    dependencies:
+      find-up: 4.1.0
+    dev: true
+
+  /pngjs@5.0.0:
+    resolution: {integrity: sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw==}
+    engines: {node: '>=10.13.0'}
+    dev: true
+
+  /postcss-load-config@4.0.2(postcss@8.4.38):
+    resolution: {integrity: sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==}
+    engines: {node: '>= 14'}
+    peerDependencies:
+      postcss: '>=8.0.9'
+      ts-node: '>=9.0.0'
+    peerDependenciesMeta:
+      postcss:
+        optional: true
+      ts-node:
+        optional: true
+    dependencies:
+      lilconfig: 3.1.2
+      postcss: 8.4.38
+      yaml: 2.4.5
+    dev: true
+
+  /postcss-value-parser@4.2.0:
+    resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
+    dev: true
+
+  /postcss@8.4.38:
+    resolution: {integrity: sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==}
+    engines: {node: ^10 || ^12 || >=14}
+    dependencies:
+      nanoid: 3.3.7
+      picocolors: 1.0.1
+      source-map-js: 1.2.0
+    dev: true
+
+  /preact@10.22.0:
+    resolution: {integrity: sha512-RRurnSjJPj4rp5K6XoP45Ui33ncb7e4H7WiOHVpjbkvqvA3U+N8Z6Qbo0AE6leGYBV66n8EhEaFixvIu3SkxFw==}
+    dev: true
+
+  /prismjs@1.29.0:
+    resolution: {integrity: sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /qrcode@1.5.3:
+    resolution: {integrity: sha512-puyri6ApkEHYiVl4CFzo1tDkAZ+ATcnbJrJ6RiBM1Fhctdn/ix9MTE3hRph33omisEbC/2fcfemsseiKgBPKZg==}
+    engines: {node: '>=10.13.0'}
+    hasBin: true
+    dependencies:
+      dijkstrajs: 1.0.3
+      encode-utf8: 1.0.3
+      pngjs: 5.0.0
+      yargs: 15.4.1
+    dev: true
+
+  /queue-microtask@1.2.3:
+    resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
+    dev: true
+
+  /readable-stream@3.6.2:
+    resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==}
+    engines: {node: '>= 6'}
+    dependencies:
+      inherits: 2.0.4
+      string_decoder: 1.3.0
+      util-deprecate: 1.0.2
+    dev: true
+
+  /readdirp@3.6.0:
+    resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
+    engines: {node: '>=8.10.0'}
+    dependencies:
+      picomatch: 2.3.1
+    dev: true
+
+  /require-directory@2.1.1:
+    resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /require-main-filename@2.0.0:
+    resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==}
+    dev: true
+
+  /restore-cursor@4.0.0:
+    resolution: {integrity: sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      onetime: 5.1.2
+      signal-exit: 3.0.7
+    dev: true
+
+  /reusify@1.0.4:
+    resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==}
+    engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
+    dev: true
+
+  /rollup@4.18.0:
+    resolution: {integrity: sha512-QmJz14PX3rzbJCN1SG4Xe/bAAX2a6NpCP8ab2vfu2GiUr8AQcr2nCV/oEO3yneFarB67zk8ShlIyWb2LGTb3Sg==}
+    engines: {node: '>=18.0.0', npm: '>=8.0.0'}
+    hasBin: true
+    dependencies:
+      '@types/estree': 1.0.5
+    optionalDependencies:
+      '@rollup/rollup-android-arm-eabi': 4.18.0
+      '@rollup/rollup-android-arm64': 4.18.0
+      '@rollup/rollup-darwin-arm64': 4.18.0
+      '@rollup/rollup-darwin-x64': 4.18.0
+      '@rollup/rollup-linux-arm-gnueabihf': 4.18.0
+      '@rollup/rollup-linux-arm-musleabihf': 4.18.0
+      '@rollup/rollup-linux-arm64-gnu': 4.18.0
+      '@rollup/rollup-linux-arm64-musl': 4.18.0
+      '@rollup/rollup-linux-powerpc64le-gnu': 4.18.0
+      '@rollup/rollup-linux-riscv64-gnu': 4.18.0
+      '@rollup/rollup-linux-s390x-gnu': 4.18.0
+      '@rollup/rollup-linux-x64-gnu': 4.18.0
+      '@rollup/rollup-linux-x64-musl': 4.18.0
+      '@rollup/rollup-win32-arm64-msvc': 4.18.0
+      '@rollup/rollup-win32-ia32-msvc': 4.18.0
+      '@rollup/rollup-win32-x64-msvc': 4.18.0
+      fsevents: 2.3.3
+    dev: true
+
+  /run-parallel@1.2.0:
+    resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
+    dependencies:
+      queue-microtask: 1.2.3
+    dev: true
+
+  /safe-buffer@5.2.1:
+    resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==}
+    dev: true
+
+  /sass@1.77.6:
+    resolution: {integrity: sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==}
+    engines: {node: '>=14.0.0'}
+    hasBin: true
+    dependencies:
+      chokidar: 3.6.0
+      immutable: 4.3.6
+      source-map-js: 1.2.0
+    dev: true
+
+  /sax@1.4.1:
+    resolution: {integrity: sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==}
+    dev: true
+
+  /search-insights@2.14.0:
+    resolution: {integrity: sha512-OLN6MsPMCghDOqlCtsIsYgtsC0pnwVTyT9Mu6A3ewOj1DxvzZF6COrn2g86E/c05xbktB0XN04m/t1Z+n+fTGw==}
+    dev: true
+
+  /section-matter@1.0.0:
+    resolution: {integrity: sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==}
+    engines: {node: '>=4'}
+    dependencies:
+      extend-shallow: 2.0.1
+      kind-of: 6.0.3
+    dev: true
+
+  /semver@6.3.1:
+    resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==}
+    hasBin: true
+    dev: true
+
+  /semver@7.6.2:
+    resolution: {integrity: sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==}
+    engines: {node: '>=10'}
+    hasBin: true
+    dev: true
+
+  /set-blocking@2.0.0:
+    resolution: {integrity: sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==}
+    dev: true
+
+  /shebang-command@2.0.0:
+    resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==}
+    engines: {node: '>=8'}
+    dependencies:
+      shebang-regex: 3.0.0
+    dev: true
+
+  /shebang-regex@3.0.0:
+    resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /signal-exit@3.0.7:
+    resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==}
+    dev: true
+
+  /signal-exit@4.1.0:
+    resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
+    engines: {node: '>=14'}
+    dev: true
+
+  /sitemap@7.1.2:
+    resolution: {integrity: sha512-ARCqzHJ0p4gWt+j7NlU5eDlIO9+Rkr/JhPFZKKQ1l5GCus7rJH4UdrlVAh0xC/gDS/Qir2UMxqYNHtsKr2rpCw==}
+    engines: {node: '>=12.0.0', npm: '>=5.6.0'}
+    hasBin: true
+    dependencies:
+      '@types/node': 17.0.45
+      '@types/sax': 1.2.7
+      arg: 5.0.2
+      sax: 1.4.1
+    dev: true
+
+  /slash@5.1.0:
+    resolution: {integrity: sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==}
+    engines: {node: '>=14.16'}
+    dev: true
+
+  /source-map-js@1.2.0:
+    resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /sprintf-js@1.0.3:
+    resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==}
+    dev: true
+
+  /stdin-discarder@0.1.0:
+    resolution: {integrity: sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      bl: 5.1.0
+    dev: true
+
+  /string-width@4.2.3:
+    resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
+    engines: {node: '>=8'}
+    dependencies:
+      emoji-regex: 8.0.0
+      is-fullwidth-code-point: 3.0.0
+      strip-ansi: 6.0.1
+    dev: true
+
+  /string-width@6.1.0:
+    resolution: {integrity: sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ==}
+    engines: {node: '>=16'}
+    dependencies:
+      eastasianwidth: 0.2.0
+      emoji-regex: 10.3.0
+      strip-ansi: 7.1.0
+    dev: true
+
+  /string_decoder@1.3.0:
+    resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==}
+    dependencies:
+      safe-buffer: 5.2.1
+    dev: true
+
+  /strip-ansi@6.0.1:
+    resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
+    engines: {node: '>=8'}
+    dependencies:
+      ansi-regex: 5.0.1
+    dev: true
+
+  /strip-ansi@7.1.0:
+    resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      ansi-regex: 6.0.1
+    dev: true
+
+  /strip-bom-string@1.0.0:
+    resolution: {integrity: sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /strip-final-newline@3.0.0:
+    resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /strip-outer@1.0.1:
+    resolution: {integrity: sha512-k55yxKHwaXnpYGsOzg4Vl8+tDrWylxDEpknGjhTiZB8dFRU5rTo9CAzeycivxV3s+zlTKwrs6WxMxR95n26kwg==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      escape-string-regexp: 1.0.5
+    dev: true
+
+  /striptags@3.2.0:
+    resolution: {integrity: sha512-g45ZOGzHDMe2bdYMdIvdAfCQkCTDMGBazSw1ypMowwGIee7ZQ5dU0rBJ8Jqgl+jAKIv4dbeE1jscZq9wid1Tkw==}
+    dev: true
+
+  /to-fast-properties@2.0.0:
+    resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /to-regex-range@5.0.1:
+    resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
+    engines: {node: '>=8.0'}
+    dependencies:
+      is-number: 7.0.0
+    dev: true
+
+  /trim-repeated@1.0.0:
+    resolution: {integrity: sha512-pkonvlKk8/ZuR0D5tLW8ljt5I8kmxp2XKymhepUeOdCEfKpZaktSArkLHZt76OB1ZvO9bssUsDty4SWhLvZpLg==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      escape-string-regexp: 1.0.5
+    dev: true
+
+  /ts-debounce@4.0.0:
+    resolution: {integrity: sha512-+1iDGY6NmOGidq7i7xZGA4cm8DAa6fqdYcvO5Z6yBevH++Bdo9Qt/mN0TzHUgcCcKv1gmh9+W5dHqz8pMWbCbg==}
+    dev: true
+
+  /uc.micro@1.0.6:
+    resolution: {integrity: sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==}
+    dev: true
+
+  /undici-types@5.26.5:
+    resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
+    dev: true
+
+  /unicorn-magic@0.1.0:
+    resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==}
+    engines: {node: '>=18'}
+    dev: true
+
+  /universalify@2.0.1:
+    resolution: {integrity: sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==}
+    engines: {node: '>= 10.0.0'}
+    dev: true
+
+  /upath@2.0.1:
+    resolution: {integrity: sha512-1uEe95xksV1O0CYKXo8vQvN1JEbtJp7lb7C5U9HMsIp6IVwntkH/oNUzyVNQSd4S1sYk2FpSSW44FqMc8qee5w==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /update-browserslist-db@1.0.16(browserslist@4.23.1):
+    resolution: {integrity: sha512-KVbTxlBYlckhF5wgfyZXTWnMn7MMZjMu9XG8bPlliUOP9ThaF4QnhP8qrjrH7DRzHfSk0oQv1wToW+iA5GajEQ==}
+    hasBin: true
+    peerDependencies:
+      browserslist: '>= 4.21.0'
+    dependencies:
+      browserslist: 4.23.1
+      escalade: 3.1.2
+      picocolors: 1.0.1
+    dev: true
+
+  /util-deprecate@1.0.2:
+    resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
+    dev: true
+
+  /vite@5.0.13:
+    resolution: {integrity: sha512-/9ovhv2M2dGTuA+dY93B9trfyWMDRQw2jdVBhHNP6wr0oF34wG2i/N55801iZIpgUpnHDm4F/FabGQLyc+eOgg==}
+    engines: {node: ^18.0.0 || >=20.0.0}
+    hasBin: true
+    peerDependencies:
+      '@types/node': ^18.0.0 || >=20.0.0
+      less: '*'
+      lightningcss: ^1.21.0
+      sass: '*'
+      stylus: '*'
+      sugarss: '*'
+      terser: ^5.4.0
+    peerDependenciesMeta:
+      '@types/node':
+        optional: true
+      less:
+        optional: true
+      lightningcss:
+        optional: true
+      sass:
+        optional: true
+      stylus:
+        optional: true
+      sugarss:
+        optional: true
+      terser:
+        optional: true
+    dependencies:
+      esbuild: 0.19.12
+      postcss: 8.4.38
+      rollup: 4.18.0
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /vue-demi@0.14.8(vue@3.4.30):
+    resolution: {integrity: sha512-Uuqnk9YE9SsWeReYqK2alDI5YzciATE0r2SkA6iMAtuXvNTMNACJLJEXNXaEy94ECuBe4Sk6RzRU80kjdbIo1Q==}
+    engines: {node: '>=12'}
+    hasBin: true
+    requiresBuild: true
+    peerDependencies:
+      '@vue/composition-api': ^1.0.0-rc.1
+      vue: ^3.0.0-0 || ^2.6.0
+    peerDependenciesMeta:
+      '@vue/composition-api':
+        optional: true
+    dependencies:
+      vue: 3.4.30
+    dev: true
+
+  /vue-router@4.4.0(vue@3.4.30):
+    resolution: {integrity: sha512-HB+t2p611aIZraV2aPSRNXf0Z/oLZFrlygJm+sZbdJaW6lcFqEDQwnzUBXn+DApw+/QzDU/I9TeWx9izEjTmsA==}
+    peerDependencies:
+      vue: ^3.2.0
+    dependencies:
+      '@vue/devtools-api': 6.6.3
+      vue: 3.4.30
+    dev: true
+
+  /vue@3.4.30:
+    resolution: {integrity: sha512-NcxtKCwkdf1zPsr7Y8+QlDBCGqxvjLXF2EX+yi76rV5rrz90Y6gK1cq0olIhdWGgrlhs9ElHuhi9t3+W5sG5Xw==}
+    peerDependencies:
+      typescript: '*'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      '@vue/compiler-dom': 3.4.30
+      '@vue/compiler-sfc': 3.4.30
+      '@vue/runtime-dom': 3.4.30
+      '@vue/server-renderer': 3.4.30(vue@3.4.30)
+      '@vue/shared': 3.4.30
+    dev: true
+
+  /vuepress-plugin-auto-catalog@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-QA0TN42VSmhrfrl2LKG4ioMBpcQnhB1M799oj6pXQ0MbRdc8Z7rQCVY4DSCGG4I3E7PYUX5sg4bYf1om9BXBsQ==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Please use @vuepress/plugin-catalog@next intead
+    peerDependencies:
+      sass-loader: ^13.3.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      sass-loader:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-components: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - artplayer
+      - dashjs-pure
+      - hls.js
+      - mpegts.js
+      - plyr
+      - supports-color
+      - typescript
+      - vidstack
+    dev: true
+
+  /vuepress-plugin-blog2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-EDzsWMB4U7d6en6Zup/ztUoTrdCesdLdjEpRgXYONRZqlpsyPjzzbO6LW0BCNkBcS4ydyjKUXD+MhH1vgp6q1A==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Use @vuepress/plugin-blog@next instead
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      chokidar: 3.6.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-comment2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-Wcj2E/Oi0EWyJI/tR/GJNnvEbKkwvzWNTR+IspmHiH4fc9TPaIwKfKMUAMRUewxnGDaXU5H3+lrHTgkbZsU17g==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Use @vuepress/plugin-comment@next instead
+    peerDependencies:
+      '@waline/client': ^2.15.8 || ^3.0.0-alpha.8
+      artalk: ^2.7.3
+      sass-loader: ^13.3.0
+      twikoo: ^1.5.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      '@waline/client':
+        optional: true
+      artalk:
+        optional: true
+      sass-loader:
+        optional: true
+      twikoo:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      giscus: 1.5.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-components@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-5h3vIFrl2qBYkbCToH/qni2YIOlIeL1RGzuvA7vDJLfohCIIvi/u/V55HLGc67EbVnlD21g/naVblhgE3r2qkg==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    peerDependencies:
+      artplayer: ^5.0.0
+      dashjs-pure: ^1.0.0
+      hls.js: ^1.4.12
+      mpegts.js: ^1.7.3
+      plyr: ^3.7.8
+      sass-loader: ^13.3.0
+      vidstack: ^1.9.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      artplayer:
+        optional: true
+      dashjs-pure:
+        optional: true
+      hls.js:
+        optional: true
+      mpegts.js:
+        optional: true
+      plyr:
+        optional: true
+      sass-loader:
+        optional: true
+      vidstack:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@stackblitz/sdk': 1.10.0
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      balloon-css: 1.2.0
+      create-codepen: 1.0.1
+      qrcode: 1.5.3
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-reading-time2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-copy-code2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-69wX9qjJHGbTSnT5badtr6Z7Ao+/qRNNakTfUh8VIpTcFLtYfC7WhykvC9A8uy8Gti/lh+6Y5SK1EmZiThOqOA==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Please use @vuepress/plugin-copy-code@v2 instead
+    peerDependencies:
+      sass-loader: ^13.3.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      sass-loader:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      balloon-css: 1.2.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-copyright2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-Squ5YasdSwJy6WejXGbQzWSNgO7jWDgwNDst2fpB1xGI7f7HX9ytK9ZIrrIXkaZCeNz0JCb0xIN8GKEr1qhppQ==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Please use @vuepress/plugin-copyright@v2 instead
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-md-enhance@2.0.0-rc.11(markdown-it@13.0.2)(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-zkQRkuo2oVGaSqW5srKnLSIU1IW/OKMTmjozsqRhJ/pmLJRvoTpqa25Y4DdQYsWYsRXj09qsthobWzOsRSpgjw==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    peerDependencies:
+      '@types/reveal.js': ^4.4.5
+      '@vue/repl': ^3.0.0
+      chart.js: ^4.0.0
+      echarts: ^5.0.0
+      flowchart.ts: ^2.0.0 || ^3.0.0
+      katex: ^0.16.0
+      kotlin-playground: ^1.23.0
+      markmap-lib: ^0.15.5
+      markmap-toolbar: ^0.15.5
+      markmap-view: ^0.15.5
+      mathjax-full: ^3.2.2
+      mermaid: ^10.6.0
+      reveal.js: ^5.0.0
+      sandpack-vue3: ^3.0.0
+      sass-loader: ^13.3.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      '@types/reveal.js':
+        optional: true
+      '@vue/repl':
+        optional: true
+      chart.js:
+        optional: true
+      echarts:
+        optional: true
+      flowchart.ts:
+        optional: true
+      katex:
+        optional: true
+      kotlin-playground:
+        optional: true
+      markmap-lib:
+        optional: true
+      markmap-toolbar:
+        optional: true
+      markmap-view:
+        optional: true
+      mathjax-full:
+        optional: true
+      mermaid:
+        optional: true
+      reveal.js:
+        optional: true
+      sandpack-vue3:
+        optional: true
+      sass-loader:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@mdit/plugin-alert': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-align': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-attrs': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-container': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-demo': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-figure': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-footnote': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-img-lazyload': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-img-mark': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-img-size': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-include': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-katex': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-mark': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-mathjax': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-stylize': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-sub': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-sup': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-tab': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-tasklist': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-tex': 0.7.6(markdown-it@13.0.2)
+      '@mdit/plugin-uml': 0.7.6(markdown-it@13.0.2)
+      '@types/markdown-it': 13.0.8
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      balloon-css: 1.2.0
+      js-yaml: 4.1.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - markdown-it
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-photo-swipe@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-uSJkuoJTJJjf5PyShm2C7uW6HzCNH3GEcsK0X70pzSytL/lpDF0wUAJqUbZGgt69FPX0mBfl1dOeaMxAduYtJw==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Use @vuepress/plugin-photo-swipe instead
+    peerDependencies:
+      sass-loader: ^13.3.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      sass-loader:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      photoswipe: 5.4.4
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-reading-time2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-u0xHTa/s2755mdihqXRdIT+lixWAyxv81LCgPYgiyX432wEVwphxu4AD5r7kV4RUZH37EMehnQz4uXXHf21l8w==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Please use @vuepress/plugin-reading-time@v2 instead
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      vue: 3.4.30
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-rtl@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-7cD7+vwRV4qIXxrKx66VWW+2AmXz2jGnJEk6XVWcdjiWoTYR9OMUk/oDsolQJQXEIu949kzxSjl/FnyKuVMSmw==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Use @vuepress/plugin-rtl@next instead
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vue: 3.4.30
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-sass-palette@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-9uHknDmyGg6BN90kZkQcO96uU1Sfq7e27otQ99zozq32wL2nx84MuVc1d6q95lgpyySwiMZ/l4adSsSIgWAxpQ==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    peerDependencies:
+      sass-loader: ^13.3.0
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      sass-loader:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      chokidar: 3.6.0
+      sass: 1.77.6
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-seo2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-Cf5wabvruMYzytwrj+UcXYjsyq/HliZUJ4BwkoyYiz4Jt48pLEQOi1iqxwLnUR0653EPqA9yrmt1uX4kSuBdrQ==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Please use @vuepress/plugin-seo@v2 instead
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-plugin-sitemap2@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-JxDXr13xeq2sXtUYJ/bsl9G2w3KUmuAd4cFCWdfkVCZL5Yt6FbOUIvqy1qp3DrN35OFr97ir22BtOJDwAyrGcA==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    deprecated: Please use @vuepress/plugin-sitemap@v2 instead
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      sitemap: 7.1.2
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-shared@2.0.0-rc.11(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-ueyr8gJ40VwDmUILKlSwYwspMHCUdPVE4q5yh7Dm5khrHnARfOmKasV7/FpDQtNL6SPAatG+sGBO/ib4Pb7Htw==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    peerDependencies:
+      vuepress: 2.0.0-rc.0
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      vuepress:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      cheerio: 1.0.0-rc.12
+      dayjs: 1.11.11
+      execa: 8.0.1
+      fflate: 0.8.2
+      gray-matter: 4.0.3
+      semver: 7.6.2
+      striptags: 3.2.0
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@vue/composition-api'
+      - supports-color
+      - typescript
+    dev: true
+
+  /vuepress-theme-hope@2.0.0-rc.11(@vuepress/plugin-docsearch@2.0.0-rc.0)(markdown-it@13.0.2)(vuepress@2.0.0-rc.0):
+    resolution: {integrity: sha512-BQCobcJ5wM8BDjd53Y24acxrk0Z0nkbNIdYrf1GDsDgMCEB3pD20zwHKOA8NtovZkya7AA6DHhK3XY6bvd2Rlg==}
+    engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'}
+    peerDependencies:
+      '@vuepress/plugin-docsearch': 2.0.0-rc.0
+      '@vuepress/plugin-search': 2.0.0-rc.0
+      nodejs-jieba: ^0.1.2
+      sass-loader: ^13.3.0
+      vuepress: 2.0.0-rc.0
+      vuepress-plugin-feed2: 2.0.0-rc.11
+      vuepress-plugin-pwa2: 2.0.0-rc.11
+      vuepress-plugin-redirect: 2.0.0-rc.11
+      vuepress-plugin-search-pro: 2.0.0-rc.11
+      vuepress-vite: 2.0.0-rc.0
+      vuepress-webpack: 2.0.0-rc.0
+    peerDependenciesMeta:
+      '@vuepress/plugin-docsearch':
+        optional: true
+      '@vuepress/plugin-search':
+        optional: true
+      nodejs-jieba:
+        optional: true
+      sass-loader:
+        optional: true
+      vuepress:
+        optional: true
+      vuepress-plugin-feed2:
+        optional: true
+      vuepress-plugin-pwa2:
+        optional: true
+      vuepress-plugin-redirect:
+        optional: true
+      vuepress-plugin-search-pro:
+        optional: true
+      vuepress-vite:
+        optional: true
+      vuepress-webpack:
+        optional: true
+    dependencies:
+      '@vuepress/cli': 2.0.0-rc.0
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/plugin-active-header-links': 2.0.0-rc.0
+      '@vuepress/plugin-docsearch': 2.0.0-rc.0(@algolia/client-search@4.23.3)(search-insights@2.14.0)
+      '@vuepress/plugin-external-link-icon': 2.0.0-rc.0
+      '@vuepress/plugin-git': 2.0.0-rc.0
+      '@vuepress/plugin-nprogress': 2.0.0-rc.0
+      '@vuepress/plugin-prismjs': 2.0.0-rc.0
+      '@vuepress/plugin-theme-data': 2.0.0-rc.0
+      '@vuepress/shared': 2.0.0-rc.0
+      '@vuepress/utils': 2.0.0-rc.0
+      '@vueuse/core': 10.11.0(vue@3.4.30)
+      balloon-css: 1.2.0
+      bcrypt-ts: 5.0.2
+      cheerio: 1.0.0-rc.12
+      chokidar: 3.6.0
+      gray-matter: 4.0.3
+      vue: 3.4.30
+      vue-router: 4.4.0(vue@3.4.30)
+      vuepress: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+      vuepress-plugin-auto-catalog: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-blog2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-comment2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-components: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-copy-code2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-copyright2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-md-enhance: 2.0.0-rc.11(markdown-it@13.0.2)(vuepress@2.0.0-rc.0)
+      vuepress-plugin-photo-swipe: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-reading-time2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-rtl: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-sass-palette: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-seo2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-plugin-sitemap2: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+      vuepress-shared: 2.0.0-rc.11(vuepress@2.0.0-rc.0)
+    transitivePeerDependencies:
+      - '@types/reveal.js'
+      - '@vue/composition-api'
+      - '@vue/repl'
+      - '@waline/client'
+      - artalk
+      - artplayer
+      - chart.js
+      - dashjs-pure
+      - echarts
+      - flowchart.ts
+      - hls.js
+      - katex
+      - kotlin-playground
+      - markdown-it
+      - markmap-lib
+      - markmap-toolbar
+      - markmap-view
+      - mathjax-full
+      - mermaid
+      - mpegts.js
+      - plyr
+      - reveal.js
+      - sandpack-vue3
+      - supports-color
+      - twikoo
+      - typescript
+      - vidstack
+    dev: true
+
+  /vuepress-vite@2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30):
+    resolution: {integrity: sha512-+2XBejeiskPyr2raBeA2o4uDFDsjtadpUVmtio3qqFtQpOhidz/ORuiTLr2UfLtFn1ASIHP6Vy2YjQ0e/TeUVw==}
+    engines: {node: '>=18.16.0'}
+    hasBin: true
+    peerDependencies:
+      '@vuepress/client': 2.0.0-rc.0
+      vue: ^3.3.4
+    dependencies:
+      '@vuepress/bundler-vite': 2.0.0-rc.0
+      '@vuepress/cli': 2.0.0-rc.0
+      '@vuepress/client': 2.0.0-rc.0
+      '@vuepress/core': 2.0.0-rc.0
+      '@vuepress/theme-default': 2.0.0-rc.0
+      vue: 3.4.30
+    transitivePeerDependencies:
+      - '@types/node'
+      - '@vue/composition-api'
+      - less
+      - lightningcss
+      - sass
+      - sass-loader
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+      - ts-node
+      - typescript
+    dev: true
+
+  /vuepress@2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30):
+    resolution: {integrity: sha512-sydt/B7+pIw926G5PntYmptLkC5o2buXKh+WR1+P2KnsvkXU+UGnQrJJ0FBvu/4RNuY99tkUZd59nyPhEmRrCg==}
+    engines: {node: '>=18.16.0'}
+    hasBin: true
+    dependencies:
+      vuepress-vite: 2.0.0-rc.0(@vuepress/client@2.0.0-rc.0)(vue@3.4.30)
+    transitivePeerDependencies:
+      - '@types/node'
+      - '@vue/composition-api'
+      - '@vuepress/client'
+      - less
+      - lightningcss
+      - sass
+      - sass-loader
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+      - ts-node
+      - typescript
+      - vue
+    dev: true
+
+  /which-module@2.0.1:
+    resolution: {integrity: sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ==}
+    dev: true
+
+  /which@2.0.2:
+    resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
+    engines: {node: '>= 8'}
+    hasBin: true
+    dependencies:
+      isexe: 2.0.0
+    dev: true
+
+  /wrap-ansi@6.2.0:
+    resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==}
+    engines: {node: '>=8'}
+    dependencies:
+      ansi-styles: 4.3.0
+      string-width: 4.2.3
+      strip-ansi: 6.0.1
+    dev: true
+
+  /wrappy@1.0.2:
+    resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
+    dev: true
+
+  /y18n@4.0.3:
+    resolution: {integrity: sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ==}
+    dev: true
+
+  /yaml@2.4.5:
+    resolution: {integrity: sha512-aBx2bnqDzVOyNKfsysjA2ms5ZlnjSAW2eG3/L5G/CSujfjLJTJsEw1bGw8kCf04KodQWk1pxlGnZ56CRxiawmg==}
+    engines: {node: '>= 14'}
+    hasBin: true
+    dev: true
+
+  /yargs-parser@18.1.3:
+    resolution: {integrity: sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==}
+    engines: {node: '>=6'}
+    dependencies:
+      camelcase: 5.3.1
+      decamelize: 1.2.0
+    dev: true
+
+  /yargs@15.4.1:
+    resolution: {integrity: sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==}
+    engines: {node: '>=8'}
+    dependencies:
+      cliui: 6.0.0
+      decamelize: 1.2.0
+      find-up: 4.1.0
+      get-caller-file: 2.0.5
+      require-directory: 2.1.1
+      require-main-filename: 2.0.0
+      set-blocking: 2.0.0
+      string-width: 4.2.3
+      which-module: 2.0.1
+      y18n: 4.0.3
+      yargs-parser: 18.1.3
+    dev: true
diff --git a/java/common/pom.xml b/java/common/pom.xml
index bcd54f5..48b61a9 100644
--- a/java/common/pom.xml
+++ b/java/common/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-java</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>common</artifactId>
     <name>TsFile: Java: Common</name>
diff --git a/java/common/src/main/java/org/apache/tsfile/enums/TSDataType.java b/java/common/src/main/java/org/apache/tsfile/enums/TSDataType.java
index cf17aa0..492d1ba 100644
--- a/java/common/src/main/java/org/apache/tsfile/enums/TSDataType.java
+++ b/java/common/src/main/java/org/apache/tsfile/enums/TSDataType.java
@@ -19,6 +19,7 @@
 
 package org.apache.tsfile.enums;
 
+import org.apache.tsfile.utils.Binary;
 import org.apache.tsfile.write.UnSupportedDataTypeException;
 
 import java.io.DataOutputStream;
@@ -26,6 +27,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.EnumMap;
@@ -99,6 +101,14 @@
 
     Set<TSDataType> textCompatibleTypes = new HashSet<>();
     textCompatibleTypes.add(STRING);
+    textCompatibleTypes.add(INT32);
+    textCompatibleTypes.add(INT64);
+    textCompatibleTypes.add(FLOAT);
+    textCompatibleTypes.add(DOUBLE);
+    textCompatibleTypes.add(BOOLEAN);
+    textCompatibleTypes.add(BLOB);
+    textCompatibleTypes.add(DATE);
+    textCompatibleTypes.add(TIMESTAMP);
     compatibleTypes.put(TEXT, textCompatibleTypes);
 
     compatibleTypes.put(VECTOR, Collections.emptySet());
@@ -119,6 +129,15 @@
 
     Set<TSDataType> stringCompatibleTypes = new HashSet<>();
     stringCompatibleTypes.add(TEXT);
+    // add
+    stringCompatibleTypes.add(INT32);
+    stringCompatibleTypes.add(INT64);
+    stringCompatibleTypes.add(FLOAT);
+    stringCompatibleTypes.add(DOUBLE);
+    stringCompatibleTypes.add(BOOLEAN);
+    stringCompatibleTypes.add(BLOB);
+    stringCompatibleTypes.add(DATE);
+    stringCompatibleTypes.add(TIMESTAMP);
     compatibleTypes.put(STRING, stringCompatibleTypes);
   }
 
@@ -232,6 +251,17 @@
       case TEXT:
         if (sourceType == TSDataType.TEXT || sourceType == TSDataType.STRING) {
           return value;
+        } else if (sourceType == TSDataType.INT32
+            || sourceType == TSDataType.INT64
+            || sourceType == TSDataType.FLOAT
+            || sourceType == TSDataType.DOUBLE
+            || sourceType == TSDataType.BOOLEAN
+            || sourceType == TSDataType.TIMESTAMP) {
+          return new Binary(String.valueOf(value), StandardCharsets.UTF_8);
+        } else if (sourceType == TSDataType.DATE) {
+          return new Binary(getDateStringValue((int) value), StandardCharsets.UTF_8);
+        } else if (sourceType == TSDataType.BLOB) {
+          return new Binary(value.toString(), StandardCharsets.UTF_8);
         } else {
           break;
         }
@@ -262,6 +292,17 @@
       case STRING:
         if (sourceType == TSDataType.STRING || sourceType == TSDataType.TEXT) {
           return value;
+        } else if (sourceType == TSDataType.INT32
+            || sourceType == TSDataType.INT64
+            || sourceType == TSDataType.FLOAT
+            || sourceType == TSDataType.DOUBLE
+            || sourceType == TSDataType.BOOLEAN
+            || sourceType == TSDataType.TIMESTAMP) {
+          return new Binary(String.valueOf(value), StandardCharsets.UTF_8);
+        } else if (sourceType == TSDataType.DATE) {
+          return new Binary(getDateStringValue((int) value), StandardCharsets.UTF_8);
+        } else if (sourceType == TSDataType.BLOB) {
+          return new Binary(value.toString(), StandardCharsets.UTF_8);
         } else {
           break;
         }
@@ -331,12 +372,6 @@
         } else {
           break;
         }
-      case TEXT:
-        if (sourceType == TSDataType.TEXT || sourceType == STRING) {
-          return array;
-        } else {
-          break;
-        }
       case TIMESTAMP:
         if (sourceType == TSDataType.TIMESTAMP) {
           return array;
@@ -361,9 +396,54 @@
         } else {
           break;
         }
+      case TEXT:
       case STRING:
-        if (sourceType == TSDataType.STRING || sourceType == TSDataType.TEXT) {
+        if (sourceType == TSDataType.STRING
+            || sourceType == TSDataType.TEXT
+            || sourceType == TSDataType.BLOB) {
           return array;
+        } else if (sourceType == TSDataType.INT32) {
+          int[] tmp = (int[]) array;
+          Binary[] result = new Binary[tmp.length];
+          for (int i = 0; i < tmp.length; i++) {
+            result[i] = new Binary(String.valueOf(tmp[i]), StandardCharsets.UTF_8);
+          }
+          return result;
+        } else if (sourceType == TSDataType.DATE) {
+          int[] tmp = (int[]) array;
+          Binary[] result = new Binary[tmp.length];
+          for (int i = 0; i < tmp.length; i++) {
+            result[i] = new Binary(TSDataType.getDateStringValue(tmp[i]), StandardCharsets.UTF_8);
+          }
+          return result;
+        } else if (sourceType == TSDataType.INT64 || sourceType == TSDataType.TIMESTAMP) {
+          long[] tmp = (long[]) array;
+          Binary[] result = new Binary[tmp.length];
+          for (int i = 0; i < tmp.length; i++) {
+            result[i] = new Binary(String.valueOf(tmp[i]), StandardCharsets.UTF_8);
+          }
+          return result;
+        } else if (sourceType == TSDataType.FLOAT) {
+          float[] tmp = (float[]) array;
+          Binary[] result = new Binary[tmp.length];
+          for (int i = 0; i < tmp.length; i++) {
+            result[i] = new Binary(String.valueOf(tmp[i]), StandardCharsets.UTF_8);
+          }
+          return result;
+        } else if (sourceType == TSDataType.DOUBLE) {
+          double[] tmp = (double[]) array;
+          Binary[] result = new Binary[tmp.length];
+          for (int i = 0; i < tmp.length; i++) {
+            result[i] = new Binary(String.valueOf(tmp[i]), StandardCharsets.UTF_8);
+          }
+          return result;
+        } else if (sourceType == TSDataType.BOOLEAN) {
+          boolean[] tmp = (boolean[]) array;
+          Binary[] result = new Binary[tmp.length];
+          for (int i = 0; i < tmp.length; i++) {
+            result[i] = new Binary(String.valueOf(tmp[i]), StandardCharsets.UTF_8);
+          }
+          return result;
         } else {
           break;
         }
@@ -487,4 +567,8 @@
   public boolean isBinary() {
     return this == TEXT || this == STRING || this == BLOB;
   }
+
+  public static String getDateStringValue(int value) {
+    return String.format("%04d-%02d-%02d", value / 10000, (value % 10000) / 100, value % 100);
+  }
 }
diff --git a/java/common/src/main/java/org/apache/tsfile/utils/BitMap.java b/java/common/src/main/java/org/apache/tsfile/utils/BitMap.java
index 8a4c10b..9ceff99 100644
--- a/java/common/src/main/java/org/apache/tsfile/utils/BitMap.java
+++ b/java/common/src/main/java/org/apache/tsfile/utils/BitMap.java
@@ -36,14 +36,13 @@
         (byte) 0X7F // 01111111
       };
 
-  private final byte[] bits;
-  private final int size;
+  private byte[] bits;
+  private int size;
 
   /** Initialize a BitMap with given size. */
   public BitMap(int size) {
     this.size = size;
     bits = new byte[getSizeOfBytes(size)];
-    Arrays.fill(bits, (byte) 0);
   }
 
   /** Initialize a BitMap with given size and bytes. */
@@ -75,6 +74,34 @@
     bits[position / Byte.SIZE] |= BIT_UTIL[position % Byte.SIZE];
   }
 
+  public void markRange(int startPosition, int length) {
+    if (length <= 0) {
+      return;
+    }
+
+    if (startPosition < 0 || startPosition + length > size) {
+      throw new IndexOutOfBoundsException(
+          "startPosition " + startPosition + " + length " + length + " is out of range " + size);
+    }
+
+    int bitEnd = startPosition + length - 1;
+    int byte0 = startPosition >>> 3;
+    int byte1 = bitEnd >>> 3;
+
+    if (byte0 == byte1) {
+      bits[byte0] |= (byte) (((1 << length) - 1) << (startPosition & 7));
+      return;
+    }
+
+    bits[byte0++] |= (byte) (0xFF << (startPosition & 7));
+
+    while (byte0 < byte1) {
+      bits[byte0++] = (byte) 0xFF;
+    }
+
+    bits[byte1] |= (byte) (0xFF >>> (7 - (bitEnd & 7)));
+  }
+
   /** mark as 0 at all positions. */
   public void reset() {
     Arrays.fill(bits, (byte) 0);
@@ -84,6 +111,68 @@
     bits[position / Byte.SIZE] &= UNMARK_BIT_UTIL[position % Byte.SIZE];
   }
 
+  public void unmarkRange(int startPosition, int length) {
+    if (length <= 0) {
+      return;
+    }
+
+    if (startPosition < 0 || startPosition + length > size) {
+      throw new IndexOutOfBoundsException(
+          "startPosition " + startPosition + " + length " + length + " is out of range " + size);
+    }
+
+    int bitEnd = startPosition + length - 1;
+    int byte0 = startPosition >>> 3;
+    int byte1 = bitEnd >>> 3;
+
+    if (byte0 == byte1) {
+      bits[byte0] &= (byte) ~(((1 << length) - 1) << (startPosition & 7));
+      return;
+    }
+
+    bits[byte0++] &= (byte) ~(0xFF << (startPosition & 7));
+
+    while (byte0 < byte1) {
+      bits[byte0++] = 0;
+    }
+
+    bits[byte1] &= (byte) (0xFF << ((bitEnd & 7) + 1));
+  }
+
+  public void merge(BitMap src, int srcStart, int destStart, int len) {
+    if (len <= 0) return;
+    if (srcStart < 0 || destStart < 0 || srcStart + len > src.size || destStart + len > this.size) {
+      throw new IndexOutOfBoundsException();
+    }
+
+    int done = 0;
+    int dstBit = destStart & 7;
+    while (done < len) {
+      int size = Math.min(len - done, 64);
+      long bits = extractBits(src.bits, srcStart + done, size);
+      int destStartByte = (destStart + done) >>> 3;
+      this.bits[destStartByte++] |= (byte) ((bits << dstBit) & 255L);
+      bits = bits >>> (8 - dstBit);
+      while (bits > 0L) {
+        this.bits[destStartByte++] |= (byte) (bits & 255L);
+        bits = bits >>> 8;
+      }
+      done += size;
+    }
+  }
+
+  private long extractBits(byte[] buf, int off, int len) {
+    int start = off >>> 3;
+    int size = 8 - (off & 7);
+    long val = (buf[start++] & 0xFFL) >>> (off & 7);
+    while (size < len) {
+      val |= ((buf[start++] & 0xFFL) << size);
+      size += 8;
+    }
+
+    return val & (0xffff_ffff_ffff_ffffL >>> (64 - len));
+  }
+
   /** whether all bits are zero, i.e., no Null value */
   public boolean isAllUnmarked() {
     int j;
@@ -258,4 +347,22 @@
   public byte[] getTruncatedByteArray(int size) {
     return Arrays.copyOf(this.bits, getSizeOfBytes(size));
   }
+
+  public void append(BitMap another, int position, int length) {
+    for (int i = 0; i < length; i++) {
+      if (another.isMarked(i)) {
+        mark(position + i);
+      } else {
+        unmark(position + i);
+      }
+    }
+  }
+
+  public void extend(int newSize) {
+    if (size >= newSize) {
+      return;
+    }
+    bits = Arrays.copyOf(bits, getSizeOfBytes(newSize));
+    size = newSize;
+  }
 }
diff --git a/java/common/src/main/java/org/apache/tsfile/utils/RamUsageEstimator.java b/java/common/src/main/java/org/apache/tsfile/utils/RamUsageEstimator.java
index 2f1cf3d..af7a8cf 100644
--- a/java/common/src/main/java/org/apache/tsfile/utils/RamUsageEstimator.java
+++ b/java/common/src/main/java/org/apache/tsfile/utils/RamUsageEstimator.java
@@ -27,11 +27,16 @@
 import java.security.PrivilegedAction;
 import java.text.DecimalFormat;
 import java.text.DecimalFormatSymbols;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
+import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * This class is copied from apache lucene, version 8.4.0. Estimates the size(memory representation)
@@ -77,6 +82,8 @@
    */
   public static final int NUM_BYTES_OBJECT_ALIGNMENT;
 
+  private static final int ALIGN_MASK;
+
   /**
    * Approximate memory usage that we assign to all unknown queries - this maps roughly to a
    * BooleanQuery with a couple term clauses.
@@ -175,6 +182,8 @@
       NUM_BYTES_ARRAY_HEADER = NUM_BYTES_OBJECT_HEADER + Integer.BYTES;
     }
 
+    ALIGN_MASK = NUM_BYTES_OBJECT_ALIGNMENT - 1;
+
     // get min/max value of cached Long class instances:
     long longCacheMinValue = 0;
     while (longCacheMinValue > Long.MIN_VALUE
@@ -204,8 +213,7 @@
 
   /** Aligns an object size to be the next multiple of {@link #NUM_BYTES_OBJECT_ALIGNMENT}. */
   public static long alignObjectSize(long size) {
-    size += NUM_BYTES_OBJECT_ALIGNMENT - 1L;
-    return size - (size % NUM_BYTES_OBJECT_ALIGNMENT);
+    return (size + ALIGN_MASK) & ~ALIGN_MASK;
   }
 
   /**
@@ -302,6 +310,25 @@
   /** Recurse only into immediate descendants. */
   public static final int MAX_DEPTH = 1;
 
+  public static final long SHALLOW_SIZE_OF_HASHMAP =
+      RamUsageEstimator.shallowSizeOfInstance(HashMap.class);
+  public static long SHALLOW_SIZE_OF_HASHMAP_ENTRY;
+  public static final long SHALLOW_SIZE_OF_CONCURRENT_HASHMAP =
+      RamUsageEstimator.shallowSizeOfInstance(ConcurrentHashMap.class);
+  public static long SHALLOW_SIZE_OF_CONCURRENT_HASHMAP_ENTRY;
+  public static final long ARRAY_LIST_INSTANCE_SIZE =
+      RamUsageEstimator.shallowSizeOfInstance(ArrayList.class);
+
+  static {
+    Map<Integer, Integer> map = new HashMap<>(1);
+    map.put(1, 1);
+    Map.Entry<Integer, Integer> next = map.entrySet().iterator().next();
+    SHALLOW_SIZE_OF_HASHMAP_ENTRY = RamUsageEstimator.shallowSizeOf(next);
+    map = new ConcurrentHashMap<>(map);
+    SHALLOW_SIZE_OF_CONCURRENT_HASHMAP_ENTRY =
+        RamUsageEstimator.shallowSizeOf(map.entrySet().iterator().next());
+  }
+
   /**
    * Returns the size in bytes of a Map object, including sizes of its keys and values, supplying
    * {@link #UNKNOWN_DEFAULT_RAM_BYTES_USED} when object type is not well known. This method
@@ -340,6 +367,49 @@
     return alignObjectSize(size);
   }
 
+  public static long sizeOfMapWithKnownShallowSize(
+      Map<?, ?> map, long shallowSizeOfMap, long shallowSizeOfMapEntry) {
+    if (map == null) {
+      return 0;
+    }
+    int depth = 0;
+    long defSize = UNKNOWN_DEFAULT_RAM_BYTES_USED;
+    long size = shallowSizeOfMap + map.size() * shallowSizeOfMapEntry;
+    for (Map.Entry<?, ?> entry : map.entrySet()) {
+      size += sizeOfObject(entry.getKey(), depth, defSize);
+      size += sizeOfObject(entry.getValue(), depth, defSize);
+    }
+    return alignObjectSize(size);
+  }
+
+  public static long sizeOfHashSet(Set<?> set) {
+    if (set == null) {
+      return 0L;
+    } else {
+      long size =
+          RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP
+              + (long) set.size() * RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY;
+      for (Object obj : set) {
+        size += RamUsageEstimator.sizeOfObject(obj);
+      }
+      return RamUsageEstimator.alignObjectSize(size);
+    }
+  }
+
+  public static long sizeOfArrayList(List<?> arrayList) {
+    if (arrayList == null) {
+      return 0L;
+    }
+    long size = ARRAY_LIST_INSTANCE_SIZE;
+    size +=
+        (long) RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
+            + (long) arrayList.size() * (long) RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+    for (Object obj : arrayList) {
+      size += RamUsageEstimator.sizeOfObject(obj);
+    }
+    return RamUsageEstimator.alignObjectSize(size);
+  }
+
   /**
    * Returns the size in bytes of a Collection object, including sizes of its values, supplying
    * {@link #UNKNOWN_DEFAULT_RAM_BYTES_USED} when object type is not well known. This method
diff --git a/java/examples/pom.xml b/java/examples/pom.xml
index 5a484cf..64cc162 100644
--- a/java/examples/pom.xml
+++ b/java/examples/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-java</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>examples</artifactId>
     <name>TsFile: Java: Examples</name>
@@ -36,7 +36,7 @@
         <dependency>
             <groupId>org.apache.tsfile</groupId>
             <artifactId>tsfile</artifactId>
-            <version>2.1.0-SNAPSHOT</version>
+            <version>2.2.0-SNAPSHOT</version>
         </dependency>
     </dependencies>
     <build>
diff --git a/java/examples/readme.md b/java/examples/readme.md
index 23f5a7c..6bd6597 100644
--- a/java/examples/readme.md
+++ b/java/examples/readme.md
@@ -31,7 +31,7 @@
     <dependency>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile</artifactId>
-     	  <version>2.1.0-SNAPSHOT</version>
+     	  <version>2.1.0</version>
     </dependency>
 </dependencies>
 ```
diff --git a/java/pom.xml b/java/pom.xml
index df7cec5..130b09b 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -24,10 +24,10 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-parent</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>tsfile-java</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <packaging>pom</packaging>
     <name>TsFile: Java</name>
     <modules>
@@ -56,7 +56,7 @@
             <dependency>
                 <groupId>org.apache.commons</groupId>
                 <artifactId>commons-lang3</artifactId>
-                <version>3.15.0</version>
+                <version>3.18.0</version>
             </dependency>
             <dependency>
                 <groupId>org.lz4</groupId>
@@ -98,4 +98,37 @@
             </dependency>
         </dependencies>
     </dependencyManagement>
+    <build>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>com.diffplug.spotless</groupId>
+                    <artifactId>spotless-maven-plugin</artifactId>
+                    <version>${spotless.version}</version>
+                    <configuration>
+                        <java>
+                            <googleJavaFormat>
+                                <version>${google.java.format.version}</version>
+                                <style>GOOGLE</style>
+                            </googleJavaFormat>
+                            <importOrder>
+                                <order>org.apache.tsfile,,javax,java,\#</order>
+                            </importOrder>
+                            <removeUnusedImports/>
+                        </java>
+                        <lineEndings>UNIX</lineEndings>
+                    </configuration>
+                    <executions>
+                        <execution>
+                            <id>spotless-check</id>
+                            <goals>
+                                <goal>check</goal>
+                            </goals>
+                            <phase>validate</phase>
+                        </execution>
+                    </executions>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+    </build>
 </project>
diff --git a/java/tools/pom.xml b/java/tools/pom.xml
index 8cc58d1..a4491e3 100644
--- a/java/tools/pom.xml
+++ b/java/tools/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-java</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>tools</artifactId>
     <name>TsFile: Java: Tools</name>
@@ -32,7 +32,7 @@
         <dependency>
             <groupId>org.apache.tsfile</groupId>
             <artifactId>common</artifactId>
-            <version>2.1.0-SNAPSHOT</version>
+            <version>2.2.0-SNAPSHOT</version>
         </dependency>
         <dependency>
             <groupId>commons-cli</groupId>
@@ -50,7 +50,7 @@
         <dependency>
             <groupId>org.apache.tsfile</groupId>
             <artifactId>tsfile</artifactId>
-            <version>2.1.0-SNAPSHOT</version>
+            <version>2.2.0-SNAPSHOT</version>
         </dependency>
         <dependency>
             <groupId>ch.qos.logback</groupId>
diff --git a/java/tsfile/README-zh.md b/java/tsfile/README-zh.md
index 32f15fc..e97abb8 100644
--- a/java/tsfile/README-zh.md
+++ b/java/tsfile/README-zh.md
@@ -26,7 +26,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 
@@ -34,14 +34,14 @@
 
 ### 在 Maven 中添加 TsFile 依赖
 
-当前开发版本是 `2.1.0-SNAPSHOT`,可以这样引用
+当前发布版本是 `2.1.0`,可以这样引用
 
 ```xml  
 <dependencies>
     <dependency>
       <groupId>org.apache.tsfile</groupId>
       <artifactId>tsfile</artifactId>
-      <version>2.1.0-SNAPSHOT</version>
+      <version>2.1.0</version>
     </dependency>
 <dependencies>
 ```
diff --git a/java/tsfile/README.md b/java/tsfile/README.md
index b92bc52..c225567 100644
--- a/java/tsfile/README.md
+++ b/java/tsfile/README.md
@@ -26,7 +26,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 
@@ -34,14 +34,14 @@
 
 ### Add TsFile as a dependency in Maven
 
-The current developing version is `2.1.0-SNAPSHOT`
+The current version is `2.1.0`
 
 ```xml  
 <dependencies>
     <dependency>
       <groupId>org.apache.tsfile</groupId>
       <artifactId>tsfile</artifactId>
-      <version>2.1.0-SNAPSHOT</version>
+      <version>2.1.0</version>
     </dependency>
 <dependencies>
 ```
diff --git a/java/tsfile/pom.xml b/java/tsfile/pom.xml
index 8625f0c..866472d 100644
--- a/java/tsfile/pom.xml
+++ b/java/tsfile/pom.xml
@@ -24,7 +24,7 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-java</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>tsfile</artifactId>
     <name>TsFile: Java: TsFile</name>
@@ -38,7 +38,7 @@
         <dependency>
             <groupId>org.apache.tsfile</groupId>
             <artifactId>common</artifactId>
-            <version>2.1.0-SNAPSHOT</version>
+            <version>2.2.0-SNAPSHOT</version>
         </dependency>
         <dependency>
             <groupId>com.github.luben</groupId>
@@ -87,6 +87,11 @@
             <artifactId>logback-classic</artifactId>
             <scope>test</scope>
         </dependency>
+        <dependency>
+        <groupId>com.github.wendykierp</groupId>
+            <artifactId>JTransforms</artifactId>
+            <version>3.1</version>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
@@ -142,22 +147,41 @@
                     </filesets>
                 </configuration>
             </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-shade-plugin</artifactId>
+                <version>3.4.1</version>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <relocations/>
+                            <createDependencyReducedPom>false</createDependencyReducedPom>
+                            <transformers>
+                                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer"/>
+                            </transformers>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
             <!--
               Generate an OSGI compatible MANIFEST file.
             -->
             <plugin>
                 <groupId>org.apache.felix</groupId>
                 <artifactId>maven-bundle-plugin</artifactId>
-                <configuration>
-                    <exportScr>true</exportScr>
-                    <instructions>
-                        <_include>-bnd.bnd</_include>
-                        <_removeheaders>Bnd-LastModified,Built-By</_removeheaders>
-                        <Embed-Dependency>dependencies</Embed-Dependency>
-                    </instructions>
-                </configuration>
                 <executions>
                     <execution>
+                        <id>bundle</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>bundle</goal>
+                        </goals>
+                    </execution>
+                    <execution>
                         <id>bundle-manifest</id>
                         <goals>
                             <goal>manifest</goal>
@@ -165,6 +189,20 @@
                         <phase>process-classes</phase>
                     </execution>
                 </executions>
+                <configuration>
+                    <classesDirectory>${project.build.outputDirectory}</classesDirectory>
+                    <archive>
+                        <manifestFile>${project.build.outputDirectory}/META-INF/MANIFEST.MF</manifestFile>
+                    </archive>
+                    <instructions>
+                        <Export-Package>org.apache.tsfile.*</Export-Package>
+                        <Embed-Dependency>common;inline=true</Embed-Dependency>
+                        <Embed-Transitive>false</Embed-Transitive>
+                        <Private-Package/>
+                        <_removeheaders>Bnd-LastModified,Built-By</_removeheaders>
+                        <Bundle-SymbolicName>org.apache.tsfile</Bundle-SymbolicName>
+                    </instructions>
+                </configuration>
             </plugin>
             <!--
               Use the MANIFEST file generated by the maven-bundle-plugin.
diff --git a/java/tsfile/src/main/codegen/templates/FilterOperatorsTemplate.ftl b/java/tsfile/src/main/codegen/templates/FilterOperatorsTemplate.ftl
index 77d394d..7e39b36 100644
--- a/java/tsfile/src/main/codegen/templates/FilterOperatorsTemplate.ftl
+++ b/java/tsfile/src/main/codegen/templates/FilterOperatorsTemplate.ftl
@@ -42,6 +42,9 @@
 import java.io.IOException;
 import java.io.Serializable;
 import java.nio.ByteBuffer;
+<#if filter.dataType == "Binary">
+import java.nio.charset.StandardCharsets;
+</#if>
 <#if filter.dataType != "boolean">
 import java.util.Collections;
 </#if>
@@ -137,7 +140,18 @@
 
     @Override
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      }
+      else{
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -152,7 +166,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -162,20 +176,36 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)){
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) < 0
+            || constant.compareTo((${filter.dataType}) statistics.getMaxValue()) > 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) < 0
+            || constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) > 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
-      return constant < (${filter.javaBoxName}) statistics.getMinValue()
-          || constant > (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant < ((Number) statistics.getMinValue()).${filter.dataType}Value()
+          || constant > ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -185,13 +215,29 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)){
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) == 0
+            && constant.compareTo((${filter.dataType}) statistics.getMaxValue()) == 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) == 0
+            && constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) == 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
-      return constant == (${filter.javaBoxName}) statistics.getMinValue()
-          && constant == (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant == ((Number) statistics.getMinValue()).${filter.dataType}Value()
+          && constant == ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
@@ -218,7 +264,17 @@
 
     @Override
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -233,7 +289,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -243,21 +299,37 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)){
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) == 0
+            && constant.compareTo((${filter.dataType}) statistics.getMaxValue()) == 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) == 0
+            && constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) == 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
       // drop if this is a column where min = max = value
-      return constant == (${filter.javaBoxName}) statistics.getMinValue()
-          && constant == (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant == ((Number) statistics.getMinValue()).${filter.dataType}Value()
+          && constant == ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -267,12 +339,28 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)){
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) < 0
+            || constant.compareTo((${filter.dataType}) statistics.getMaxValue()) > 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) < 0
+            || constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) > 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return constant < (${filter.javaBoxName}) statistics.getMinValue()
-          || constant > (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant < ((Number) statistics.getMinValue()).${filter.dataType}Value()
+          || constant > ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
@@ -300,7 +388,17 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -317,7 +415,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -326,20 +424,34 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMinValue() instanceof Binary) {
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) <= 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) <= 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
       // drop if value <= min
-      return constant <= (${filter.javaBoxName}) statistics.getMinValue();
+      return constant <= ((Number) statistics.getMinValue()).${filter.dataType}Value();
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -348,11 +460,25 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMaxValue() instanceof Binary){
+        return constant.compareTo((${filter.dataType}) statistics.getMaxValue()) > 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) > 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return constant > (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant > ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
@@ -380,7 +506,17 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -397,7 +533,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -406,20 +542,34 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMinValue() instanceof Binary) {
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) < 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) < 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
       // drop if value < min
-      return constant < (${filter.javaBoxName}) statistics.getMinValue();
+      return constant < ((Number) statistics.getMinValue()).${filter.dataType}Value();
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -428,11 +578,25 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMaxValue() instanceof Binary){
+        return constant.compareTo((${filter.dataType}) statistics.getMaxValue()) >= 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) >= 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return constant >= (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant >= ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
@@ -460,7 +624,17 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -477,7 +651,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -486,20 +660,34 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMaxValue() instanceof Binary) {
+        return constant.compareTo((${filter.dataType}) statistics.getMaxValue()) >= 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) >= 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
       // drop if value >= max
-      return constant >= (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant >= ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -508,11 +696,25 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMinValue() instanceof Binary){
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) < 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) < 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return constant < (${filter.javaBoxName}) statistics.getMinValue();
+      return constant < ((Number) statistics.getMinValue()).${filter.dataType}Value();
       </#if>
     }
 
@@ -540,7 +742,17 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -557,7 +769,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -566,20 +778,34 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMaxValue() instanceof Binary) {
+        return constant.compareTo((${filter.dataType}) statistics.getMaxValue()) > 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)) > 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
       // drop if value > max
-      return constant > (${filter.javaBoxName}) statistics.getMaxValue();
+      return constant > ((Number) statistics.getMaxValue()).${filter.dataType}Value();
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -588,11 +814,25 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if(statistics.getMinValue() instanceof Binary){
+        return constant.compareTo((${filter.dataType}) statistics.getMinValue()) <= 0;
+      }
+      else{
+        return constant.compareTo(new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)) <= 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return constant <= (${filter.javaBoxName}) statistics.getMinValue();
+      return constant <= ((Number) statistics.getMinValue()).${filter.dataType}Value();
       </#if>
     }
 
@@ -688,7 +928,17 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -707,7 +957,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -717,20 +967,36 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMaxValue() instanceof Binary) && (statistics.getMinValue() instanceof Binary)) {
+        return ((${filter.dataType}) statistics.getMaxValue()).compareTo(min) < 0
+            || ((${filter.dataType}) statistics.getMinValue()).compareTo(max) > 0;
+      }
+      else{
+        return (new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)).compareTo(min) < 0
+            || (new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)).compareTo(max) > 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
-      return (${filter.javaBoxName}) statistics.getMaxValue() < min
-          || (${filter.javaBoxName}) statistics.getMinValue() > max;
+      return ((Number) statistics.getMaxValue()).${filter.dataType}Value() < min
+          || ((Number) statistics.getMinValue()).${filter.dataType}Value() > max;
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -740,12 +1006,28 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)){
+        return ((${filter.dataType}) statistics.getMinValue()).compareTo(min) >= 0
+            && ((${filter.dataType}) statistics.getMaxValue()).compareTo(max) <= 0;
+      }
+      else{
+        return (new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)).compareTo(min) >= 0
+            && (new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)).compareTo(max) <= 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return (${filter.javaBoxName}) statistics.getMinValue() >= min
-          && (${filter.javaBoxName}) statistics.getMaxValue() <= max;
+      return ((Number) statistics.getMinValue()).${filter.dataType}Value() >= min
+          && ((Number) statistics.getMaxValue()).${filter.dataType}Value() <= max;
       </#if>
     }
 
@@ -773,7 +1055,17 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean valueSatisfy(Object value){
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
       return valueSatisfy((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return valueSatisfy((${filter.dataType}) value);
+      } else {
+        return valueSatisfy(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return valueSatisfy(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -791,7 +1083,7 @@
     @Override
     @SuppressWarnings("unchecked")
     public boolean canSkip(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -801,20 +1093,36 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)) {
+        return ((${filter.dataType}) statistics.getMinValue()).compareTo(min) >= 0
+            && ((${filter.dataType}) statistics.getMaxValue()).compareTo(max) <= 0;
+      }
+      else{
+        return (new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)).compareTo(min) >= 0
+            && (new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)).compareTo(max) <= 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       // drop if value < min || value > max
       if(statistics.isEmpty()){
         return false;
       }
-      return (${filter.javaBoxName}) statistics.getMinValue() >= min
-          && (${filter.javaBoxName}) statistics.getMaxValue() <= max;
+      return ((Number) statistics.getMinValue()).${filter.dataType}Value() >= min
+          && ((Number) statistics.getMaxValue()).${filter.dataType}Value() <= max;
       </#if>
     }
 
     @Override
     @SuppressWarnings("unchecked")
     public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
-      <#if filter.dataType == "boolean" || filter.dataType == "Binary">
+      <#if filter.dataType == "boolean">
         <#if filter.javaBoxName == "String">
       if(statistics.isEmpty()){
         return false;
@@ -824,12 +1132,28 @@
         <#else>
       return false;
         </#if>
+      <#elseif filter.dataType == "Binary">
+        <#if filter.javaBoxName == "String">
+      if(statistics.isEmpty()){
+        return false;
+      }
+      if((statistics.getMinValue() instanceof Binary) && (statistics.getMaxValue() instanceof Binary)){
+        return ((${filter.dataType}) statistics.getMinValue()).compareTo(max) > 0
+            || ((${filter.dataType}) statistics.getMaxValue()).compareTo(min) < 0;
+      }
+      else{
+        return (new ${filter.dataType}(String.valueOf(statistics.getMinValue()), StandardCharsets.UTF_8)).compareTo(max) > 0
+            || (new ${filter.dataType}(String.valueOf(statistics.getMaxValue()), StandardCharsets.UTF_8)).compareTo(min) < 0;
+      }
+        <#else>
+      return false;
+        </#if>
       <#else>
       if(statistics.isEmpty()){
         return false;
       }
-      return (${filter.javaBoxName}) statistics.getMinValue() > max
-          || (${filter.javaBoxName}) statistics.getMaxValue() < min;
+      return ((Number) statistics.getMinValue()).${filter.dataType}Value() > max
+          || ((Number) statistics.getMaxValue()).${filter.dataType}Value() < min;
       </#if>
     }
 
@@ -962,7 +1286,17 @@
 
     @Override
     public boolean valueSatisfy(Object value){
-      return candidates.contains((${filter.javaBoxName}) value);
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
+      return candidates.contains((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return candidates.contains((${filter.dataType}) value);
+      } else {
+        return candidates.contains(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return candidates.contains(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
@@ -994,8 +1328,8 @@
         ${filter.dataType} valuesMin = (${filter.dataType}) stat.getMinValue();
         ${filter.dataType} valuesMax = (${filter.dataType}) stat.getMaxValue();
         <#else>
-        ${filter.javaBoxName} valuesMin = (${filter.javaBoxName}) stat.getMinValue();
-        ${filter.javaBoxName} valuesMax = (${filter.javaBoxName}) stat.getMaxValue();
+        ${filter.javaBoxName} valuesMin = ((Number) stat.getMinValue()).${filter.dataType}Value();
+        ${filter.javaBoxName} valuesMax = ((Number) stat.getMaxValue()).${filter.dataType}Value();
         </#if>
         // All values are same
         if (valuesMin.equals(valuesMax)) {
@@ -1048,8 +1382,8 @@
         ${filter.dataType} valuesMin = (${filter.dataType}) stat.getMinValue();
         ${filter.dataType} valuesMax = (${filter.dataType}) stat.getMaxValue();
         <#else>
-        ${filter.javaBoxName} valuesMin = (${filter.javaBoxName}) stat.getMinValue();
-        ${filter.javaBoxName} valuesMax = (${filter.javaBoxName}) stat.getMaxValue();
+        ${filter.javaBoxName} valuesMin = ((Number) stat.getMinValue()).${filter.dataType}Value();
+        ${filter.javaBoxName} valuesMax = ((Number) stat.getMaxValue()).${filter.dataType}Value();
         </#if>
         // All values are same
         if (valuesMin.equals(valuesMax)) {
@@ -1106,7 +1440,17 @@
 
     @Override
     public boolean valueSatisfy(Object value){
-      return !candidates.contains((${filter.javaBoxName}) value);
+      <#if filter.dataType == "boolean" || filter.javaBoxName == "String">
+      return !candidates.contains((${filter.dataType}) value);
+      <#elseif filter.dataType == "Binary">
+      if(value instanceof Binary){
+        return !candidates.contains((${filter.dataType}) value);
+      } else {
+        return !candidates.contains(new ${filter.dataType}(String.valueOf(value), StandardCharsets.UTF_8));
+      }
+      <#else>
+      return !candidates.contains(((Number) value).${filter.dataType}Value());
+      </#if>
     }
 
     @Override
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitInputStream.java b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitInputStream.java
new file mode 100644
index 0000000..4da6c8f
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitInputStream.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.common.bitStream;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/** A stream for reading individual bits or groups of bits from an InputStream. */
+public class BitInputStream extends BitStream {
+
+  protected InputStream in;
+  protected int buffer;
+  protected int bufferBitCount;
+  protected final long totalBits; // Total valid bits
+  protected long bitsRead = 0; // Number of bits read so far
+
+  protected int markedBuffer = 0;
+  protected int markedBufferBitCount = 0;
+  protected long markedBitsRead = 0;
+
+  /**
+   * Constructs a BitInputStream with a given InputStream and total number of valid bits.
+   *
+   * @param in the underlying InputStream
+   * @param totalBits the total number of valid bits to read
+   */
+  public BitInputStream(InputStream in, long totalBits) {
+    this.in = in;
+    this.totalBits = totalBits;
+    this.bufferBitCount = 0;
+  }
+
+  /**
+   * Reads an integer value using the specified number of bits. If fewer bits are available, only
+   * the available bits are returned.
+   *
+   * @param numBits the number of bits to read (≤ 32)
+   * @return an integer whose lower bits contain the read value
+   * @throws EOFException if no data is available to read
+   * @throws IOException if an I/O error occurs
+   */
+  public int readInt(int numBits) throws IOException {
+    if (availableBits() <= 0) {
+      throw new EOFException();
+    }
+
+    bitsRead += numBits;
+    int result = 0;
+    boolean hasReadData = false;
+
+    while (numBits > 0) {
+      if (bufferBitCount == 0) {
+        buffer = in.read();
+        if (buffer < 0) {
+          if (!hasReadData) {
+            throw new EOFException();
+          }
+          return result;
+        }
+        bufferBitCount = BITS_PER_BYTE;
+      }
+
+      if (bufferBitCount > numBits) {
+        result = ((buffer >> (bufferBitCount - numBits)) & MASKS[numBits]) | result;
+        bufferBitCount -= numBits;
+        numBits = 0;
+      } else {
+        result = ((buffer & MASKS[bufferBitCount]) << (numBits - bufferBitCount)) | result;
+        numBits -= bufferBitCount;
+        bufferBitCount = 0;
+      }
+
+      hasReadData = true;
+    }
+
+    return result;
+  }
+
+  /**
+   * Reads a long value using the specified number of bits.
+   *
+   * @param numBits the number of bits to read (0 to 64)
+   * @return a long value containing the read bits
+   * @throws EOFException if no data is available to read
+   * @throws IOException if an I/O error occurs
+   */
+  public long readLong(int numBits) throws IOException {
+    if (availableBits() <= 0) {
+      throw new EOFException();
+    }
+    bitsRead += numBits;
+    if (numBits > 64 || numBits < 0) {
+      throw new IllegalArgumentException("numBits must be between 0 and 64");
+    }
+
+    long result = 0;
+    boolean hasReadData = false;
+
+    while (numBits > 0) {
+      if (bufferBitCount == 0) {
+        buffer = in.read();
+        if (buffer < 0) {
+          if (!hasReadData) {
+            throw new EOFException();
+          }
+          return result;
+        }
+        bufferBitCount = BITS_PER_BYTE;
+      }
+
+      if (bufferBitCount > numBits) {
+        int shift = bufferBitCount - numBits;
+        result = (result << numBits) | ((buffer >> shift) & MASKS[numBits]);
+        bufferBitCount -= numBits;
+        buffer &= MASKS[bufferBitCount];
+        numBits = 0;
+      } else {
+        result = (result << bufferBitCount) | (buffer & MASKS[bufferBitCount]);
+        numBits -= bufferBitCount;
+        bufferBitCount = 0;
+      }
+
+      hasReadData = true;
+    }
+
+    return result;
+  }
+
+  public static int readVarInt(BitInputStream in) throws IOException {
+    int result = 0;
+    int shift = 0;
+
+    while (true) {
+      int chunk = in.readInt(7);
+      boolean hasNext = in.readBit();
+      result |= chunk << shift;
+      if (!hasNext) break;
+      shift += 7;
+      if (shift >= 32) throw new IOException("VarInt too long");
+    }
+
+    return (result >>> 1) ^ -(result & 1);
+  }
+
+  public static long readVarLong(BitInputStream in) throws IOException {
+    long result = 0;
+    int shift = 0;
+
+    while (true) {
+      long chunk = in.readInt(7);
+      boolean hasNext = in.readBit();
+
+      result |= (chunk) << shift;
+      shift += 7;
+
+      if (!hasNext) {
+        break;
+      }
+
+      if (shift >= 64) {
+        throw new IOException("VarLong too long: overflow");
+      }
+    }
+
+    // ZigZag 解码
+    return (result >>> 1) ^ -(result & 1);
+  }
+
+  /**
+   * Reads a single bit from the stream.
+   *
+   * @return true if the bit is 1, false if it is 0
+   * @throws EOFException if no bits are available
+   * @throws IOException if an I/O error occurs
+   */
+  public boolean readBit() throws IOException {
+    if (availableBits() <= 0) {
+      throw new EOFException();
+    }
+    bitsRead += 1;
+    if (bufferBitCount == 0) {
+      buffer = in.read();
+      if (buffer < 0) {
+        throw new EOFException();
+      }
+      bufferBitCount = BITS_PER_BYTE;
+    }
+
+    boolean bit = ((buffer >> (bufferBitCount - 1)) & 1) != 0;
+    bufferBitCount--;
+    return bit;
+  }
+
+  /**
+   * Returns whether this stream supports mark/reset.
+   *
+   * @return true if mark/reset is supported
+   */
+  public boolean markSupported() {
+    return in.markSupported();
+  }
+
+  /**
+   * Marks the current position in the stream.
+   *
+   * @param readLimit the maximum number of bits that can be read before the mark becomes invalid
+   */
+  public void mark(int readLimit) {
+    in.mark((readLimit + BITS_PER_BYTE - 1) / BITS_PER_BYTE);
+    markedBuffer = buffer;
+    markedBufferBitCount = bufferBitCount;
+    markedBitsRead = bitsRead;
+  }
+
+  /**
+   * Resets the stream to the most recent marked position.
+   *
+   * @throws IOException if mark was not called or has been invalidated
+   */
+  public void reset() throws IOException {
+    in.reset();
+    buffer = markedBuffer;
+    bufferBitCount = markedBufferBitCount;
+    bitsRead = markedBitsRead;
+  }
+
+  /**
+   * Returns the number of bits still available to read.
+   *
+   * @return the number of remaining available bits
+   * @throws IOException if an I/O error occurs
+   */
+  public int availableBits() throws IOException {
+    return (int)
+        Math.min(((long) in.available() * BITS_PER_BYTE) + bufferBitCount, totalBits - bitsRead);
+  }
+
+  /**
+   * Closes the underlying InputStream.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  public void close() throws IOException {
+    in.close();
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitOutputStream.java b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitOutputStream.java
new file mode 100644
index 0000000..641ccd5
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitOutputStream.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.common.bitStream;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A bit-level output stream that writes bits to an underlying byte-oriented OutputStream. Bits are
+ * written in MSB-first (most significant bit first) order within each byte.
+ */
+public class BitOutputStream extends BitStream {
+
+  protected OutputStream out;
+  protected int buffer; // Bit buffer (8-bit)
+  protected int bufferBitCount; // Number of bits currently in the buffer
+
+  protected int bitsWritten; // Total number of bits written
+
+  /**
+   * Constructs a BitOutputStream from the given OutputStream.
+   *
+   * @param out the underlying OutputStream
+   */
+  public BitOutputStream(OutputStream out) {
+    this.out = out;
+    this.buffer = 0;
+    this.bufferBitCount = 0;
+    this.bitsWritten = 0;
+  }
+
+  public void reset(OutputStream out) {
+    this.out = out;
+    this.buffer = 0;
+    this.bufferBitCount = 0;
+    this.bitsWritten = 0;
+  }
+
+  /**
+   * Writes the specified number of bits from the given integer. Bits are taken from the lower bits
+   * of the data and written MSB-first.
+   *
+   * @param data the data to write (bits from the LSB end)
+   * @param numBits number of bits to write (0–32)
+   * @throws IOException if an I/O error occurs
+   */
+  public void writeInt(int data, int numBits) throws IOException {
+    bitsWritten += numBits;
+    while (numBits > 0) {
+      int rest = 8 - bufferBitCount;
+
+      if (rest > numBits) {
+        buffer |= ((data & MASKS[numBits]) << (rest - numBits));
+        bufferBitCount += numBits;
+        numBits = 0;
+      } else {
+        buffer |= ((data >> (numBits - rest)) & MASKS[rest]);
+        out.write(buffer);
+        buffer = 0;
+        bufferBitCount = 0;
+        numBits -= rest;
+      }
+    }
+  }
+
+  /**
+   * Writes the specified number of bits from the given long value. Bits are taken from the lower
+   * bits of the data and written MSB-first.
+   *
+   * @param data the data to write (bits from the LSB end)
+   * @param numBits number of bits to write (0–64)
+   * @throws IOException if an I/O error occurs
+   */
+  public void writeLong(long data, int numBits) throws IOException {
+    if (numBits > 64 || numBits < 0) {
+      throw new IllegalArgumentException("numBits must be between 0 and 64");
+    }
+
+    bitsWritten += numBits;
+    while (numBits > 0) {
+      int rest = 8 - bufferBitCount;
+
+      if (rest > numBits) {
+        int shift = rest - numBits;
+        int toWrite = (int) ((data & MASKS[numBits]) << shift);
+        buffer |= toWrite;
+        bufferBitCount += numBits;
+        numBits = 0;
+      } else {
+        int shift = numBits - rest;
+        int toWrite = (int) ((data >> shift) & MASKS[rest]);
+        buffer |= toWrite;
+        out.write(buffer);
+        buffer = 0;
+        bufferBitCount = 0;
+        numBits -= rest;
+      }
+    }
+  }
+
+  public static int writeVarInt(int value, BitOutputStream out) throws IOException {
+    int uValue =
+        (value << 1) ^ (value >> 31); // ZigZag encoding: even for positive, odd for negative
+    int bits = 0;
+
+    while ((uValue & ~0x7F) != 0) {
+      out.writeInt(uValue & 0x7F, 7); // Write lower 7 bits
+      out.writeBit(true); // Continuation flag 1
+      uValue >>>= 7;
+      bits += 8;
+    }
+
+    out.writeInt(uValue, 7); // Last 7 bits
+    out.writeBit(false); // Termination flag 0
+    bits += 8;
+
+    return bits;
+  }
+
+  public static int writeVarLong(long value, BitOutputStream out) throws IOException {
+    long uValue =
+        (value << 1) ^ (value >> 63); // ZigZag encoding: even for positive, odd for negative
+    int bitsWritten = 0;
+
+    while ((uValue & ~0x7FL) != 0) {
+      int chunk = (int) (uValue & 0x7F); // Lower 7 bits
+      out.writeInt(chunk, 7); // Write data bits
+      out.writeBit(true); // Has more data
+      uValue >>>= 7;
+      bitsWritten += 8;
+    }
+
+    out.writeInt((int) (uValue & 0x7F), 7); // Last byte
+    out.writeBit(false); // End flag
+    bitsWritten += 8;
+    return bitsWritten;
+  }
+
+  /**
+   * Writes a single bit. The bit is stored in the buffer until a full byte is collected.
+   *
+   * @param bit true to write 1, false to write 0
+   * @throws IOException if an I/O error occurs
+   */
+  public void writeBit(boolean bit) throws IOException {
+    bitsWritten += 1;
+
+    buffer |= (bit ? 1 : 0) << (7 - bufferBitCount);
+    bufferBitCount++;
+
+    if (bufferBitCount == 8) {
+      out.write(buffer);
+      buffer = 0;
+      bufferBitCount = 0;
+    }
+  }
+
+  /**
+   * Flushes the remaining bits in the buffer to the stream (if any), padding the remaining bits
+   * with zeros in the lower positions.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  public void close() throws IOException {
+    if (bufferBitCount > 0) {
+      out.write(buffer);
+    }
+    out.close();
+  }
+
+  /**
+   * Returns the total number of bits written so far.
+   *
+   * @return the number of bits written
+   */
+  public int getBitsWritten() {
+    return bitsWritten;
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitStream.java b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitStream.java
new file mode 100644
index 0000000..83720e3
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/BitStream.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.common.bitStream;
+
+/**
+ * Base class for bit-level stream operations. Provides shared constants and bit masks for bitwise
+ * manipulation.
+ */
+public class BitStream {
+
+  /** Number of bits per byte (always 8) */
+  protected static final int BITS_PER_BYTE = 8;
+
+  /**
+   * Bit masks used to extract the lowest N bits of a value. MASKS[n] contains a bitmask with the
+   * lowest n bits set to 1. For example: MASKS[0] = 0b00000000 MASKS[1] = 0b00000001 MASKS[2] =
+   * 0b00000011 ... MASKS[8] = 0b11111111
+   */
+  protected static final int[] MASKS = new int[] {0, 1, 3, 7, 0xf, 0x1f, 0x3f, 0x7f, 0xff};
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/ByteBufferBackedInputStream.java b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/ByteBufferBackedInputStream.java
new file mode 100644
index 0000000..e8a35aa
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/common/bitStream/ByteBufferBackedInputStream.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.common.bitStream;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+public class ByteBufferBackedInputStream extends InputStream {
+  private final ByteBuffer buf;
+  private final int startPos;
+
+  public ByteBufferBackedInputStream(ByteBuffer buf) {
+    this.buf = buf;
+    this.startPos = buf.position();
+  }
+
+  @Override
+  public int read() throws IOException {
+    if (!buf.hasRemaining()) {
+      return -1;
+    }
+    return buf.get() & 0xFF;
+  }
+
+  @Override
+  public int available() {
+    return buf.remaining();
+  }
+
+  public int getConsumed() {
+    return buf.position() - startPos;
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileConfig.java b/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileConfig.java
index a987bf3..eb49b71 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileConfig.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileConfig.java
@@ -126,6 +126,24 @@
   /** Encoder of string, blob and text column. Default value is PLAIN. */
   private String textEncoding = "PLAIN";
 
+  /** Compression of boolean column. Defaults to the overall compression. */
+  private String booleanCompression = null;
+
+  /** Compression of int32 and date column. Defaults to the overall compression. */
+  private String int32Compression = null;
+
+  /** Compression of int64 and timestamp column. Defaults to the overall compression. */
+  private String int64Compression = null;
+
+  /** Compression of float column. Defaults to the overall compression. */
+  private String floatCompression = null;
+
+  /** Compression of double column. Defaults to the overall compression. */
+  private String doubleCompression = null;
+
+  /** Compression of string, blob and text column. Defaults to the overall compression. */
+  private String textCompression = null;
+
   /**
    * Encoder of value series. default value is PLAIN. For int, long data type, TsFile also supports
    * TS_2DIFF, REGULAR, GORILLA and RLE(run-length encoding). For float, double data type, TsFile
@@ -156,11 +174,14 @@
   private CompressionType compressor = CompressionType.LZ4;
 
   /** encryptKey, this should be 16 bytes String. */
-  private byte[] encryptKey = "abcdefghijklmnop".getBytes(TSFileConfig.STRING_CHARSET);
+  private byte[] encryptKey;
 
   /** Data encryption method, default encryptType is "UNENCRYPTED". */
   private String encryptType = "UNENCRYPTED";
 
+  /** Salt for encrypt, this should be 16 bytes String. */
+  private byte[] encryptSalt = EncryptUtils.generateSalt();
+
   /** Line count threshold for checking page memory occupied size. */
   private int pageCheckSizeThreshold = 100;
 
@@ -259,7 +280,15 @@
   }
 
   public void setEncryptKeyFromToken(String token) {
-    this.encryptKey = EncryptUtils.getEncryptKeyFromToken(token);
+    this.encryptKey = EncryptUtils.getEncryptKeyFromToken(token, encryptSalt);
+  }
+
+  public void setEncryptSalt(byte[] encryptSalt) {
+    this.encryptSalt = encryptSalt;
+  }
+
+  public byte[] getEncryptSalt() {
+    return this.encryptSalt;
   }
 
   public int getGroupSizeInByte() {
@@ -361,6 +390,44 @@
     }
   }
 
+  public CompressionType getCompressor(TSDataType dataType) {
+    String compressionName;
+    switch (dataType) {
+      case BOOLEAN:
+        compressionName = booleanCompression;
+        break;
+      case INT32:
+      case DATE:
+        compressionName = int32Compression;
+        break;
+      case INT64:
+      case TIMESTAMP:
+        compressionName = int64Compression;
+        break;
+      case FLOAT:
+        compressionName = floatCompression;
+        break;
+      case DOUBLE:
+        compressionName = doubleCompression;
+        break;
+      case STRING:
+      case BLOB:
+      case TEXT:
+        compressionName = textCompression;
+        break;
+      default:
+        compressionName = null;
+    }
+
+    CompressionType compressionType;
+    if (compressionName != null) {
+      compressionType = CompressionType.valueOf(compressionName);
+    } else {
+      compressionType = compressor;
+    }
+    return compressionType;
+  }
+
   public void setValueEncoder(String valueEncoder) {
     this.valueEncoder = valueEncoder;
   }
@@ -689,4 +756,28 @@
   public void setLz4UseJni(boolean lz4UseJni) {
     this.lz4UseJni = lz4UseJni;
   }
+
+  public void setBooleanCompression(String booleanCompression) {
+    this.booleanCompression = booleanCompression;
+  }
+
+  public void setInt32Compression(String int32Compression) {
+    this.int32Compression = int32Compression;
+  }
+
+  public void setInt64Compression(String int64Compression) {
+    this.int64Compression = int64Compression;
+  }
+
+  public void setFloatCompression(String floatCompression) {
+    this.floatCompression = floatCompression;
+  }
+
+  public void setDoubleCompression(String doubleCompression) {
+    this.doubleCompression = doubleCompression;
+  }
+
+  public void setTextCompression(String textCompression) {
+    this.textCompression = textCompression;
+  }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileDescriptor.java b/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileDescriptor.java
index 435561d..01b7831 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileDescriptor.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/common/conf/TSFileDescriptor.java
@@ -81,6 +81,12 @@
     writer.setInt(conf::setFloatPrecision, "float_precision");
     writer.setString(conf::setValueEncoder, "value_encoder");
     writer.setString(conf::setCompressor, "compressor");
+    writer.setString(conf::setBooleanCompression, "boolean_compressor");
+    writer.setString(conf::setInt32Compression, "int32_compressor");
+    writer.setString(conf::setInt64Compression, "int64_compressor");
+    writer.setString(conf::setFloatCompression, "float_compressor");
+    writer.setString(conf::setDoubleCompression, "double_compressor");
+    writer.setString(conf::setTextCompression, "text_compressor");
     writer.setInt(conf::setBatchSize, "batch_size");
     writer.setString(conf::setEncryptType, "encrypt_type");
     writer.setBoolean(conf::setLz4UseJni, "lz4_use_jni");
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/CamelDecoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/CamelDecoder.java
new file mode 100644
index 0000000..4744fa7
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/CamelDecoder.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.common.bitStream.BitInputStream;
+import org.apache.tsfile.common.bitStream.ByteBufferBackedInputStream;
+import org.apache.tsfile.exception.encoding.TsFileDecodingException;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.ReadWriteForEncodingUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+public class CamelDecoder extends Decoder {
+  // === Constants for decoding ===
+  private static final int BITS_FOR_SIGN = 1;
+  private static final int BITS_FOR_TYPE = 1;
+  private static final int BITS_FOR_FIRST_VALUE = 64;
+  private static final int BITS_FOR_LEADING_ZEROS = 6;
+  private static final int BITS_FOR_SIGNIFICANT_BITS = 6;
+  private static final int BITS_FOR_DECIMAL_COUNT = 4;
+  private static final int DOUBLE_TOTAL_BITS = 64;
+  private static final int DOUBLE_MANTISSA_BITS = 52;
+  private static final int DECIMAL_MAX_COUNT = 15;
+
+  // === Camel state ===
+  private long previousValue = 0;
+  private boolean isFirst = true;
+  private long storedVal = 0;
+
+  private double scale;
+
+  // === Precomputed tables ===
+  public static final long[] powers = new long[DECIMAL_MAX_COUNT];
+  public static final long[] threshold = new long[DECIMAL_MAX_COUNT];
+
+  static {
+    for (int l = 1; l <= DECIMAL_MAX_COUNT; l++) {
+      int idx = l - 1;
+      powers[idx] = (long) Math.pow(10, l);
+      long divisor = 1L << l;
+      threshold[idx] = powers[idx] / divisor;
+    }
+  }
+
+  private BitInputStream in;
+  private final GorillaDecoder gorillaDecoder;
+
+  public CamelDecoder(InputStream inputStream, long totalBits) {
+    super(TSEncoding.CAMEL);
+    // Initialize bit-level reader and nested Gorilla decoder
+    this.in = new BitInputStream(inputStream, totalBits);
+    this.gorillaDecoder = new GorillaDecoder();
+  }
+
+  public CamelDecoder() {
+    super(TSEncoding.CAMEL);
+    this.gorillaDecoder = new GorillaDecoder();
+  }
+
+  @Override
+  public boolean hasNext(ByteBuffer buffer) throws IOException {
+    if (cacheIndex < cacheSize) {
+      return true;
+    }
+    if (in != null && in.availableBits() > 0) {
+      return true;
+    }
+    return buffer.hasRemaining();
+  }
+
+  @Override
+  public void reset() {
+    this.in = null;
+    this.isFirst = true;
+    this.previousValue = 0L;
+    this.storedVal = 0L;
+    this.gorillaDecoder.leadingZeros = Integer.MAX_VALUE;
+    this.gorillaDecoder.trailingZeros = 0;
+  }
+
+  // Cache for batch decoding
+  private double[] valueCache = new double[0];
+  private int cacheIndex = 0;
+  private int cacheSize = 0;
+
+  // === Added reusable buffer for getValues ===
+  private double[] valuesBuffer = new double[16];
+
+  @Override
+  public double readDouble(ByteBuffer buffer) {
+    try {
+      if (cacheIndex >= cacheSize) {
+        if (in == null || in.availableBits() == 0) {
+          if (!buffer.hasRemaining()) {
+            throw new TsFileDecodingException("No more data to decode");
+          }
+          // read next chunk
+          ByteBuffer slice = buffer.slice();
+          ByteBufferBackedInputStream bais = new ByteBufferBackedInputStream(slice);
+          int blockBits = ReadWriteForEncodingUtils.readVarInt(bais);
+          this.in = new BitInputStream(bais, blockBits);
+          // reset state
+          this.isFirst = true;
+          this.storedVal = 0L;
+          this.previousValue = 0L;
+          this.gorillaDecoder.leadingZeros = Integer.MAX_VALUE;
+          this.gorillaDecoder.trailingZeros = 0;
+          // decode current block
+          double[] newValues = getValues();
+          if (newValues.length == 0) {
+            throw new TsFileDecodingException("Unexpected empty block");
+          }
+          valueCache = newValues;
+          cacheSize = newValues.length;
+          cacheIndex = 0;
+          int consumed = bais.getConsumed();
+          buffer.position(buffer.position() + consumed);
+        }
+      }
+      return valueCache[cacheIndex++];
+    } catch (IOException e) {
+      throw new TsFileDecodingException(e.getMessage());
+    }
+  }
+
+  /** Nested class to handle fallback encoding (Gorilla) for double values. */
+  public class GorillaDecoder {
+    private int leadingZeros = Integer.MAX_VALUE;
+    private int trailingZeros = 0;
+
+    /** Decode next value using Gorilla algorithm. */
+    public double decode(BitInputStream in) throws IOException {
+      if (isFirst) {
+        previousValue = in.readLong(BITS_FOR_FIRST_VALUE);
+        isFirst = false;
+        return Double.longBitsToDouble(previousValue);
+      }
+
+      boolean controlBit = in.readBit();
+      if (!controlBit) {
+        return Double.longBitsToDouble(previousValue);
+      }
+
+      boolean reuseBlock = !in.readBit();
+      long xor;
+      if (reuseBlock) {
+        int sigBits = DOUBLE_TOTAL_BITS - leadingZeros - trailingZeros;
+        if (sigBits == 0) {
+          return Double.longBitsToDouble(previousValue);
+        }
+        xor = in.readLong(sigBits) << trailingZeros;
+      } else {
+        leadingZeros = in.readInt(BITS_FOR_LEADING_ZEROS);
+        int sigBits = in.readInt(BITS_FOR_SIGNIFICANT_BITS) + 1;
+        trailingZeros = DOUBLE_TOTAL_BITS - leadingZeros - sigBits;
+        xor = in.readLong(sigBits) << trailingZeros;
+      }
+
+      previousValue ^= xor;
+      return Double.longBitsToDouble(previousValue);
+    }
+  }
+
+  /** Retrieve nested GorillaDecoder. */
+  public GorillaDecoder getGorillaDecoder() {
+    return gorillaDecoder;
+  }
+
+  /** Read all values until the stream is exhausted, reusing valuesBuffer. */
+  public double[] getValues() throws IOException {
+    int count = 0;
+    while (in.availableBits() > 0) {
+      double val = next();
+      if (count == valuesBuffer.length) {
+        valuesBuffer = Arrays.copyOf(valuesBuffer, valuesBuffer.length * 2);
+      }
+      valuesBuffer[count++] = val;
+    }
+    return Arrays.copyOf(valuesBuffer, count);
+  }
+
+  /** Decode next available value, return null if no more bits. */
+  private double next() throws IOException {
+    double result;
+    if (isFirst) {
+      isFirst = false;
+      long firstBits = in.readLong(BITS_FOR_FIRST_VALUE);
+      result = Double.longBitsToDouble(firstBits);
+      storedVal = (long) result;
+    } else {
+      result = nextValue();
+    }
+    previousValue = Double.doubleToLongBits(result);
+    return result;
+  }
+
+  /** Decode according to Camel vs Gorilla path. */
+  private double nextValue() throws IOException {
+    // Read sign bit
+    int signBit = in.readInt(BITS_FOR_SIGN);
+    double sign = signBit == 1 ? -1.0 : 1.0;
+    // Read encoding type bit
+    int typeBit = in.readInt(BITS_FOR_TYPE);
+    boolean useCamel = typeBit == 1;
+
+    if (useCamel) {
+      long intPart = readLong();
+      // decimal = decPart / scale
+      double decPart = readDecimal();
+      double value =
+          (intPart >= 0
+              ? (intPart * scale + decPart) / scale
+              : -(intPart * scale + decPart) / scale);
+      return sign * value;
+    } else {
+      return sign * gorillaDecoder.decode(in);
+    }
+  }
+
+  /** Read variable-length integer diff and update storedVal. */
+  private long readLong() throws IOException {
+    long diff = BitInputStream.readVarLong(in);
+    storedVal += diff;
+    return storedVal;
+  }
+
+  /** Read and reconstruct decimal component. */
+  private double readDecimal() throws IOException {
+    int count = in.readInt(BITS_FOR_DECIMAL_COUNT) + 1;
+    boolean hasXor = in.readBit();
+    long xor = 0;
+    if (hasXor) {
+      long bits = in.readLong(count);
+      xor = bits << (DOUBLE_MANTISSA_BITS - count);
+    }
+    long mVal = BitInputStream.readVarLong(in);
+    double frac;
+    if (hasXor) {
+      double base = (double) mVal / powers[count - 1] + 1;
+      long merged = xor ^ Double.doubleToLongBits(base);
+      frac = Double.longBitsToDouble(merged) - 1;
+    } else {
+      frac = (double) mVal / powers[count - 1];
+    }
+    // Round to original scale
+    scale = Math.pow(10, count);
+    return Math.round(frac * scale);
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/Decoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/Decoder.java
index 36c3d82..fcbbb5c 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/Decoder.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/Decoder.java
@@ -177,6 +177,57 @@
           default:
             throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
         }
+      case CAMEL:
+        switch (dataType) {
+          case DOUBLE:
+            return new CamelDecoder();
+          default:
+            throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
+        }
+      case DESCENDING_BIT_PACKING:
+        switch (dataType) {
+          case INT32:
+          case DATE:
+            return new DescendingBitPackingDecoder.IntDescendingBitPackingDecoder();
+          case INT64:
+          case TIMESTAMP:
+            return new DescendingBitPackingDecoder();
+          default:
+            throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
+        }
+      case SEPARATE_STORAGE:
+        switch (dataType) {
+          case INT32:
+          case DATE:
+            return new SeparateStorageDecoder.IntSeparateStorageDecoder();
+          case INT64:
+          case TIMESTAMP:
+            return new SeparateStorageDecoder();
+          default:
+            throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
+        }
+      case LAMINAR:
+        switch (dataType) {
+          case INT32:
+          case DATE:
+            return new LaminarDecoder.IntLaminarDecoder();
+          case INT64:
+          case TIMESTAMP:
+            return new LaminarDecoder();
+          default:
+            throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
+        }
+      case FLEA:
+        switch (dataType) {
+          case INT32:
+          case DATE:
+            return new FleaDecoder.IntFleaDecoder();
+          case INT64:
+          case TIMESTAMP:
+            return new FleaDecoder();
+          default:
+            throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
+        }
       default:
         throw new TsFileDecodingException(String.format(ERROR_MSG, encoding, dataType));
     }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/DescendingBitPackingDecoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/DescendingBitPackingDecoder.java
new file mode 100644
index 0000000..f38cb70
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/DescendingBitPackingDecoder.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class DescendingBitPackingDecoder extends Decoder {
+  private boolean isSigned;
+  private int numberRemainingInCurrentBlock = 0, totalInCurrentBlock = 0;
+  private long[] currentBlockValues = null;
+
+  protected static int bitsToBytes(int bits) {
+    return (bits + 7) / 8;
+  }
+
+  protected static int getValueWidth(long value) {
+    return 64 - Long.numberOfLeadingZeros(value);
+  }
+
+  protected static long zigzagDecode(long value) {
+    return (value >>> 1) ^ -(value & 1);
+  }
+
+  public DescendingBitPackingDecoder(boolean isSigned) {
+    super(TSEncoding.DESCENDING_BIT_PACKING);
+    this.isSigned = isSigned;
+  }
+
+  public DescendingBitPackingDecoder() {
+    this(true);
+  }
+
+  private void loadNextBlock(ByteBuffer buffer) {
+    byte[] currentBuffer = null;
+
+    int n = ReadWriteIOUtils.readInt(buffer);
+    if (n > 0) {
+      this.currentBlockValues = new long[n];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = n;
+      int m = ReadWriteIOUtils.readInt(buffer);
+      if (m > 0) {
+        int indexBitWidth = getValueWidth(n - 1);
+        int encodingLength = bitsToBytes(indexBitWidth * m);
+        currentBuffer = new byte[encodingLength];
+        buffer.get(currentBuffer);
+        long[] sortedIndicesArray = new long[m];
+        for (int i = 0; i < m; i++) {
+          sortedIndicesArray[i] =
+              BytesUtils.bytesToLong(currentBuffer, indexBitWidth * i, indexBitWidth);
+        }
+        currentBuffer = null;
+
+        encodingLength = ReadWriteIOUtils.readInt(buffer);
+        currentBuffer = new byte[encodingLength];
+        int offset = 0;
+        int previousValueWidth = ReadWriteIOUtils.readInt(buffer);
+        buffer.get(currentBuffer);
+        long tmp;
+        for (int i = 0; i < m; i++) {
+          tmp = BytesUtils.bytesToLong(currentBuffer, offset, previousValueWidth);
+          this.currentBlockValues[Math.toIntExact(sortedIndicesArray[i])] = tmp;
+          offset += previousValueWidth;
+          previousValueWidth = getValueWidth(tmp);
+        }
+      }
+    } else {
+      this.currentBlockValues = new long[0];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+    }
+  }
+
+  @Override
+  public long readLong(ByteBuffer buffer) {
+    if (numberRemainingInCurrentBlock == 0) {
+      loadNextBlock(buffer);
+    }
+    numberRemainingInCurrentBlock--;
+    long value = currentBlockValues[totalInCurrentBlock - numberRemainingInCurrentBlock - 1];
+    return isSigned ? zigzagDecode(value) : value;
+  }
+
+  @Override
+  public boolean hasNext(ByteBuffer buffer) throws IOException {
+    if (numberRemainingInCurrentBlock > 0) {
+      return true;
+    }
+    return buffer.hasRemaining();
+  }
+
+  @Override
+  public void reset() {
+    this.currentBlockValues = null;
+    this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+  }
+
+  public static class IntDescendingBitPackingDecoder extends DescendingBitPackingDecoder {
+    public IntDescendingBitPackingDecoder() {
+      super();
+    }
+
+    @Override
+    public int readInt(ByteBuffer buffer) {
+      return Math.toIntExact(super.readLong(buffer));
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/FleaDecoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/FleaDecoder.java
new file mode 100644
index 0000000..8333f8d
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/FleaDecoder.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.encoding.encoder.FleaEncoder;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class FleaDecoder extends Decoder {
+  private int numberRemainingInCurrentBlock = 0, totalInCurrentBlock = 0;
+  private long[] currentBlockValues = null;
+
+  public FleaDecoder() {
+    super(TSEncoding.FLEA);
+  }
+
+  private void loadNextBlock(ByteBuffer buffer) {
+    int n = ReadWriteIOUtils.readInt(buffer);
+
+    if (n > 0) {
+      int beta = ReadWriteIOUtils.readInt(buffer);
+
+      LaminarDecoder laminarDecoder = new LaminarDecoder();
+      long[] quantizedReal = new long[n / 2 + 1], quantizedImag = new long[n / 2 + 1];
+      for (int i = 0; i <= n / 2; i++) {
+        quantizedReal[i] = laminarDecoder.readLong(buffer);
+      }
+      laminarDecoder = new LaminarDecoder();
+      for (int i = 0; i <= n / 2; i++) {
+        quantizedImag[i] = laminarDecoder.readLong(buffer);
+      }
+
+      double[][] dequantized = new double[2][n / 2 + 1];
+      for (int i = 0; i <= n / 2; i++) {
+        dequantized[0][i] = FleaEncoder.dequantize(quantizedReal[i], beta);
+        dequantized[1][i] = FleaEncoder.dequantize(quantizedImag[i], beta);
+      }
+      double[] reconstructed = FleaEncoder.inverseRealFFT(dequantized, n);
+
+      long[] residuals = new long[n];
+      SeparateStorageDecoder separateStorageDecoder = new SeparateStorageDecoder();
+      for (int i = 0; i < n; i++) {
+        residuals[i] = separateStorageDecoder.readLong(buffer);
+      }
+
+      this.currentBlockValues = new long[n];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = n;
+      for (int i = 0; i < n; i++) {
+        this.currentBlockValues[i] = residuals[i] + Math.round(reconstructed[i]);
+      }
+    } else {
+      this.currentBlockValues = new long[0];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+    }
+  }
+
+  @Override
+  public long readLong(ByteBuffer buffer) {
+    if (numberRemainingInCurrentBlock == 0) {
+      loadNextBlock(buffer);
+    }
+    numberRemainingInCurrentBlock--;
+    long value = currentBlockValues[totalInCurrentBlock - numberRemainingInCurrentBlock - 1];
+    return value;
+  }
+
+  @Override
+  public boolean hasNext(ByteBuffer buffer) throws IOException {
+    if (numberRemainingInCurrentBlock > 0) {
+      return true;
+    }
+    return buffer.hasRemaining();
+  }
+
+  @Override
+  public void reset() {
+    this.currentBlockValues = null;
+    this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+  }
+
+  public static class IntFleaDecoder extends FleaDecoder {
+
+    public IntFleaDecoder() {
+      super();
+    }
+
+    @Override
+    public int readInt(ByteBuffer buffer) {
+      return Math.toIntExact(super.readLong(buffer));
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/LaminarDecoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/LaminarDecoder.java
new file mode 100644
index 0000000..17233c5
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/LaminarDecoder.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class LaminarDecoder extends Decoder {
+  private int numberRemainingInCurrentBlock = 0, totalInCurrentBlock = 0;
+  private long[] currentBlockValues = null;
+
+  public LaminarDecoder() {
+    super(TSEncoding.LAMINAR);
+  }
+
+  private long[] loadDecodeArray(ByteBuffer buffer) {
+    int n = ReadWriteIOUtils.readInt(buffer);
+    long[] values = new long[n];
+
+    if (n > 0) {
+      int[] laminarBitWidths = new int[n];
+      int currentLaminarBitWidth = ReadWriteIOUtils.readInt(buffer);
+      laminarBitWidths[0] = currentLaminarBitWidth;
+
+      if (n > 1) {
+        IntRleDecoder rleDecoder = new IntRleDecoder();
+        for (int i = 1; i < n; i++) {
+          while (rleDecoder.readInt(buffer) == 1) {
+            currentLaminarBitWidth--;
+          }
+          laminarBitWidths[i] = currentLaminarBitWidth;
+        }
+      }
+
+      int totalBits = 0;
+      for (int width : laminarBitWidths) totalBits += width;
+      int encodingLength = DescendingBitPackingDecoder.bitsToBytes(totalBits);
+      byte[] currentBuffer = new byte[encodingLength];
+      buffer.get(currentBuffer);
+      int offset = 0;
+      for (int i = 0; i < n; i++) {
+        if (laminarBitWidths[i] > 0) {
+          values[i] = BytesUtils.bytesToLong(currentBuffer, offset, laminarBitWidths[i]);
+          offset += laminarBitWidths[i];
+        }
+      }
+    }
+    return values;
+  }
+
+  private void loadNextBlock(ByteBuffer buffer) {
+    byte[] currentBuffer = null;
+    int n = ReadWriteIOUtils.readInt(buffer);
+
+    if (n > 0) {
+      this.currentBlockValues = new long[n];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = n;
+
+      int p = ReadWriteIOUtils.readInt(buffer);
+
+      long[] denseValues = loadDecodeArray(buffer);
+      for (int i = 0; i < p; i++) this.currentBlockValues[i] = denseValues[i];
+
+      long[] sparseValues = loadDecodeArray(buffer);
+      int indexBitWidth = DescendingBitPackingDecoder.getValueWidth(n - 1);
+      int encodingLength =
+          DescendingBitPackingDecoder.bitsToBytes(indexBitWidth * sparseValues.length);
+      currentBuffer = new byte[encodingLength];
+      buffer.get(currentBuffer);
+      for (int i = 0; i < sparseValues.length; i++) {
+        int currentIndex =
+            Math.toIntExact(
+                BytesUtils.bytesToLong(currentBuffer, indexBitWidth * i, indexBitWidth));
+        this.currentBlockValues[currentIndex + p] = sparseValues[i];
+      }
+    } else {
+      this.currentBlockValues = new long[0];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+    }
+  }
+
+  @Override
+  public long readLong(ByteBuffer buffer) {
+    if (numberRemainingInCurrentBlock == 0) {
+      loadNextBlock(buffer);
+    }
+    numberRemainingInCurrentBlock--;
+    long value = currentBlockValues[totalInCurrentBlock - numberRemainingInCurrentBlock - 1];
+    return DescendingBitPackingDecoder.zigzagDecode(value);
+  }
+
+  @Override
+  public void reset() {
+    this.currentBlockValues = null;
+    this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+  }
+
+  @Override
+  public boolean hasNext(ByteBuffer buffer) throws IOException {
+    if (numberRemainingInCurrentBlock > 0) {
+      return true;
+    }
+    return buffer.hasRemaining();
+  }
+
+  public static class IntLaminarDecoder extends LaminarDecoder {
+
+    public IntLaminarDecoder() {
+      super();
+    }
+
+    @Override
+    public int readInt(ByteBuffer buffer) {
+      return Math.toIntExact(super.readLong(buffer));
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/SeparateStorageDecoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/SeparateStorageDecoder.java
new file mode 100644
index 0000000..75ef168
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/decoder/SeparateStorageDecoder.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class SeparateStorageDecoder extends Decoder {
+  private boolean isSigned;
+  private int numberRemainingInCurrentBlock = 0, totalInCurrentBlock = 0;
+  private long[] currentBlockValues = null;
+
+  public SeparateStorageDecoder(boolean isSigned) {
+    super(TSEncoding.SEPARATE_STORAGE);
+    this.isSigned = isSigned;
+  }
+
+  public SeparateStorageDecoder() {
+    this(true);
+  }
+
+  private void loadNextBlock(ByteBuffer buffer) {
+    byte[] currentBuffer = null;
+
+    int n = ReadWriteIOUtils.readInt(buffer);
+    if (n > 0) {
+      int optimalWidth = ReadWriteIOUtils.readInt(buffer);
+
+      long[] highBits = new long[n];
+      long[] lowBits = new long[n];
+
+      DescendingBitPackingDecoder highBitsDecoder = new DescendingBitPackingDecoder(false);
+      for (int i = 0; i < n; i++) {
+        highBits[i] = highBitsDecoder.readLong(buffer);
+      }
+
+      if (optimalWidth > 0) {
+        int encodingLength = DescendingBitPackingDecoder.bitsToBytes(optimalWidth * n);
+        currentBuffer = new byte[encodingLength];
+        buffer.get(currentBuffer);
+        for (int i = 0; i < n; i++) {
+          lowBits[i] = BytesUtils.bytesToLong(currentBuffer, optimalWidth * i, optimalWidth);
+        }
+      }
+
+      this.currentBlockValues = new long[n];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = n;
+      for (int i = 0; i < n; i++)
+        this.currentBlockValues[i] = (highBits[i] << optimalWidth) | lowBits[i];
+    } else {
+      this.currentBlockValues = new long[0];
+      this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+    }
+  }
+
+  @Override
+  public long readLong(ByteBuffer buffer) {
+    if (numberRemainingInCurrentBlock == 0) {
+      loadNextBlock(buffer);
+    }
+    numberRemainingInCurrentBlock--;
+    long value = currentBlockValues[totalInCurrentBlock - numberRemainingInCurrentBlock - 1];
+    return isSigned ? DescendingBitPackingDecoder.zigzagDecode(value) : value;
+  }
+
+  @Override
+  public boolean hasNext(ByteBuffer buffer) throws IOException {
+    if (numberRemainingInCurrentBlock > 0) {
+      return true;
+    }
+    return buffer.hasRemaining();
+  }
+
+  @Override
+  public void reset() {
+    this.currentBlockValues = null;
+    this.numberRemainingInCurrentBlock = this.totalInCurrentBlock = 0;
+  }
+
+  public static class IntSeparateStorageDecoder extends SeparateStorageDecoder {
+    public IntSeparateStorageDecoder() {
+      super();
+    }
+
+    @Override
+    public int readInt(ByteBuffer buffer) {
+      return Math.toIntExact(super.readLong(buffer));
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/CamelEncoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/CamelEncoder.java
new file mode 100644
index 0000000..9156963
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/CamelEncoder.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.encoder;
+
+import org.apache.tsfile.common.bitStream.BitOutputStream;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.ReadWriteForEncodingUtils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+public class CamelEncoder extends Encoder {
+  private final GorillaEncoder gorillaEncoder;
+
+  // === Constants for encoding ===
+  private static final int BITS_FOR_SIGN = 1;
+  private static final int BITS_FOR_TYPE = 1;
+  private static final int BITS_FOR_FIRST_VALUE = 64;
+  private static final int BITS_FOR_LEADING_ZEROS = 6;
+  private static final int BITS_FOR_SIGNIFICANT_BITS = 6;
+  private static final int BITS_FOR_DECIMAL_COUNT = 4;
+  private static final int DOUBLE_TOTAL_BITS = 64;
+  private static final int DOUBLE_MANTISSA_BITS = 52;
+  private static final int DECIMAL_MAX_COUNT = 10;
+
+  // === Camel state ===
+  private long storedVal = 0;
+  private boolean isFirst = true;
+  long previousValue = 0;
+  private boolean hasPending = false; // guard for empty or duplicate flush
+
+  // === Precomputed tables ===
+  public static final long[] powers = new long[DECIMAL_MAX_COUNT];
+  public static final long[] threshold = new long[DECIMAL_MAX_COUNT];
+
+  private final BitOutputStream out;
+  private final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+
+  static {
+    for (int l = 1; l <= DECIMAL_MAX_COUNT; l++) {
+      int idx = l - 1;
+      powers[idx] = (long) Math.pow(10, l);
+      long divisor = 1L << l;
+      threshold[idx] = powers[idx] / divisor;
+    }
+  }
+
+  public CamelEncoder() {
+    super(TSEncoding.CAMEL);
+    out = new BitOutputStream(baos);
+    gorillaEncoder = new GorillaEncoder();
+  }
+
+  /**
+   * Encode a double value and buffer bits for later flush. Marks that there is pending data to
+   * flush.
+   *
+   * @param value the double value to encode
+   * @param out unused here, uses internal buffer
+   */
+  @Override
+  public void encode(double value, ByteArrayOutputStream out) {
+    try {
+      this.addValue(value);
+      hasPending = true;
+    } catch (IOException ignored) {
+    }
+  }
+
+  /**
+   * Flush buffered encoded values to the provided stream. Writes a header indicating the number of
+   * bits written, followed by the buffered bit data. Resets internal buffers and state afterward.
+   * Consecutive calls without new data are no-ops.
+   *
+   * @param out the destination ByteArrayOutputStream to write flushed data
+   * @throws IOException if an I/O error occurs during flush
+   */
+  @Override
+  public void flush(ByteArrayOutputStream out) throws IOException {
+    if (!hasPending) {
+      return;
+    }
+    int writtenBits = close();
+    ReadWriteForEncodingUtils.writeVarInt(writtenBits, out);
+    this.baos.writeTo(out);
+    this.baos.reset();
+    this.out.reset(this.baos);
+    resetState();
+    hasPending = false;
+  }
+
+  /**
+   * Reset encoder state to initial conditions for a new block. Clears Camel and nested Gorilla
+   * state, and resets pending flag.
+   */
+  private void resetState() {
+    this.isFirst = true;
+    this.storedVal = 0L;
+    this.previousValue = 0L;
+    this.hasPending = false;
+    // reset Gorilla state
+    this.gorillaEncoder.leadingZeros = Integer.MAX_VALUE;
+    this.gorillaEncoder.trailingZeros = 0;
+  }
+
+  @Override
+  public int getOneItemMaxSize() {
+    return 8;
+  }
+
+  @Override
+  public long getMaxByteSize() {
+    // bitstream buffer | bytes buffer | storedVal | previousValue
+    return 1 + this.baos.size() + 8 + 8;
+  }
+
+  public class GorillaEncoder {
+    private int leadingZeros = Integer.MAX_VALUE;
+    private int trailingZeros = 0;
+
+    public void encode(double value, BitOutputStream out) throws IOException {
+      long curr = Double.doubleToLongBits(value);
+      if (isFirst) {
+        out.writeLong(curr, BITS_FOR_FIRST_VALUE);
+        previousValue = curr;
+        isFirst = false;
+        return;
+      }
+
+      long xor = curr ^ previousValue;
+      if (xor == 0) {
+        out.writeBit(false); // Control bit: same as previous
+      } else {
+        out.writeBit(true); // Control bit: value changed
+        int leading = Long.numberOfLeadingZeros(xor);
+        int trailing = Long.numberOfTrailingZeros(xor);
+        if (leading >= leadingZeros && trailing >= trailingZeros) {
+          out.writeBit(false); // Reuse previous block info
+          int significantBits = DOUBLE_TOTAL_BITS - leadingZeros - trailingZeros;
+          out.writeLong(xor >>> trailingZeros, significantBits);
+        } else {
+          out.writeBit(true); // Write new block info
+          out.writeInt(leading, BITS_FOR_LEADING_ZEROS);
+          int significantBits = DOUBLE_TOTAL_BITS - leading - trailing;
+          out.writeInt(significantBits - 1, BITS_FOR_SIGNIFICANT_BITS);
+          out.writeLong(xor >>> trailing, significantBits);
+          leadingZeros = leading;
+          trailingZeros = trailing;
+        }
+      }
+
+      previousValue = curr;
+    }
+
+    public void close(BitOutputStream out) throws IOException {
+      out.close();
+    }
+  }
+
+  public void addValue(double value) throws IOException {
+    if (isFirst) {
+      writeFirst(Double.doubleToRawLongBits(value));
+    } else {
+      compressValue(value);
+    }
+    previousValue = Double.doubleToLongBits(value);
+  }
+
+  private void writeFirst(long value) throws IOException {
+    isFirst = false;
+    storedVal = (long) Double.longBitsToDouble(value);
+    out.writeLong(value, BITS_FOR_FIRST_VALUE);
+  }
+
+  public int close() throws IOException {
+    out.close();
+    return out.getBitsWritten();
+  }
+
+  private void compressValue(double value) throws IOException {
+    int signBit = (int) ((Double.doubleToLongBits(value) >>> (DOUBLE_TOTAL_BITS - 1)) & 1);
+    out.writeInt(signBit, BITS_FOR_SIGN);
+
+    value = Math.abs(value);
+    if (value > Long.MAX_VALUE
+        || value == 0
+        || Math.abs(Math.floor(Math.log10(value))) > DECIMAL_MAX_COUNT) {
+      out.writeInt(CamelInnerEncodingType.GORILLA.getCode(), BITS_FOR_TYPE);
+      gorillaEncoder.encode(value, out);
+      return;
+    }
+
+    long integerPart = (long) value;
+    int numDigits = 1;
+    long absInt = Math.abs(integerPart);
+    while (absInt >= 10) {
+      absInt /= 10;
+      numDigits++;
+    }
+
+    double factor = 1;
+    int decimalCount = 0;
+    while (Math.abs(value * factor - Math.round(value * factor)) > 0) {
+      factor *= 10.0;
+      decimalCount++;
+      if (numDigits + decimalCount > DECIMAL_MAX_COUNT) {
+        break;
+      }
+    }
+
+    decimalCount = Math.max(1, decimalCount);
+    long decimalValue;
+
+    if (decimalCount + numDigits <= DECIMAL_MAX_COUNT) {
+      long pow = powers[decimalCount - 1];
+      decimalValue = Math.round(value * pow) % pow;
+
+      out.writeInt(CamelInnerEncodingType.CAMEL.getCode(), BITS_FOR_TYPE);
+      compressIntegerValue(integerPart);
+      compressDecimalValue(decimalValue, decimalCount);
+    } else {
+      out.writeInt(CamelInnerEncodingType.GORILLA.getCode(), BITS_FOR_TYPE);
+      gorillaEncoder.encode(value, out);
+    }
+  }
+
+  private void compressIntegerValue(long value) throws IOException {
+    long diff = value - storedVal;
+    storedVal = value;
+    BitOutputStream.writeVarLong(diff, out);
+  }
+
+  private void compressDecimalValue(long decimalValue, int decimalCount) throws IOException {
+    out.writeInt(decimalCount - 1, BITS_FOR_DECIMAL_COUNT);
+    long thresh = threshold[decimalCount - 1];
+    int m = (int) decimalValue;
+
+    if (decimalValue >= thresh) {
+      out.writeBit(true);
+      m = (int) (decimalValue % thresh);
+
+      long xor =
+          Double.doubleToLongBits((double) decimalValue / powers[decimalCount - 1] + 1)
+              ^ Double.doubleToLongBits((double) m / powers[decimalCount - 1] + 1);
+
+      out.writeLong(xor >>> (DOUBLE_MANTISSA_BITS - decimalCount), decimalCount);
+    } else {
+      out.writeBit(false);
+    }
+
+    BitOutputStream.writeVarLong(m, out);
+  }
+
+  public int getWrittenBits() {
+    return out.getBitsWritten();
+  }
+
+  public ByteArrayOutputStream getByteArrayOutputStream() {
+    return this.baos;
+  }
+
+  public GorillaEncoder getGorillaEncoder() {
+    return gorillaEncoder;
+  }
+
+  public enum CamelInnerEncodingType {
+    GORILLA(0),
+    CAMEL(1);
+
+    private final int code;
+
+    CamelInnerEncodingType(int code) {
+      this.code = code;
+    }
+
+    public int getCode() {
+      return code;
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/DescendingBitPackingEncoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/DescendingBitPackingEncoder.java
new file mode 100644
index 0000000..bb1869c
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/DescendingBitPackingEncoder.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.encoder;
+
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+
+public class DescendingBitPackingEncoder extends Encoder {
+  private boolean isSigned;
+  private List<Long> buffer = new ArrayList<>();
+  private byte[] encodingBlockBuffer = null;
+
+  protected static int bitsToBytes(int bits) {
+    return (bits + 7) / 8;
+  }
+
+  protected static int getValueWidth(long value) {
+    return 64 - Long.numberOfLeadingZeros(value);
+  }
+
+  protected static long zigzagEncode(long value) {
+    return (value << 1) ^ (value >> 63);
+  }
+
+  public DescendingBitPackingEncoder(boolean isSigned) {
+    super(TSEncoding.DESCENDING_BIT_PACKING);
+    this.isSigned = isSigned;
+  }
+
+  public DescendingBitPackingEncoder() {
+    this(true);
+  }
+
+  @Override
+  public void encode(long value, ByteArrayOutputStream out) {
+    if (isSigned) {
+      value = zigzagEncode(value);
+    }
+    buffer.add(value);
+  }
+
+  @Override
+  public void flush(ByteArrayOutputStream out) throws IOException {
+    int n = Math.toIntExact(buffer.size());
+    ReadWriteIOUtils.write(n, out);
+
+    if (n > 0) {
+      int m = 0;
+      for (long value : buffer) {
+        if (value != 0) m++;
+      }
+      ReadWriteIOUtils.write(m, out);
+
+      if (m > 0) {
+        // INDArray[] sortResult = Nd4j.sortWithIndices(array, -1, false);
+        // INDArray sortedIndices = sortResult[0], sortedValues = sortResult[1];
+        // long[] sortedValuesArray = sortedValues.toLongVector(),
+        // sortedIndicesArray = sortedIndices.toLongVector();
+        Long[] sortedValuesArray = new Long[n];
+        Integer[] sortedIndicesArray = new Integer[n];
+        for (int i = 0; i < n; i++) {
+          sortedValuesArray[i] = buffer.get(i);
+          sortedIndicesArray[i] = i;
+        }
+        Arrays.sort(
+            sortedIndicesArray,
+            new Comparator<Integer>() {
+              @Override
+              public int compare(Integer i1, Integer i2) {
+                return Long.compareUnsigned(sortedValuesArray[i2], sortedValuesArray[i1]);
+              }
+            });
+        Arrays.sort(
+            sortedValuesArray,
+            new Comparator<Long>() {
+              @Override
+              public int compare(Long i1, Long i2) {
+                return Long.compareUnsigned(i2, i1);
+              }
+            });
+
+        int indexBitWidth = getValueWidth(n - 1);
+        int encodingLength = bitsToBytes(indexBitWidth * m);
+        this.encodingBlockBuffer = new byte[encodingLength];
+        for (int i = 0; i < m; i++) {
+          BytesUtils.intToBytes(
+              Math.toIntExact(sortedIndicesArray[i]),
+              this.encodingBlockBuffer,
+              indexBitWidth * i,
+              indexBitWidth);
+        }
+        out.write(this.encodingBlockBuffer, 0, encodingLength);
+        this.encodingBlockBuffer = null;
+
+        int valueWidthSum = 0;
+        for (int i = 0; i < m; i++) valueWidthSum += getValueWidth(sortedValuesArray[i]);
+        encodingLength = bitsToBytes(valueWidthSum + getValueWidth(sortedValuesArray[0]));
+        ReadWriteIOUtils.write(encodingLength, out);
+
+        this.encodingBlockBuffer = new byte[encodingLength];
+        int offset = 0;
+        int previousValueWidth = getValueWidth(sortedValuesArray[0]);
+        ReadWriteIOUtils.write(previousValueWidth, out);
+        BytesUtils.longToBytes(
+            sortedValuesArray[0], this.encodingBlockBuffer, offset, previousValueWidth);
+        offset += previousValueWidth;
+        for (int i = 1; i < m; i++) {
+          BytesUtils.longToBytes(
+              sortedValuesArray[i], this.encodingBlockBuffer, offset, previousValueWidth);
+          offset += previousValueWidth;
+          previousValueWidth = getValueWidth(sortedValuesArray[i]);
+        }
+        out.write(this.encodingBlockBuffer, 0, encodingLength);
+        this.encodingBlockBuffer = null;
+      }
+    }
+
+    this.buffer.clear();
+  }
+
+  @Override
+  public final long getMaxByteSize() {
+    return 0;
+  }
+
+  public static class IntDescendingBitPackingEncoder extends DescendingBitPackingEncoder {
+    public IntDescendingBitPackingEncoder() {
+      super();
+    }
+
+    @Override
+    public void encode(int value, ByteArrayOutputStream out) {
+      super.encode(Long.valueOf(value), out);
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/FleaEncoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/FleaEncoder.java
new file mode 100644
index 0000000..086db35
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/FleaEncoder.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.encoder;
+
+import org.apache.tsfile.common.conf.TSFileConfig;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import org.jtransforms.fft.DoubleFFT_1D;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class FleaEncoder extends Encoder {
+  private List<Long> buffer = new ArrayList<>();
+  private int minBeta, maxBeta;
+
+  public FleaEncoder() {
+    super(TSEncoding.FLEA);
+    this.minBeta = 11;
+    this.maxBeta = 20;
+  }
+
+  private long[] laminarEstimateLength(long[] values) {
+    int n = values.length;
+    long[] result = new long[this.maxBeta - this.minBeta + 1];
+
+    if (n > 0) {
+      int groupSize =
+          1 + DescendingBitPackingEncoder.getValueWidth(TSFileConfig.RLE_MAX_REPEATED_NUM);
+      int[] laminarBitWidths = LaminarEncoder.getLaminarBitWidths(values);
+      List<Integer> repeatValues = new ArrayList<>(), repeatCounts = new ArrayList<>();
+      int currentValue = -1, currentCount = 0;
+      for (int width : laminarBitWidths) {
+        if (width == currentValue) {
+          currentCount++;
+        } else {
+          if (currentValue != -1) {
+            repeatValues.add(currentValue);
+            repeatCounts.add(currentCount);
+          }
+          currentValue = width;
+          currentCount = 1;
+        }
+      }
+      if (currentValue != -1) {
+        repeatValues.add(currentValue);
+        repeatCounts.add(currentCount);
+      }
+
+      for (int i = 0; i < repeatValues.size(); i++) {
+        int width = repeatValues.get(i), count = repeatCounts.get(i);
+        for (int beta = this.minBeta; beta <= this.maxBeta; beta++) {
+          if (width > beta) {
+            int groupCount =
+                (1
+                    + (count + TSFileConfig.RLE_MAX_REPEATED_NUM - 1)
+                        / TSFileConfig.RLE_MAX_REPEATED_NUM);
+            result[beta - this.minBeta] += groupCount * groupSize;
+          }
+        }
+      }
+    }
+    return result;
+  }
+
+  private void laminarAddSparseMode(
+      long[] resultD2, int realBitWidth, int groupBitWidth, int indexBitWidth, boolean subtract) {
+    int d = subtract ? -1 : 1;
+    if (minBeta < realBitWidth) {
+      resultD2[0] += d * (1 + groupBitWidth - minBeta + indexBitWidth);
+      if (minBeta + 1 <= maxBeta) {
+        resultD2[1] += d * (-1 - (1 + groupBitWidth - minBeta + indexBitWidth));
+      }
+      if (realBitWidth <= maxBeta) {
+        resultD2[realBitWidth - minBeta] +=
+            d * (1 - (1 + groupBitWidth - (realBitWidth - 1) + indexBitWidth));
+      }
+      if (realBitWidth + 1 <= maxBeta) {
+        resultD2[realBitWidth + 1 - minBeta] +=
+            d * (1 + groupBitWidth - (realBitWidth - 1) + indexBitWidth);
+      }
+    }
+  }
+
+  private void laminarAddDenseMode(
+      long[] resultD2, int realBitWidth, int groupBitWidth, boolean subtract) {
+    int d = subtract ? -1 : 1;
+    if (minBeta < realBitWidth) {
+      resultD2[0] += d * (1 + groupBitWidth - minBeta);
+      if (minBeta + 1 <= maxBeta) {
+        resultD2[1] += d * (-1 - (1 + groupBitWidth - minBeta));
+      }
+      if (groupBitWidth <= maxBeta) {
+        resultD2[groupBitWidth - minBeta] += d * (-1);
+      }
+      if (groupBitWidth + 1 <= maxBeta) {
+        resultD2[groupBitWidth + 1 - minBeta] += d * 2;
+      }
+    }
+  }
+
+  private long[] laminarCalculateResult(long[] resultD2) {
+    long[] resultD1 = new long[this.maxBeta - this.minBeta + 1];
+    long[] result = new long[this.maxBeta - this.minBeta + 1];
+    resultD1[0] = resultD2[0];
+    result[0] = resultD1[0];
+    for (int beta = this.minBeta + 1; beta <= this.maxBeta; beta++) {
+      resultD1[beta - this.minBeta] =
+          resultD1[beta - this.minBeta - 1] + resultD2[beta - this.minBeta];
+      result[beta - this.minBeta] = result[beta - this.minBeta - 1] + resultD1[beta - this.minBeta];
+    }
+    return result;
+  }
+
+  private long[] laminarEstimateValue(long[] values) {
+    int n = values.length;
+
+    long[] resultD2 = new long[this.maxBeta - this.minBeta + 1];
+    int[] bitWidths = new int[n];
+    for (int i = 0; i < n; i++) {
+      bitWidths[i] = DescendingBitPackingEncoder.getValueWidth(values[i]);
+    }
+    int[] laminarBitWidths = LaminarEncoder.getLaminarBitWidths(values);
+    int indexBitWidth = DescendingBitPackingEncoder.getValueWidth(n - 1);
+
+    for (int i = 0; i < n; i++) {
+      laminarAddSparseMode(resultD2, bitWidths[i], laminarBitWidths[i], indexBitWidth, false);
+    }
+
+    long[] result = new long[this.maxBeta - this.minBeta + 1];
+    for (int beta = this.minBeta; beta <= this.maxBeta; beta++) {
+      result[beta - this.minBeta] = Long.MAX_VALUE;
+    }
+    int k = Math.min(n, 2 * (maxBeta - minBeta + 1));
+    for (int i = 0; i < n; i++) {
+      if (i % k == 0) {
+        long[] currentResult = laminarCalculateResult(resultD2);
+        for (int beta = this.minBeta; beta <= this.maxBeta; beta++) {
+          result[beta - this.minBeta] =
+              Math.min(result[beta - this.minBeta], currentResult[beta - this.minBeta]);
+        }
+      }
+      laminarAddSparseMode(resultD2, bitWidths[i], laminarBitWidths[i], indexBitWidth, true);
+      laminarAddDenseMode(resultD2, bitWidths[i], laminarBitWidths[i], false);
+    }
+    long[] currentResult = laminarCalculateResult(resultD2);
+    for (int beta = this.minBeta; beta <= this.maxBeta; beta++) {
+      result[beta - this.minBeta] =
+          Math.min(result[beta - this.minBeta], currentResult[beta - this.minBeta]);
+    }
+    return result;
+  }
+
+  private long[] estimateFrequency(long[] values) {
+    long[] result1 = laminarEstimateLength(values);
+    long[] result2 = laminarEstimateValue(values);
+    long[] result = new long[this.maxBeta - this.minBeta + 1];
+    for (int beta = this.minBeta; beta <= this.maxBeta; beta++) {
+      result[beta - this.minBeta] = result1[beta - this.minBeta] + result2[beta - this.minBeta];
+    }
+    return result;
+  }
+
+  private long[] estimateResidual(int n, long[] frequencyReal, long[] frequencyImag) {
+    double[] squareSumDiff = new double[maxBeta - minBeta + 1];
+    long[] partialCountDiff = new long[maxBeta - minBeta + 1];
+
+    for (int i = 0; i < n; i++) {
+      long real = i < frequencyReal.length ? frequencyReal[i] : frequencyReal[n - i];
+      int realBitLength = DescendingBitPackingEncoder.getValueWidth(Math.abs(real));
+      if (realBitLength - 1 >= minBeta) {
+        partialCountDiff[0] += 1;
+        if (realBitLength <= maxBeta) {
+          partialCountDiff[realBitLength - minBeta] -= 1;
+        }
+      }
+      if (realBitLength >= minBeta && realBitLength <= maxBeta) {
+        squareSumDiff[realBitLength - minBeta] += ((double) real * real);
+      }
+
+      long imag = i < frequencyImag.length ? frequencyImag[i] : -frequencyImag[n - i];
+      int imagBitLength = DescendingBitPackingEncoder.getValueWidth(Math.abs(imag));
+      if (imagBitLength - 1 >= minBeta) {
+        partialCountDiff[0] += 1;
+        if (imagBitLength <= maxBeta) {
+          partialCountDiff[imagBitLength - minBeta] -= 1;
+        }
+      }
+      if (imagBitLength >= minBeta && imagBitLength <= maxBeta) {
+        squareSumDiff[imagBitLength - minBeta] += ((double) imag * imag);
+      }
+    }
+
+    double[] squareSum = new double[maxBeta - minBeta + 1];
+    squareSum[0] = squareSumDiff[0];
+    for (int beta = minBeta + 1; beta <= maxBeta; beta++) {
+      squareSum[beta - minBeta] = squareSum[beta - minBeta - 1] + squareSumDiff[beta - minBeta];
+    }
+    long[] partialCount = new long[maxBeta - minBeta + 1];
+    partialCount[0] = partialCountDiff[0];
+    for (int beta = minBeta + 1; beta <= maxBeta; beta++) {
+      partialCount[beta - minBeta] =
+          partialCount[beta - minBeta - 1] + partialCountDiff[beta - minBeta];
+    }
+
+    long[] result = new long[maxBeta - minBeta + 1];
+    for (int beta = minBeta; beta <= maxBeta; beta++) {
+      double squareSumBeta =
+          squareSum[beta - minBeta]
+              + partialCount[beta - minBeta] * (1L << beta) * (1L << beta) / 3;
+      long optimalBitWidth =
+          Math.round(Math.ceil(Math.log(Math.sqrt(squareSumBeta) / n + 1) / Math.log(2)));
+      result[beta - minBeta] = (optimalBitWidth + 2) * n;
+    }
+
+    return result;
+  }
+
+  private int getOptimalBeta(int n, long[] frequencyReal, long[] frequencyImag) {
+    long[] frequency = estimateFrequency(frequencyReal);
+    long[] frequency2 = estimateFrequency(frequencyImag);
+    for (int beta = minBeta; beta <= maxBeta; beta++) {
+      frequency[beta - minBeta] += frequency2[beta - minBeta];
+    }
+    long[] residual = estimateResidual(n, frequencyReal, frequencyImag);
+    for (int beta = minBeta; beta <= maxBeta; beta++) {
+      frequency[beta - minBeta] += residual[beta - minBeta];
+    }
+    int optimalBeta = minBeta;
+    for (int beta = minBeta + 1; beta <= maxBeta; beta++) {
+      if (frequency[beta - minBeta] < frequency[optimalBeta - minBeta]) {
+        optimalBeta = beta;
+      }
+    }
+    return optimalBeta;
+  }
+
+  @Override
+  public void encode(long value, ByteArrayOutputStream out) {
+    buffer.add(value);
+  }
+
+  public static double[][] realFFT(double[] data) {
+    double[] dataComplex = new double[data.length * 2];
+    for (int i = 0; i < data.length; i++) {
+      dataComplex[2 * i] = data[i];
+      dataComplex[2 * i + 1] = 0;
+    }
+    DoubleFFT_1D fft = new DoubleFFT_1D(data.length);
+    fft.complexForward(dataComplex);
+    double[][] result = new double[2][data.length / 2 + 1];
+    for (int i = 0; i <= data.length / 2; i++) {
+      result[0][i] = dataComplex[2 * i];
+      result[1][i] = dataComplex[2 * i + 1];
+    }
+    return result; // result[0] is real part, result[1] is imaginary part
+  }
+
+  public static double[] inverseRealFFT(double[][] data, int n) {
+    double[] dataComplex = new double[n * 2];
+    for (int i = 0; i <= n / 2; i++) {
+      dataComplex[2 * i] = data[0][i];
+      dataComplex[2 * i + 1] = data[1][i];
+    }
+    for (int i = n / 2 + 1; i < n; i++) {
+      dataComplex[2 * i] = data[0][n - i];
+      dataComplex[2 * i + 1] = -data[1][n - i];
+    }
+    DoubleFFT_1D fft = new DoubleFFT_1D(n);
+    fft.complexInverse(dataComplex, true);
+    double[] result = new double[n];
+    for (int i = 0; i < n; i++) {
+      result[i] = dataComplex[2 * i];
+    }
+    return result;
+  }
+
+  public static long quantize(double value, int beta) {
+    return Math.round(value / (1 << beta));
+  }
+
+  public static double dequantize(long value, int beta) {
+    return (double) value * (1 << beta);
+  }
+
+  @Override
+  public void flush(ByteArrayOutputStream out) throws IOException {
+    int n = this.buffer.size();
+    ReadWriteIOUtils.write(n, out);
+    if (n > 0) {
+      double[] data = new double[n];
+      for (int i = 0; i < n; i++) {
+        data[i] = buffer.get(i);
+      }
+      double[][] fftResult = realFFT(data);
+      double[] frequencyReal = fftResult[0], frequencyImag = fftResult[1];
+
+      long[] frequencyRealLong = new long[frequencyReal.length];
+      long[] frequencyImagLong = new long[frequencyImag.length];
+      for (int i = 0; i < frequencyReal.length; i++) {
+        frequencyRealLong[i] = Math.round(frequencyReal[i]);
+      }
+      for (int i = 0; i < frequencyImag.length; i++) {
+        frequencyImagLong[i] = Math.round(frequencyImag[i]);
+      }
+      int beta = getOptimalBeta(n, frequencyRealLong, frequencyImagLong);
+      ReadWriteIOUtils.write(beta, out);
+
+      long[] quantizedReal = new long[frequencyReal.length],
+          quantizedImag = new long[frequencyImag.length];
+      for (int i = 0; i < frequencyReal.length; i++) {
+        quantizedReal[i] = quantize(frequencyReal[i], beta);
+        quantizedImag[i] = quantize(frequencyImag[i], beta);
+      }
+      LaminarEncoder laminarEncoder = new LaminarEncoder();
+      for (long v : quantizedReal) {
+        laminarEncoder.encode(v, out);
+      }
+      laminarEncoder.flush(out);
+      laminarEncoder = new LaminarEncoder();
+      for (long v : quantizedImag) {
+        laminarEncoder.encode(v, out);
+      }
+      laminarEncoder.flush(out);
+
+      double[][] dequantized = new double[2][frequencyReal.length];
+      for (int i = 0; i < frequencyReal.length; i++) {
+        dequantized[0][i] = dequantize(quantizedReal[i], beta);
+        dequantized[1][i] = dequantize(quantizedImag[i], beta);
+      }
+      double[] reconstructed = inverseRealFFT(dequantized, n);
+      long[] residuals = new long[n];
+      for (int i = 0; i < n; i++) {
+        residuals[i] = buffer.get(i) - Math.round(reconstructed[i]);
+      }
+      SeparateStorageEncoder separateStorageEncoder = new SeparateStorageEncoder();
+      for (long v : residuals) {
+        separateStorageEncoder.encode(v, out);
+      }
+      separateStorageEncoder.flush(out);
+    }
+    this.buffer.clear();
+  }
+
+  @Override
+  public final long getMaxByteSize() {
+    return 0;
+  }
+
+  public static class IntFleaEncoder extends FleaEncoder {
+
+    public IntFleaEncoder() {
+      super();
+    }
+
+    @Override
+    public void encode(int value, ByteArrayOutputStream out) {
+      super.encode(Long.valueOf(value), out);
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/LaminarEncoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/LaminarEncoder.java
new file mode 100644
index 0000000..ec87523
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/LaminarEncoder.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.encoder;
+
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class LaminarEncoder extends Encoder {
+  private List<Long> buffer = new ArrayList<>();
+  private byte[] encodingBlockBuffer = null;
+
+  public LaminarEncoder() {
+    super(TSEncoding.LAMINAR);
+  }
+
+  @Override
+  public void encode(long value, ByteArrayOutputStream out) {
+    value = DescendingBitPackingEncoder.zigzagEncode(value);
+    buffer.add(value);
+  }
+
+  protected static int[] getLaminarBitWidths(long[] values) {
+    int n = values.length;
+    int[] laminarBitWidths = new int[n];
+    for (int i = n - 1; i >= 0; i--) {
+      laminarBitWidths[i] = DescendingBitPackingEncoder.getValueWidth(values[i]);
+      if (i < n - 1) {
+        laminarBitWidths[i] = Math.max(laminarBitWidths[i], laminarBitWidths[i + 1]);
+      }
+    }
+    return laminarBitWidths;
+  }
+
+  private static int partition(long[] values) {
+    int n = values.length;
+
+    int[] laminarBitWidths = getLaminarBitWidths(values);
+    int indexBitWidth = DescendingBitPackingEncoder.getValueWidth(n - 1);
+
+    long currentLength = 0;
+    for (int i = 0; i < n; i++) {
+      if (values[i] != 0) {
+        currentLength += 1 + laminarBitWidths[i] + indexBitWidth;
+      }
+    }
+
+    int bestP = 0;
+    long bestLength = currentLength;
+    for (int i = 0; i < n; i++) {
+      if (values[i] != 0) {
+        currentLength -= 1 + laminarBitWidths[i] + indexBitWidth;
+      }
+      currentLength += 1 + laminarBitWidths[i];
+      if (currentLength < bestLength) {
+        bestLength = currentLength;
+        bestP = i + 1;
+      }
+    }
+
+    return bestP;
+  }
+
+  private void flushEncodeArray(long[] values, ByteArrayOutputStream out) throws IOException {
+    int n = values.length;
+    ReadWriteIOUtils.write(n, out);
+
+    if (n > 0) {
+      int[] laminarBitWidths = getLaminarBitWidths(values);
+      ReadWriteIOUtils.write(laminarBitWidths[0], out);
+
+      if (n > 1) {
+        IntRleEncoder rleEncoder = new IntRleEncoder();
+        for (int i = 1; i < n; i++) {
+          if (laminarBitWidths[i] < laminarBitWidths[i - 1])
+            for (int j = laminarBitWidths[i - 1]; j > laminarBitWidths[i]; j--)
+              rleEncoder.encode(1, out);
+          rleEncoder.encode(0, out);
+        }
+        rleEncoder.flush(out);
+      }
+
+      int totalBits = 0;
+      for (int width : laminarBitWidths) totalBits += width;
+      int encodingLength = DescendingBitPackingEncoder.bitsToBytes(totalBits);
+      this.encodingBlockBuffer = new byte[encodingLength];
+      int offset = 0;
+      for (int i = 0; i < n; i++) {
+        BytesUtils.longToBytes(values[i], encodingBlockBuffer, offset, laminarBitWidths[i]);
+        offset += laminarBitWidths[i];
+      }
+      out.write(this.encodingBlockBuffer, 0, encodingLength);
+      this.encodingBlockBuffer = null;
+    }
+  }
+
+  @Override
+  public void flush(ByteArrayOutputStream out) throws IOException {
+    int n = this.buffer.size();
+    ReadWriteIOUtils.write(n, out);
+
+    if (n > 0) {
+      long[] values = new long[n];
+      for (int i = 0; i < n; i++) {
+        values[i] = buffer.get(i);
+      }
+      int p = partition(values);
+      ReadWriteIOUtils.write(p, out);
+      flushEncodeArray(java.util.Arrays.copyOfRange(values, 0, p), out);
+
+      List<Integer> sparseIndices = new ArrayList<>();
+      List<Long> sparseValues = new ArrayList<>();
+      for (int i = p; i < n; i++) {
+        if (values[i] != 0) {
+          sparseIndices.add(i - p);
+          sparseValues.add(values[i]);
+        }
+      }
+      int[] sparseIndicesArray = new int[sparseIndices.size()];
+      long[] sparseValuesArray = new long[sparseValues.size()];
+      for (int i = 0; i < sparseIndices.size(); i++) {
+        sparseIndicesArray[i] = sparseIndices.get(i);
+        sparseValuesArray[i] = sparseValues.get(i);
+      }
+
+      flushEncodeArray(sparseValuesArray, out);
+      int indexBitWidth = DescendingBitPackingEncoder.getValueWidth(n - 1);
+      int encodingLength =
+          DescendingBitPackingEncoder.bitsToBytes(indexBitWidth * sparseValuesArray.length);
+      this.encodingBlockBuffer = new byte[encodingLength];
+      for (int i = 0; i < sparseValuesArray.length; i++) {
+        BytesUtils.intToBytes(
+            sparseIndicesArray[i], encodingBlockBuffer, indexBitWidth * i, indexBitWidth);
+      }
+      out.write(this.encodingBlockBuffer, 0, encodingLength);
+      this.encodingBlockBuffer = null;
+    }
+
+    this.buffer.clear();
+  }
+
+  @Override
+  public final long getMaxByteSize() {
+    return 0;
+  }
+
+  public static class IntegerLaminarEncoder extends LaminarEncoder {
+
+    public IntegerLaminarEncoder() {
+      super();
+    }
+
+    @Override
+    public void encode(int value, ByteArrayOutputStream out) {
+      super.encode(Long.valueOf(value), out);
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/SeparateStorageEncoder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/SeparateStorageEncoder.java
new file mode 100644
index 0000000..2e772d1
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/SeparateStorageEncoder.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.encoder;
+
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.utils.BytesUtils;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class SeparateStorageEncoder extends Encoder {
+  private boolean isSigned;
+  private List<Long> buffer = new ArrayList<>();
+  private byte[] encodingBlockBuffer = null;
+
+  public SeparateStorageEncoder(boolean isSigned) {
+    super(TSEncoding.SEPARATE_STORAGE);
+    this.isSigned = isSigned;
+  }
+
+  public SeparateStorageEncoder() {
+    this(true);
+  }
+
+  @Override
+  public void encode(long value, ByteArrayOutputStream out) {
+    if (isSigned) {
+      value = DescendingBitPackingEncoder.zigzagEncode(value);
+    }
+    buffer.add(value);
+  }
+
+  private static int getOptimalBitWidth(long[] widthCount) {
+    long widthCountSum = 0, widthCountWeightedSum = 0;
+    for (int i = 0; i <= 64; i++) {
+      widthCountSum += widthCount[i];
+      widthCountWeightedSum += widthCount[i] * i;
+    }
+    int indexWidth = DescendingBitPackingEncoder.getValueWidth(widthCountSum - 1);
+
+    long currentWidthCountSum = widthCountSum, currentWidthCountWeightedSum = widthCountWeightedSum;
+    int optimalWidth = -1;
+    long optimalBitLength = -1;
+    for (int i = 0; i <= 64; i++) {
+      currentWidthCountSum -= widthCount[i];
+      currentWidthCountWeightedSum -= widthCount[i] * i;
+      long bitLength =
+          widthCountSum * i
+              + currentWidthCountSum * indexWidth
+              + (currentWidthCountWeightedSum - currentWidthCountSum * i);
+      if (optimalBitLength == -1 || bitLength < optimalBitLength) {
+        optimalBitLength = bitLength;
+        optimalWidth = i;
+      }
+    }
+
+    return optimalWidth;
+  }
+
+  @Override
+  public void flush(ByteArrayOutputStream out) throws IOException {
+    int n = buffer.size();
+    ReadWriteIOUtils.write(n, out);
+
+    if (n > 0) {
+      long[] widthCount = new long[65];
+      for (long value : buffer) widthCount[DescendingBitPackingEncoder.getValueWidth(value)]++;
+      int optimalWidth = getOptimalBitWidth(widthCount);
+      ReadWriteIOUtils.write(optimalWidth, out);
+
+      Long[] highBits = new Long[n];
+      Long[] lowBits = new Long[n];
+      for (int i = 0; i < n; i++) {
+        long value = buffer.get(i);
+        highBits[i] = value >>> optimalWidth;
+        lowBits[i] = value & ((1L << optimalWidth) - 1);
+      }
+      DescendingBitPackingEncoder highBitsEncoder = new DescendingBitPackingEncoder(false);
+      for (long value : highBits) highBitsEncoder.encode(value, out);
+      highBitsEncoder.flush(out);
+
+      if (optimalWidth > 0) {
+        int encodingLength = DescendingBitPackingEncoder.bitsToBytes(optimalWidth * n);
+        this.encodingBlockBuffer = new byte[encodingLength];
+        for (int i = 0; i < n; i++) {
+          BytesUtils.longToBytes(lowBits[i], encodingBlockBuffer, optimalWidth * i, optimalWidth);
+        }
+        out.write(this.encodingBlockBuffer, 0, encodingLength);
+        this.encodingBlockBuffer = null;
+      }
+    }
+    this.buffer.clear();
+  }
+
+  @Override
+  public final long getMaxByteSize() {
+    return 0;
+  }
+
+  public static class IntSeparateStorageEncoder extends SeparateStorageEncoder {
+
+    public IntSeparateStorageEncoder() {
+      super();
+    }
+
+    @Override
+    public void encode(int value, ByteArrayOutputStream out) {
+      super.encode(Long.valueOf(value), out);
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilder.java b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilder.java
index 68c7e56..4596be4 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilder.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilder.java
@@ -30,9 +30,10 @@
 import org.slf4j.LoggerFactory;
 
 import java.util.Map;
+import java.util.Objects;
 
 /**
- * Each subclass of TSEncodingBuilder responds a enumerate value in {@linkplain TSEncoding
+ * Each subclass of TSEncodingBuilder responds an enumerate value in {@linkplain TSEncoding
  * TSEncoding}, which stores several configuration related to responding encoding type to generate
  * {@linkplain Encoder Encoder} instance.<br>
  * Each TSEncoding has a responding TSEncodingBuilder. The design referring to visit pattern
@@ -43,6 +44,7 @@
 
   private static final Logger logger = LoggerFactory.getLogger(TSEncodingBuilder.class);
   protected final TSFileConfig conf;
+  private static final String ERROR_MSG = "%s doesn't support data type: %s";
 
   protected TSEncodingBuilder() {
     this.conf = TSFileDescriptor.getInstance().getConfig();
@@ -64,8 +66,6 @@
         return new Ts2Diff();
       case GORILLA_V1:
         return new GorillaV1();
-      case REGULAR:
-        return new Regular();
       case GORILLA:
         return new GorillaV2();
       case DICTIONARY:
@@ -78,8 +78,18 @@
         return new Sprintz();
       case RLBE:
         return new RLBE();
+      case CAMEL:
+        return new Camel();
+      case DESCENDING_BIT_PACKING:
+        return new DescendingBitPacking();
+      case SEPARATE_STORAGE:
+        return new SeparateStorage();
+      case LAMINAR:
+        return new Laminar();
+      case FLEA:
+        return new Flea();
       default:
-        throw new UnsupportedOperationException(type.toString());
+        throw new UnsupportedOperationException("Unsupported encoding: " + type);
     }
   }
 
@@ -122,7 +132,7 @@
       if (props == null || !props.containsKey(Encoder.MAX_STRING_LENGTH)) {
         maxStringLength = TSFileDescriptor.getInstance().getConfig().getMaxStringLength();
       } else {
-        maxStringLength = Integer.valueOf(props.get(Encoder.MAX_STRING_LENGTH));
+        maxStringLength = Integer.parseInt(props.get(Encoder.MAX_STRING_LENGTH));
         if (maxStringLength < 0) {
           maxStringLength = TSFileDescriptor.getInstance().getConfig().getMaxStringLength();
           logger.warn(
@@ -152,7 +162,7 @@
         case DOUBLE:
           return new FloatEncoder(TSEncoding.RLE, type, maxPointNumber);
         default:
-          throw new UnSupportedDataTypeException("RLE doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.RLE, type));
       }
     }
 
@@ -207,15 +217,16 @@
         case DOUBLE:
           return new FloatEncoder(TSEncoding.TS_2DIFF, type, maxPointNumber);
         default:
-          throw new UnSupportedDataTypeException("TS_2DIFF doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.TS_2DIFF, type));
       }
     }
 
-    @Override
     /**
      * TS_2DIFF could specify <b>max_point_number</b> in given JSON Object, which means the maximum
      * decimal digits for float or double data.
      */
+    @Override
     public void initFromProps(Map<String, String> props) {
       // set max error from initialized map or default value if not set
       if (props == null || !props.containsKey(Encoder.MAX_POINT_NUMBER)) {
@@ -255,13 +266,122 @@
         case DOUBLE:
           return new DoublePrecisionEncoderV1();
         default:
-          throw new UnSupportedDataTypeException("GORILLA_V1 doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.GORILLA_V1, type));
       }
     }
 
     @Override
     public void initFromProps(Map<String, String> props) {
-      // allowed do nothing
+      // allowed to do nothing
+    }
+  }
+
+  /** for DOUBLE. */
+  public static class Camel extends TSEncodingBuilder {
+
+    @Override
+    public Encoder getEncoder(TSDataType type) {
+      if (Objects.requireNonNull(type) == TSDataType.DOUBLE) {
+        return new CamelEncoder();
+      }
+      throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.CAMEL, type));
+    }
+
+    @Override
+    public void initFromProps(Map<String, String> props) {
+      // allowed to do nothing
+    }
+  }
+
+  public static class DescendingBitPacking extends TSEncodingBuilder {
+
+    @Override
+    public Encoder getEncoder(TSDataType type) {
+      switch (type) {
+        case INT32:
+        case DATE:
+          return new DescendingBitPackingEncoder.IntDescendingBitPackingEncoder();
+        case INT64:
+        case TIMESTAMP:
+          return new DescendingBitPackingEncoder();
+        default:
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.DESCENDING_BIT_PACKING, type));
+      }
+    }
+
+    @Override
+    public void initFromProps(Map<String, String> props) {
+      // allowed to do nothing
+    }
+  }
+
+  public static class SeparateStorage extends TSEncodingBuilder {
+
+    @Override
+    public Encoder getEncoder(TSDataType type) {
+      switch (type) {
+        case INT32:
+        case DATE:
+          return new SeparateStorageEncoder.IntSeparateStorageEncoder();
+        case INT64:
+        case TIMESTAMP:
+          return new SeparateStorageEncoder();
+        default:
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.SEPARATE_STORAGE, type));
+      }
+    }
+
+    @Override
+    public void initFromProps(Map<String, String> props) {
+      // allowed to do nothing
+    }
+  }
+
+  public static class Laminar extends TSEncodingBuilder {
+
+    @Override
+    public Encoder getEncoder(TSDataType type) {
+      switch (type) {
+        case INT32:
+        case DATE:
+          return new LaminarEncoder.IntegerLaminarEncoder();
+        case INT64:
+        case TIMESTAMP:
+          return new LaminarEncoder();
+        default:
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.LAMINAR, type));
+      }
+    }
+
+    @Override
+    public void initFromProps(Map<String, String> props) {
+      // allowed to do nothing
+    }
+  }
+
+  public static class Flea extends TSEncodingBuilder {
+
+    @Override
+    public Encoder getEncoder(TSDataType type) {
+      switch (type) {
+        case INT32:
+        case DATE:
+          return new FleaEncoder.IntFleaEncoder();
+        case INT64:
+        case TIMESTAMP:
+          return new FleaEncoder();
+        default:
+          throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.FLEA, type));
+      }
+    }
+
+    @Override
+    public void initFromProps(Map<String, String> props) {
+      // allowed to do nothing
     }
   }
 
@@ -278,13 +398,14 @@
         case TIMESTAMP:
           return new RegularDataEncoder.LongRegularEncoder();
         default:
-          throw new UnSupportedDataTypeException("REGULAR doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.REGULAR, type));
       }
     }
 
     @Override
     public void initFromProps(Map<String, String> props) {
-      // allowed do nothing
+      // allowed to do nothing
     }
   }
 
@@ -305,13 +426,14 @@
         case TIMESTAMP:
           return new LongGorillaEncoder();
         default:
-          throw new UnSupportedDataTypeException("GORILLA doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.GORILLA, type));
       }
     }
 
     @Override
     public void initFromProps(Map<String, String> props) {
-      // allowed do nothing
+      // allowed to do nothing
     }
   }
 
@@ -330,7 +452,8 @@
         case DOUBLE:
           return new DoubleSprintzEncoder();
         default:
-          throw new UnSupportedDataTypeException("Sprintz doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(
+              String.format(ERROR_MSG, TSEncoding.SPRINTZ, type));
       }
     }
 
@@ -356,7 +479,7 @@
         case DOUBLE:
           return new DoubleRLBE();
         default:
-          throw new UnSupportedDataTypeException("RLBE doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.RLBE, type));
       }
     }
 
@@ -373,7 +496,7 @@
       if (type == TSDataType.TEXT || type == TSDataType.STRING) {
         return new DictionaryEncoder();
       }
-      throw new UnSupportedDataTypeException("DICTIONARY doesn't support data type: " + type);
+      throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.DICTIONARY, type));
     }
 
     @Override
@@ -394,7 +517,7 @@
         case TIMESTAMP:
           return new LongZigzagEncoder();
         default:
-          throw new UnSupportedDataTypeException("ZIGZAG doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.ZIGZAG, type));
       }
     }
 
@@ -421,13 +544,13 @@
         case TIMESTAMP:
           return new LongChimpEncoder();
         default:
-          throw new UnSupportedDataTypeException("CHIMP doesn't support data type: " + type);
+          throw new UnSupportedDataTypeException(String.format(ERROR_MSG, TSEncoding.CHIMP, type));
       }
     }
 
     @Override
     public void initFromProps(Map<String, String> props) {
-      // allowed do nothing
+      // allowed to do nothing
     }
   }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/encrypt/EncryptUtils.java b/java/tsfile/src/main/java/org/apache/tsfile/encrypt/EncryptUtils.java
index a621675..97b7ad5 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/encrypt/EncryptUtils.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/encrypt/EncryptUtils.java
@@ -28,9 +28,6 @@
 import javax.crypto.Mac;
 import javax.crypto.spec.SecretKeySpec;
 
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.security.InvalidKeyException;
 import java.security.MessageDigest;
@@ -43,8 +40,6 @@
 
   private static final Logger logger = LoggerFactory.getLogger(EncryptUtils.class);
 
-  private static final String defaultKey = "abcdefghijklmnop";
-
   private static final String encryptClassPrefix = "org.apache.tsfile.encrypt.";
 
   private static volatile String normalKeyStr;
@@ -81,47 +76,10 @@
     }
   }
 
-  public static String getEncryptKeyFromPath(String path) {
-    if (path == null) {
-      return defaultKey;
-    }
-    if (path.isEmpty()) {
-      return defaultKey;
-    }
-    try (BufferedReader br = new BufferedReader(new FileReader(path))) {
-      StringBuilder sb = new StringBuilder();
-      String line;
-      boolean first = true;
-      while ((line = br.readLine()) != null) {
-        if (first) {
-          sb.append(line);
-          first = false;
-        } else {
-          sb.append("\n").append(line);
-        }
-      }
-      String str = sb.toString();
-      if (str.isEmpty()) {
-        return defaultKey;
-      }
-      if (str.length() != 16) {
-        throw new EncryptException(
-            "The length of the key("
-                + str
-                + ") in the file is not 16 bytes, please check the key file:"
-                + path);
-      }
-      return str;
-    } catch (IOException e) {
-      throw new EncryptException("Read main encrypt key error", e);
-    }
-  }
-
-  public static byte[] getEncryptKeyFromToken(String token) {
+  public static byte[] getEncryptKeyFromToken(String token, byte[] salt) {
     if (token == null || token.trim().isEmpty()) {
-      return defaultKey.getBytes();
+      return generateSalt();
     }
-    byte[] salt = generateSalt();
     try {
       return deriveKeyInternal(token.getBytes(), salt, ITERATION_COUNT, dkLen);
     } catch (NoSuchAlgorithmException | InvalidKeyException e) {
@@ -184,7 +142,7 @@
     return Mac.getInstance(HMAC_ALGORITHM).getMacLength();
   }
 
-  private static byte[] generateSalt() {
+  public static byte[] generateSalt() {
     byte[] salt = new byte[SALT_LENGTH];
     new SecureRandom().nextBytes(salt);
     return salt;
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/exception/read/FileVersionTooOldException.java b/java/tsfile/src/main/java/org/apache/tsfile/exception/read/FileVersionTooOldException.java
index 2176f7f..ffa1b5f 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/exception/read/FileVersionTooOldException.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/exception/read/FileVersionTooOldException.java
@@ -23,10 +23,10 @@
 
 public class FileVersionTooOldException extends IOException {
 
-  public FileVersionTooOldException(byte currentVersion, byte minimumVersion) {
+  public FileVersionTooOldException(byte currentVersion, byte minimumVersion, byte maximumVersion) {
     super(
         String.format(
-            "The current version %d is too old, please at least upgrade to %d",
-            currentVersion, minimumVersion));
+            "The current version %d is not supported. Currently supported versions are %d to %d.",
+            currentVersion, minimumVersion, maximumVersion));
   }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/AbstractAlignedTimeSeriesMetadata.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/AbstractAlignedTimeSeriesMetadata.java
index f793f7c..b7a1525 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/AbstractAlignedTimeSeriesMetadata.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/AbstractAlignedTimeSeriesMetadata.java
@@ -179,16 +179,13 @@
       return true;
     }
     if (valueTimeseriesMetadataList != null) {
-      int notMatchCount = 0;
       for (int i = 0, size = dataTypes.size(); i < size; i++) {
         TimeseriesMetadata valueTimeSeriesMetadata = valueTimeseriesMetadataList.get(i);
         if (valueTimeSeriesMetadata != null
             && !valueTimeSeriesMetadata.typeMatch(dataTypes.get(i))) {
           valueTimeseriesMetadataList.set(i, null);
-          notMatchCount++;
         }
       }
-      return notMatchCount != dataTypes.size();
     }
     return true;
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/TsFileMetadata.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/TsFileMetadata.java
index c0e6b46..5c77055 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/TsFileMetadata.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/TsFileMetadata.java
@@ -35,6 +35,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Objects;
 import java.util.TreeMap;
 
 /** TSFileMetaData collects all metadata info and saves in its data structure. */
@@ -154,6 +155,13 @@
         if (propertiesMap.get("encryptKey") == null || propertiesMap.get("encryptKey").isEmpty()) {
           throw new EncryptException("TsfileMetadata null encryptKey while encryptLevel is 2");
         }
+        if (Objects.equals(
+                TSFileDescriptor.getInstance().getConfig().getEncryptType(),
+                "org.apache.tsfile.encrypt.UNENCRYPTED")
+            || Objects.equals(
+                TSFileDescriptor.getInstance().getConfig().getEncryptType(), "UNENCRYPTED")) {
+          throw new EncryptException("fail to decrypt encrypted tsfile in unencrypted system");
+        }
         IDecryptor decryptor =
             IDecryptor.getDecryptor(
                 propertiesMap.get("encryptType"),
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/enums/TSEncoding.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/enums/TSEncoding.java
index 2037437..20d4923 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/enums/TSEncoding.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/enums/TSEncoding.java
@@ -41,7 +41,13 @@
   FREQ((byte) 10),
   CHIMP((byte) 11),
   SPRINTZ((byte) 12),
-  RLBE((byte) 13);
+  RLBE((byte) 13),
+  CAMEL((byte) 14),
+  DESCENDING_BIT_PACKING((byte) 15),
+  SEPARATE_STORAGE((byte) 16),
+  LAMINAR((byte) 17),
+  FLEA((byte) 18);
+
   private final byte type;
 
   @SuppressWarnings("java:S2386") // used by other projects
@@ -63,6 +69,10 @@
     intSet.add(TSEncoding.CHIMP);
     intSet.add(TSEncoding.SPRINTZ);
     intSet.add(TSEncoding.RLBE);
+    intSet.add(TSEncoding.DESCENDING_BIT_PACKING);
+    intSet.add(TSEncoding.SEPARATE_STORAGE);
+    intSet.add(TSEncoding.LAMINAR);
+    intSet.add(TSEncoding.FLEA);
 
     TYPE_SUPPORTED_ENCODINGS.put(TSDataType.INT32, intSet);
     TYPE_SUPPORTED_ENCODINGS.put(TSDataType.INT64, intSet);
@@ -80,7 +90,11 @@
     floatSet.add(TSEncoding.RLBE);
 
     TYPE_SUPPORTED_ENCODINGS.put(TSDataType.FLOAT, floatSet);
-    TYPE_SUPPORTED_ENCODINGS.put(TSDataType.DOUBLE, floatSet);
+
+    Set<TSEncoding> doubleSet = new HashSet<>(floatSet);
+    doubleSet.add(TSEncoding.CAMEL);
+
+    TYPE_SUPPORTED_ENCODINGS.put(TSDataType.DOUBLE, doubleSet);
 
     Set<TSEncoding> textSet = new HashSet<>();
     textSet.add(TSEncoding.PLAIN);
@@ -135,6 +149,16 @@
         return TSEncoding.SPRINTZ;
       case 13:
         return TSEncoding.RLBE;
+      case 14:
+        return TSEncoding.CAMEL;
+      case 15:
+        return TSEncoding.DESCENDING_BIT_PACKING;
+      case 16:
+        return TSEncoding.SEPARATE_STORAGE;
+      case 17:
+        return TSEncoding.LAMINAR;
+      case 18:
+        return TSEncoding.FLEA;
       default:
         throw new IllegalArgumentException("Invalid input: " + encoding);
     }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/DoubleStatistics.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/DoubleStatistics.java
index 1d259ca..6d406d5 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/DoubleStatistics.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/DoubleStatistics.java
@@ -129,6 +129,16 @@
   }
 
   @Override
+  public void updateStats(double minValue, double maxValue) {
+    if (minValue < this.minValue) {
+      this.minValue = minValue;
+    }
+    if (maxValue > this.maxValue) {
+      this.maxValue = maxValue;
+    }
+  }
+
+  @Override
   public Double getMinValue() {
     return minValue;
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/FloatStatistics.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/FloatStatistics.java
index 0d9595a..fd76434 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/FloatStatistics.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/FloatStatistics.java
@@ -120,6 +120,16 @@
   }
 
   @Override
+  public void updateStats(float minValue, float maxValue) {
+    if (minValue < this.minValue) {
+      this.minValue = minValue;
+    }
+    if (maxValue > this.maxValue) {
+      this.maxValue = maxValue;
+    }
+  }
+
+  @Override
   public Float getMinValue() {
     return minValue;
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/IntegerStatistics.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/IntegerStatistics.java
index 22db0aa..b5cf408 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/IntegerStatistics.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/IntegerStatistics.java
@@ -121,6 +121,16 @@
   }
 
   @Override
+  public void updateStats(int minValue, int maxValue) {
+    if (minValue < this.minValue) {
+      this.minValue = minValue;
+    }
+    if (maxValue > this.maxValue) {
+      this.maxValue = maxValue;
+    }
+  }
+
+  @Override
   public Integer getMinValue() {
     return minValue;
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/Statistics.java b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/Statistics.java
index 3fc0f71..b1c2c46 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/Statistics.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/file/metadata/statistics/Statistics.java
@@ -24,6 +24,7 @@
 import org.apache.tsfile.exception.write.UnknownColumnTypeException;
 import org.apache.tsfile.read.filter.basic.Filter;
 import org.apache.tsfile.utils.Binary;
+import org.apache.tsfile.utils.Pair;
 import org.apache.tsfile.utils.ReadWriteForEncodingUtils;
 import org.apache.tsfile.utils.ReadWriteIOUtils;
 
@@ -35,7 +36,9 @@
 import java.io.OutputStream;
 import java.io.Serializable;
 import java.nio.ByteBuffer;
+import java.util.HashSet;
 import java.util.Objects;
+import java.util.Set;
 
 /**
  * This class is used for recording statistic information of each measurement in a delta file. While
@@ -63,6 +66,35 @@
 
   static final String STATS_UNSUPPORTED_MSG = "%s statistics does not support: %s";
 
+  private static final Set<Pair<TSDataType, TSDataType>> CAN_NOT_MERGE_PAIRS;
+
+  static {
+    CAN_NOT_MERGE_PAIRS = new HashSet<>();
+
+    // related pair about STRING
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.INT32, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.INT64, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.FLOAT, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.DOUBLE, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.BOOLEAN, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.TIMESTAMP, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.DATE, TSDataType.STRING);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.BLOB, TSDataType.STRING);
+
+    // related pair about TEXT
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.INT32, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.INT64, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.FLOAT, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.DOUBLE, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.BOOLEAN, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.TIMESTAMP, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.DATE, TSDataType.TEXT);
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.BLOB, TSDataType.TEXT);
+
+    // related pari about TEXT and STRING
+    addSymmetricPairs(CAN_NOT_MERGE_PAIRS, TSDataType.TEXT, TSDataType.STRING);
+  }
+
   /**
    * static method providing statistic instance for respective data type.
    *
@@ -229,7 +261,24 @@
     return to.isCompatible(from)
         &&
         // cannot alter from TEXT to STRING because we cannot add statistic to the existing chunks
-        !(from == TSDataType.TEXT && to == TSDataType.STRING);
+        isSatisfyMerge(from, to);
+  }
+
+  private static void addSymmetricPairs(
+      Set<Pair<TSDataType, TSDataType>> set, TSDataType... dataTypes) {
+    for (int i = 0; i < dataTypes.length; i++) {
+      for (int j = i + 1; j < dataTypes.length; j++) {
+        set.add(new Pair<>(dataTypes[i], dataTypes[j]));
+        set.add(new Pair<>(dataTypes[j], dataTypes[i]));
+      }
+    }
+  }
+
+  public static boolean isSatisfyMerge(TSDataType from, TSDataType to) {
+    if (from == to) {
+      return true;
+    }
+    return !CAN_NOT_MERGE_PAIRS.contains(new Pair<>(from, to));
   }
 
   public void update(long time, boolean value) {
@@ -393,6 +442,18 @@
     throw new UnsupportedOperationException();
   }
 
+  public void updateStats(int min, int max) {
+    throw new UnsupportedOperationException();
+  }
+
+  public void updateStats(float min, float max) {
+    throw new UnsupportedOperationException();
+  }
+
+  public void updateStats(double min, double max) {
+    throw new UnsupportedOperationException();
+  }
+
   public long getStartTime() {
     return startTime;
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/TsFileSequenceReader.java b/java/tsfile/src/main/java/org/apache/tsfile/read/TsFileSequenceReader.java
index 033e3d0..4330a60 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/TsFileSequenceReader.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/TsFileSequenceReader.java
@@ -285,8 +285,9 @@
   }
 
   private void checkFileVersion() throws FileVersionTooOldException {
-    if (TSFileConfig.VERSION_NUMBER - fileVersion > 1) {
-      throw new FileVersionTooOldException(fileVersion, (byte) (TSFileConfig.VERSION_NUMBER - 1));
+    if (fileVersion < TSFileConfig.VERSION_NUMBER_V3 || fileVersion > TSFileConfig.VERSION_NUMBER) {
+      throw new FileVersionTooOldException(
+          fileVersion, TSFileConfig.VERSION_NUMBER_V3, TSFileConfig.VERSION_NUMBER);
     }
   }
 
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/UnClosedTsFileReader.java b/java/tsfile/src/main/java/org/apache/tsfile/read/UnClosedTsFileReader.java
index f21b971..ce76fbc 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/UnClosedTsFileReader.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/UnClosedTsFileReader.java
@@ -40,7 +40,7 @@
 
   // ioSizeRecorder can be null
   public UnClosedTsFileReader(
-      String file, EncryptParameter decryptParam, LongConsumer ioSizeRecorder) throws IOException {
+      String file, EncryptParameter encryptParam, LongConsumer ioSizeRecorder) throws IOException {
     super(file, false, ioSizeRecorder);
     this.encryptParam = encryptParam;
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/TsBlockBuilder.java b/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/TsBlockBuilder.java
index 028bffc..0e96fca 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/TsBlockBuilder.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/TsBlockBuilder.java
@@ -49,9 +49,6 @@
   // This could be any other small number.
   private static final int DEFAULT_INITIAL_EXPECTED_ENTRIES = 8;
 
-  private static final int DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES =
-      TSFileDescriptor.getInstance().getConfig().getMaxTsBlockSizeInBytes();
-
   private TimeColumnBuilder timeColumnBuilder;
   private ColumnBuilder[] valueColumnBuilders;
   private List<TSDataType> types;
@@ -77,7 +74,10 @@
   }
 
   public TsBlockBuilder(int initialExpectedEntries, List<TSDataType> types) {
-    this(initialExpectedEntries, DEFAULT_MAX_TSBLOCK_SIZE_IN_BYTES, types);
+    this(
+        initialExpectedEntries,
+        TSFileDescriptor.getInstance().getConfig().getMaxTsBlockSizeInBytes(),
+        types);
   }
 
   public static TsBlockBuilder createWithOnlyTimeColumn() {
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumn.java b/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumn.java
index ecc1b66..88e2758 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumn.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumn.java
@@ -32,6 +32,7 @@
 import static org.apache.tsfile.read.common.block.column.ColumnUtil.checkArrayRange;
 import static org.apache.tsfile.read.common.block.column.ColumnUtil.checkReadablePosition;
 import static org.apache.tsfile.read.common.block.column.ColumnUtil.checkValidRegion;
+import static org.apache.tsfile.utils.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 import static org.apache.tsfile.utils.RamUsageEstimator.sizeOf;
 import static org.apache.tsfile.utils.RamUsageEstimator.sizeOfBooleanArray;
 
@@ -40,6 +41,8 @@
   private static final int INSTANCE_SIZE =
       (int) RamUsageEstimator.shallowSizeOfInstance(BinaryColumn.class);
 
+  public static final int SHALLOW_SIZE_IN_BYTES_PER_POSITION = NUM_BYTES_OBJECT_REF + Byte.BYTES;
+
   private final int arrayOffset;
   private int positionCount;
   private boolean[] valueIsNull;
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumnBuilder.java b/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumnBuilder.java
index d9c560d..d6facd4 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumnBuilder.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/common/block/column/BinaryColumnBuilder.java
@@ -28,6 +28,7 @@
 import org.apache.tsfile.utils.TsPrimitiveType;
 import org.apache.tsfile.write.UnSupportedDataTypeException;
 
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 
 import static java.lang.Math.max;
@@ -77,6 +78,11 @@
 
     hasNonNullValue = true;
     positionCount++;
+    if (columnBuilderStatus != null) {
+      columnBuilderStatus.addBytes(
+          BinaryColumn.SHALLOW_SIZE_IN_BYTES_PER_POSITION
+              + (value == null ? 0 : (int) value.ramBytesUsed()));
+    }
     return this;
   }
 
@@ -96,6 +102,35 @@
   }
 
   @Override
+  public ColumnBuilder writeBoolean(boolean value) {
+    return writeBinary(new Binary(String.valueOf(value), StandardCharsets.UTF_8));
+  }
+
+  @Override
+  public ColumnBuilder writeInt(int value) {
+    return writeBinary(new Binary(String.valueOf(value), StandardCharsets.UTF_8));
+  }
+
+  @Override
+  public ColumnBuilder writeLong(long value) {
+    return writeBinary(new Binary(String.valueOf(value), StandardCharsets.UTF_8));
+  }
+
+  @Override
+  public ColumnBuilder writeFloat(float value) {
+    return writeBinary(new Binary(String.valueOf(value), StandardCharsets.UTF_8));
+  }
+
+  @Override
+  public ColumnBuilder writeDouble(double value) {
+    return writeBinary(new Binary(String.valueOf(value), StandardCharsets.UTF_8));
+  }
+
+  public ColumnBuilder writeDate(int value) {
+    return writeBinary(new Binary(TSDataType.getDateStringValue(value), StandardCharsets.UTF_8));
+  }
+
+  @Override
   public ColumnBuilder writeTsPrimitiveType(TsPrimitiveType value) {
     return writeBinary(value.getBinary());
   }
@@ -110,6 +145,9 @@
 
     hasNullValue = true;
     positionCount++;
+    if (columnBuilderStatus != null) {
+      columnBuilderStatus.addBytes(BinaryColumn.SHALLOW_SIZE_IN_BYTES_PER_POSITION);
+    }
     return this;
   }
 
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/common/type/UnknownType.java b/java/tsfile/src/main/java/org/apache/tsfile/read/common/type/UnknownType.java
index 818019c..802165d 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/common/type/UnknownType.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/common/type/UnknownType.java
@@ -28,7 +28,7 @@
 
 import static org.apache.tsfile.utils.Preconditions.checkArgument;
 
-public class UnknownType implements Type {
+public class UnknownType extends AbstractType {
   public static final UnknownType UNKNOWN = new UnknownType();
   public static final String NAME = "unknown";
 
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/Filter.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/Filter.java
index cc48210..00801fd 100755
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/Filter.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/Filter.java
@@ -26,6 +26,8 @@
 import org.apache.tsfile.read.filter.factory.TimeFilterApi;
 import org.apache.tsfile.read.filter.factory.ValueFilterApi;
 import org.apache.tsfile.read.filter.operator.And;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators;
+import org.apache.tsfile.read.filter.operator.ExtractValueFilterOperators;
 import org.apache.tsfile.read.filter.operator.GroupByFilter;
 import org.apache.tsfile.read.filter.operator.GroupByMonthFilter;
 import org.apache.tsfile.read.filter.operator.Not;
@@ -245,6 +247,30 @@
         return new Or(buffer);
       case NOT:
         return new Not(buffer);
+      case EXTRACT_TIME_EQ:
+        return new ExtractTimeFilterOperators.ExtractTimeEq(buffer);
+      case EXTRACT_TIME_NEQ:
+        return new ExtractTimeFilterOperators.ExtractTimeNotEq(buffer);
+      case EXTRACT_TIME_GT:
+        return new ExtractTimeFilterOperators.ExtractTimeGt(buffer);
+      case EXTRACT_TIME_GTEQ:
+        return new ExtractTimeFilterOperators.ExtractTimeGtEq(buffer);
+      case EXTRACT_TIME_LT:
+        return new ExtractTimeFilterOperators.ExtractTimeLt(buffer);
+      case EXTRACT_TIME_LTEQ:
+        return new ExtractTimeFilterOperators.ExtractTimeLtEq(buffer);
+      case EXTRACT_VALUE_EQ:
+        return new ExtractValueFilterOperators.ExtractValueEq(buffer);
+      case EXTRACT_VALUE_NEQ:
+        return new ExtractValueFilterOperators.ExtractValueNotEq(buffer);
+      case EXTRACT_VALUE_GT:
+        return new ExtractValueFilterOperators.ExtractValueGt(buffer);
+      case EXTRACT_VALUE_GTEQ:
+        return new ExtractValueFilterOperators.ExtractValueGtEq(buffer);
+      case EXTRACT_VALUE_LT:
+        return new ExtractValueFilterOperators.ExtractValueLt(buffer);
+      case EXTRACT_VALUE_LTEQ:
+        return new ExtractValueFilterOperators.ExtractValueLtEq(buffer);
       default:
         throw new UnsupportedOperationException("Unsupported operator type:" + type);
     }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/OperatorType.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/OperatorType.java
index db8be98..d898e00 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/OperatorType.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/OperatorType.java
@@ -65,7 +65,21 @@
 
   // is null
   VALUE_IS_NULL("IS NULL"),
-  VALUE_IS_NOT_NULL("IS NOT NULL");
+  VALUE_IS_NOT_NULL("IS NOT NULL"),
+
+  // extract comparison
+  EXTRACT_TIME_EQ("="),
+  EXTRACT_TIME_NEQ("!="),
+  EXTRACT_TIME_GT(">"),
+  EXTRACT_TIME_GTEQ(">="),
+  EXTRACT_TIME_LT("<"),
+  EXTRACT_TIME_LTEQ("<="),
+  EXTRACT_VALUE_EQ("="),
+  EXTRACT_VALUE_NEQ("!="),
+  EXTRACT_VALUE_GT(">"),
+  EXTRACT_VALUE_GTEQ(">="),
+  EXTRACT_VALUE_LT("<"),
+  EXTRACT_VALUE_LTEQ("<=");
 
   private final String symbol;
 
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/TimeFilter.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/TimeFilter.java
index 18d444d..f1789b4 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/TimeFilter.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/basic/TimeFilter.java
@@ -137,7 +137,7 @@
     return satisfyInfo;
   }
 
-  protected abstract boolean timeSatisfy(long time);
+  public abstract boolean timeSatisfy(long time);
 
   @Override
   public boolean canSkip(IMetadata metadata) {
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/TimeFilterApi.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/TimeFilterApi.java
index ce9f612..3ba5505 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/TimeFilterApi.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/TimeFilterApi.java
@@ -19,6 +19,13 @@
 
 package org.apache.tsfile.read.filter.factory;
 
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.ExtractTimeEq;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.ExtractTimeGt;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.ExtractTimeGtEq;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.ExtractTimeLt;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.ExtractTimeLtEq;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.ExtractTimeNotEq;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.Field;
 import org.apache.tsfile.read.filter.operator.GroupByFilter;
 import org.apache.tsfile.read.filter.operator.GroupByMonthFilter;
 import org.apache.tsfile.read.filter.operator.TimeFilterOperators.TimeBetweenAnd;
@@ -33,6 +40,7 @@
 import org.apache.tsfile.read.filter.operator.TimeFilterOperators.TimeNotIn;
 import org.apache.tsfile.utils.TimeDuration;
 
+import java.time.ZoneId;
 import java.util.Set;
 import java.util.TimeZone;
 import java.util.concurrent.TimeUnit;
@@ -98,4 +106,34 @@
     return new GroupByMonthFilter(
         startTime, endTime, interval, slidingStep, timeZone, currPrecision);
   }
+
+  public static ExtractTimeGt extractTimeGt(
+      long value, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+    return new ExtractTimeGt(value, field, zoneId, currPrecision);
+  }
+
+  public static ExtractTimeGtEq extractTimeGtEq(
+      long value, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+    return new ExtractTimeGtEq(value, field, zoneId, currPrecision);
+  }
+
+  public static ExtractTimeLt extractTimeLt(
+      long value, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+    return new ExtractTimeLt(value, field, zoneId, currPrecision);
+  }
+
+  public static ExtractTimeLtEq extractTimeLtEq(
+      long value, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+    return new ExtractTimeLtEq(value, field, zoneId, currPrecision);
+  }
+
+  public static ExtractTimeEq extractTimeEq(
+      long value, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+    return new ExtractTimeEq(value, field, zoneId, currPrecision);
+  }
+
+  public static ExtractTimeNotEq extractTimeNotEq(
+      long value, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+    return new ExtractTimeNotEq(value, field, zoneId, currPrecision);
+  }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/ValueFilterApi.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/ValueFilterApi.java
index 53c4b74..745d78a 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/ValueFilterApi.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/factory/ValueFilterApi.java
@@ -25,6 +25,8 @@
 import org.apache.tsfile.read.filter.operator.BinaryFilterOperators;
 import org.apache.tsfile.read.filter.operator.BooleanFilterOperators;
 import org.apache.tsfile.read.filter.operator.DoubleFilterOperators;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators;
+import org.apache.tsfile.read.filter.operator.ExtractValueFilterOperators;
 import org.apache.tsfile.read.filter.operator.FloatFilterOperators;
 import org.apache.tsfile.read.filter.operator.IntegerFilterOperators;
 import org.apache.tsfile.read.filter.operator.LongFilterOperators;
@@ -33,8 +35,10 @@
 import org.apache.tsfile.read.filter.operator.ValueIsNullOperator;
 import org.apache.tsfile.utils.Binary;
 
+import java.time.ZoneId;
 import java.util.Objects;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.regex.Pattern;
 
 public class ValueFilterApi {
@@ -56,14 +60,14 @@
         return new BooleanFilterOperators.ValueGt(measurementIndex, (boolean) value);
       case INT32:
       case DATE:
-        return new IntegerFilterOperators.ValueGt(measurementIndex, (int) value);
+        return new IntegerFilterOperators.ValueGt(measurementIndex, ((Number) value).intValue());
       case INT64:
       case TIMESTAMP:
-        return new LongFilterOperators.ValueGt(measurementIndex, (long) value);
+        return new LongFilterOperators.ValueGt(measurementIndex, ((Number) value).longValue());
       case DOUBLE:
-        return new DoubleFilterOperators.ValueGt(measurementIndex, (double) value);
+        return new DoubleFilterOperators.ValueGt(measurementIndex, ((Number) value).doubleValue());
       case FLOAT:
-        return new FloatFilterOperators.ValueGt(measurementIndex, (float) value);
+        return new FloatFilterOperators.ValueGt(measurementIndex, ((Number) value).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueGt(measurementIndex, (Binary) value);
@@ -82,14 +86,15 @@
         return new BooleanFilterOperators.ValueGtEq(measurementIndex, (boolean) value);
       case INT32:
       case DATE:
-        return new IntegerFilterOperators.ValueGtEq(measurementIndex, (int) value);
+        return new IntegerFilterOperators.ValueGtEq(measurementIndex, ((Number) value).intValue());
       case INT64:
       case TIMESTAMP:
-        return new LongFilterOperators.ValueGtEq(measurementIndex, (long) value);
+        return new LongFilterOperators.ValueGtEq(measurementIndex, ((Number) value).longValue());
       case DOUBLE:
-        return new DoubleFilterOperators.ValueGtEq(measurementIndex, (double) value);
+        return new DoubleFilterOperators.ValueGtEq(
+            measurementIndex, ((Number) value).doubleValue());
       case FLOAT:
-        return new FloatFilterOperators.ValueGtEq(measurementIndex, (float) value);
+        return new FloatFilterOperators.ValueGtEq(measurementIndex, ((Number) value).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueGtEq(measurementIndex, (Binary) value);
@@ -108,14 +113,14 @@
         return new BooleanFilterOperators.ValueLt(measurementIndex, (boolean) value);
       case INT32:
       case DATE:
-        return new IntegerFilterOperators.ValueLt(measurementIndex, (int) value);
+        return new IntegerFilterOperators.ValueLt(measurementIndex, ((Number) value).intValue());
       case INT64:
       case TIMESTAMP:
-        return new LongFilterOperators.ValueLt(measurementIndex, (long) value);
+        return new LongFilterOperators.ValueLt(measurementIndex, ((Number) value).longValue());
       case DOUBLE:
-        return new DoubleFilterOperators.ValueLt(measurementIndex, (double) value);
+        return new DoubleFilterOperators.ValueLt(measurementIndex, ((Number) value).doubleValue());
       case FLOAT:
-        return new FloatFilterOperators.ValueLt(measurementIndex, (float) value);
+        return new FloatFilterOperators.ValueLt(measurementIndex, ((Number) value).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueLt(measurementIndex, (Binary) value);
@@ -134,14 +139,15 @@
         return new BooleanFilterOperators.ValueLtEq(measurementIndex, (boolean) value);
       case INT32:
       case DATE:
-        return new IntegerFilterOperators.ValueLtEq(measurementIndex, (int) value);
+        return new IntegerFilterOperators.ValueLtEq(measurementIndex, ((Number) value).intValue());
       case INT64:
       case TIMESTAMP:
-        return new LongFilterOperators.ValueLtEq(measurementIndex, (long) value);
+        return new LongFilterOperators.ValueLtEq(measurementIndex, ((Number) value).longValue());
       case DOUBLE:
-        return new DoubleFilterOperators.ValueLtEq(measurementIndex, (double) value);
+        return new DoubleFilterOperators.ValueLtEq(
+            measurementIndex, ((Number) value).doubleValue());
       case FLOAT:
-        return new FloatFilterOperators.ValueLtEq(measurementIndex, (float) value);
+        return new FloatFilterOperators.ValueLtEq(measurementIndex, ((Number) value).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueLtEq(measurementIndex, (Binary) value);
@@ -160,14 +166,14 @@
         return new BooleanFilterOperators.ValueEq(measurementIndex, (boolean) value);
       case INT32:
       case DATE:
-        return new IntegerFilterOperators.ValueEq(measurementIndex, (int) value);
+        return new IntegerFilterOperators.ValueEq(measurementIndex, ((Number) value).intValue());
       case INT64:
       case TIMESTAMP:
-        return new LongFilterOperators.ValueEq(measurementIndex, (long) value);
+        return new LongFilterOperators.ValueEq(measurementIndex, ((Number) value).longValue());
       case DOUBLE:
-        return new DoubleFilterOperators.ValueEq(measurementIndex, (double) value);
+        return new DoubleFilterOperators.ValueEq(measurementIndex, ((Number) value).doubleValue());
       case FLOAT:
-        return new FloatFilterOperators.ValueEq(measurementIndex, (float) value);
+        return new FloatFilterOperators.ValueEq(measurementIndex, ((Number) value).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueEq(measurementIndex, (Binary) value);
@@ -186,14 +192,15 @@
         return new BooleanFilterOperators.ValueNotEq(measurementIndex, (boolean) value);
       case INT32:
       case DATE:
-        return new IntegerFilterOperators.ValueNotEq(measurementIndex, (int) value);
+        return new IntegerFilterOperators.ValueNotEq(measurementIndex, ((Number) value).intValue());
       case INT64:
       case TIMESTAMP:
-        return new LongFilterOperators.ValueNotEq(measurementIndex, (long) value);
+        return new LongFilterOperators.ValueNotEq(measurementIndex, ((Number) value).longValue());
       case DOUBLE:
-        return new DoubleFilterOperators.ValueNotEq(measurementIndex, (double) value);
+        return new DoubleFilterOperators.ValueNotEq(
+            measurementIndex, ((Number) value).doubleValue());
       case FLOAT:
-        return new FloatFilterOperators.ValueNotEq(measurementIndex, (float) value);
+        return new FloatFilterOperators.ValueNotEq(measurementIndex, ((Number) value).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueNotEq(measurementIndex, (Binary) value);
@@ -224,17 +231,17 @@
       case INT32:
       case DATE:
         return new IntegerFilterOperators.ValueBetweenAnd(
-            measurementIndex, (int) value1, (int) value2);
+            measurementIndex, ((Number) value1).intValue(), ((Number) value2).intValue());
       case INT64:
       case TIMESTAMP:
         return new LongFilterOperators.ValueBetweenAnd(
-            measurementIndex, (long) value1, (long) value2);
+            measurementIndex, ((Number) value1).longValue(), ((Number) value2).longValue());
       case DOUBLE:
         return new DoubleFilterOperators.ValueBetweenAnd(
-            measurementIndex, (double) value1, (double) value2);
+            measurementIndex, ((Number) value1).doubleValue(), ((Number) value2).doubleValue());
       case FLOAT:
         return new FloatFilterOperators.ValueBetweenAnd(
-            measurementIndex, (float) value1, (float) value2);
+            measurementIndex, ((Number) value1).floatValue(), ((Number) value2).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueBetweenAnd(
@@ -259,17 +266,17 @@
       case INT32:
       case DATE:
         return new IntegerFilterOperators.ValueNotBetweenAnd(
-            measurementIndex, (int) value1, (int) value2);
+            measurementIndex, ((Number) value1).intValue(), ((Number) value2).intValue());
       case INT64:
       case TIMESTAMP:
         return new LongFilterOperators.ValueNotBetweenAnd(
-            measurementIndex, (long) value1, (long) value2);
+            measurementIndex, ((Number) value1).longValue(), ((Number) value2).longValue());
       case DOUBLE:
         return new DoubleFilterOperators.ValueNotBetweenAnd(
-            measurementIndex, (double) value1, (double) value2);
+            measurementIndex, ((Number) value1).doubleValue(), ((Number) value2).doubleValue());
       case FLOAT:
         return new FloatFilterOperators.ValueNotBetweenAnd(
-            measurementIndex, (float) value1, (float) value2);
+            measurementIndex, ((Number) value1).floatValue(), ((Number) value2).floatValue());
       case TEXT:
       case BLOB:
         return new BinaryFilterOperators.ValueNotBetweenAnd(
@@ -435,4 +442,64 @@
         throw new UnsupportedOperationException("Unsupported data type: " + type);
     }
   }
+
+  public static Filter extractValueGt(
+      int measurementIndex,
+      long value,
+      ExtractTimeFilterOperators.Field field,
+      ZoneId zoneId,
+      TimeUnit currPrecision) {
+    return new ExtractValueFilterOperators.ExtractValueGt(
+        measurementIndex, value, field, zoneId, currPrecision);
+  }
+
+  public static Filter extractValueGtEq(
+      int measurementIndex,
+      long value,
+      ExtractTimeFilterOperators.Field field,
+      ZoneId zoneId,
+      TimeUnit currPrecision) {
+    return new ExtractValueFilterOperators.ExtractValueGtEq(
+        measurementIndex, value, field, zoneId, currPrecision);
+  }
+
+  public static Filter extractValueLt(
+      int measurementIndex,
+      long value,
+      ExtractTimeFilterOperators.Field field,
+      ZoneId zoneId,
+      TimeUnit currPrecision) {
+    return new ExtractValueFilterOperators.ExtractValueLt(
+        measurementIndex, value, field, zoneId, currPrecision);
+  }
+
+  public static Filter extractValueLtEq(
+      int measurementIndex,
+      long value,
+      ExtractTimeFilterOperators.Field field,
+      ZoneId zoneId,
+      TimeUnit currPrecision) {
+    return new ExtractValueFilterOperators.ExtractValueLtEq(
+        measurementIndex, value, field, zoneId, currPrecision);
+  }
+
+  public static Filter extractValueEq(
+      int measurementIndex,
+      long value,
+      ExtractTimeFilterOperators.Field field,
+      ZoneId zoneId,
+      TimeUnit currPrecision) {
+    return new ExtractValueFilterOperators.ExtractValueEq(
+        measurementIndex, value, field, zoneId, currPrecision);
+  }
+
+  public static Filter extractValueNotEq(
+      int measurementIndex,
+      long value,
+      ExtractTimeFilterOperators.Field field,
+      ZoneId zoneId,
+      TimeUnit currPrecision) {
+    return new ExtractValueFilterOperators.ExtractValueNotEq(
+        measurementIndex, value, field, zoneId, currPrecision);
+  }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/operator/ExtractTimeFilterOperators.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/operator/ExtractTimeFilterOperators.java
new file mode 100644
index 0000000..3c93af5
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/operator/ExtractTimeFilterOperators.java
@@ -0,0 +1,632 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.read.filter.operator;
+
+import org.apache.tsfile.read.common.TimeRange;
+import org.apache.tsfile.read.filter.basic.Filter;
+import org.apache.tsfile.read.filter.basic.OperatorType;
+import org.apache.tsfile.read.filter.basic.TimeFilter;
+import org.apache.tsfile.read.filter.factory.TimeFilterApi;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.time.DayOfWeek;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+
+import static java.time.temporal.ChronoField.ALIGNED_WEEK_OF_YEAR;
+
+/**
+ * These are the extract time column operators in a filter predicate expression tree. They are
+ * constructed by using the methods in {@link TimeFilterApi}
+ */
+public final class ExtractTimeFilterOperators {
+
+  public enum Field {
+    YEAR,
+    QUARTER,
+    MONTH,
+    WEEK,
+    DAY,
+    DAY_OF_MONTH,
+    DAY_OF_WEEK,
+    DOW,
+    DAY_OF_YEAR,
+    DOY,
+    HOUR,
+    MINUTE,
+    SECOND,
+    MS,
+    US,
+    NS
+  }
+
+  private ExtractTimeFilterOperators() {
+    // forbidden construction
+  }
+
+  private static final String OPERATOR_TO_STRING_FORMAT = "extract %s from time %s %s";
+
+  abstract static class ExtractTimeCompareFilter extends TimeFilter {
+    protected final long constant;
+
+    protected final Field field;
+    protected final ZoneId zoneId;
+    protected final TimeUnit currPrecision;
+
+    private final transient Function<Long, Long> CAST_TIMESTAMP_TO_MS;
+    private final transient Function<Long, Long> EXTRACT_TIMESTAMP_MS_PART;
+    private final transient Function<Long, Long> EXTRACT_TIMESTAMP_US_PART;
+    private final transient Function<Long, Long> EXTRACT_TIMESTAMP_NS_PART;
+    protected final transient Function<Integer, Long> GET_YEAR_TIMESTAMP;
+
+    // calculate extraction of time
+    protected final transient Function<Long, Long> evaluateFunction;
+    // calculate if the truncations of input times are the same
+    protected final transient BiFunction<Long, Long, Boolean> truncatedEqualsFunction;
+
+    // constant cannot be null
+    protected ExtractTimeCompareFilter(
+        long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      this.constant = constant;
+      this.field = field;
+      this.zoneId = zoneId;
+      this.currPrecision = currPrecision;
+      // make lifestyles of these functions are same with object to avoid switch case in calculation
+      switch (currPrecision) {
+        case MICROSECONDS:
+          CAST_TIMESTAMP_TO_MS = timestamp -> timestamp / 1000;
+          EXTRACT_TIMESTAMP_MS_PART = timestamp -> Math.floorMod(timestamp, 1000_000L) / 1000;
+          EXTRACT_TIMESTAMP_US_PART = timestamp -> Math.floorMod(timestamp, 1000L);
+          EXTRACT_TIMESTAMP_NS_PART = timestamp -> 0L;
+          GET_YEAR_TIMESTAMP =
+              year ->
+                  Math.multiplyExact(
+                      LocalDate.of(year, 1, 1).atStartOfDay(zoneId).toEpochSecond(), 1000_000L);
+          break;
+        case NANOSECONDS:
+          CAST_TIMESTAMP_TO_MS = timestamp -> timestamp / 1000000;
+          EXTRACT_TIMESTAMP_MS_PART =
+              timestamp -> Math.floorMod(timestamp, 1000_000_000L) / 1000_000;
+          EXTRACT_TIMESTAMP_US_PART = timestamp -> Math.floorMod(timestamp, 1000_000L) / 1000;
+          EXTRACT_TIMESTAMP_NS_PART = timestamp -> Math.floorMod(timestamp, 1000L);
+          GET_YEAR_TIMESTAMP =
+              year ->
+                  Math.multiplyExact(
+                      LocalDate.of(year, 1, 1).atStartOfDay(zoneId).toEpochSecond(), 1000_000_000L);
+          break;
+        case MILLISECONDS:
+        default:
+          CAST_TIMESTAMP_TO_MS = timestamp -> timestamp;
+          EXTRACT_TIMESTAMP_MS_PART = timestamp -> Math.floorMod(timestamp, 1000L);
+          EXTRACT_TIMESTAMP_US_PART = timestamp -> 0L;
+          EXTRACT_TIMESTAMP_NS_PART = timestamp -> 0L;
+          GET_YEAR_TIMESTAMP =
+              year ->
+                  Math.multiplyExact(
+                      LocalDate.of(year, 1, 1).atStartOfDay(zoneId).toEpochSecond(), 1000L);
+          break;
+      }
+      evaluateFunction = constructEvaluateFunction(field, zoneId);
+      truncatedEqualsFunction = constructTruncatedEqualsFunction(field, zoneId);
+    }
+
+    protected Function<Long, Long> constructEvaluateFunction(Field field, ZoneId zoneId) {
+      switch (field) {
+        case YEAR:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getYear();
+        case QUARTER:
+          return timestamp -> (convertToZonedDateTime(timestamp, zoneId).getMonthValue() + 2L) / 3L;
+        case MONTH:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getMonthValue();
+        case WEEK:
+          return timestamp ->
+              convertToZonedDateTime(timestamp, zoneId).getLong(ALIGNED_WEEK_OF_YEAR);
+        case DAY:
+        case DAY_OF_MONTH:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getDayOfMonth();
+        case DAY_OF_WEEK:
+        case DOW:
+          return timestamp ->
+              (long) convertToZonedDateTime(timestamp, zoneId).getDayOfWeek().getValue();
+        case DAY_OF_YEAR:
+        case DOY:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getDayOfYear();
+        case HOUR:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getHour();
+        case MINUTE:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getMinute();
+        case SECOND:
+          return timestamp -> (long) convertToZonedDateTime(timestamp, zoneId).getSecond();
+        case MS:
+          return EXTRACT_TIMESTAMP_MS_PART;
+        case US:
+          return EXTRACT_TIMESTAMP_US_PART;
+        case NS:
+          return EXTRACT_TIMESTAMP_NS_PART;
+        default:
+          throw new UnsupportedOperationException("Unexpected extract field: " + field);
+      }
+    }
+
+    /** Truncate timestamps to based unit then compare */
+    protected BiFunction<Long, Long, Boolean> constructTruncatedEqualsFunction(
+        Field field, ZoneId zoneId) {
+      switch (field) {
+        case YEAR:
+          return (timestamp1, timestamp2) -> true;
+          // base YEAR
+        case QUARTER:
+        case MONTH:
+        case WEEK:
+        case DAY_OF_YEAR:
+        case DOY:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .withMonth(1)
+                  .withDayOfMonth(1)
+                  .truncatedTo(ChronoUnit.DAYS)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId)
+                          .withMonth(1)
+                          .withDayOfMonth(1)
+                          .truncatedTo(ChronoUnit.DAYS));
+          // base MONTH
+        case DAY:
+        case DAY_OF_MONTH:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .withDayOfMonth(1)
+                  .truncatedTo(ChronoUnit.DAYS)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId)
+                          .withDayOfMonth(1)
+                          .truncatedTo(ChronoUnit.DAYS));
+          // base WEEK
+        case DAY_OF_WEEK:
+        case DOW:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .with(DayOfWeek.MONDAY)
+                  .truncatedTo(ChronoUnit.DAYS)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId)
+                          .with(DayOfWeek.MONDAY)
+                          .truncatedTo(ChronoUnit.DAYS));
+          // base DAY
+        case HOUR:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .truncatedTo(ChronoUnit.DAYS)
+                  .equals(convertToZonedDateTime(timestamp2, zoneId).truncatedTo(ChronoUnit.DAYS));
+          // base HOUR
+        case MINUTE:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .truncatedTo(ChronoUnit.HOURS)
+                  .equals(convertToZonedDateTime(timestamp2, zoneId).truncatedTo(ChronoUnit.HOURS));
+          // base MINUTE
+        case SECOND:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .truncatedTo(ChronoUnit.MINUTES)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId).truncatedTo(ChronoUnit.MINUTES));
+          // base SECOND
+        case MS:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .truncatedTo(ChronoUnit.SECONDS)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId).truncatedTo(ChronoUnit.SECONDS));
+          // base MS
+        case US:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .truncatedTo(ChronoUnit.MILLIS)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId).truncatedTo(ChronoUnit.MILLIS));
+          // base US
+        case NS:
+          return (timestamp1, timestamp2) ->
+              convertToZonedDateTime(timestamp1, zoneId)
+                  .truncatedTo(ChronoUnit.MICROS)
+                  .equals(
+                      convertToZonedDateTime(timestamp2, zoneId).truncatedTo(ChronoUnit.MICROS));
+        default:
+          throw new UnsupportedOperationException("Unexpected extract field: " + field);
+      }
+    }
+
+    private ZonedDateTime convertToZonedDateTime(long timestamp, ZoneId zoneId) {
+      timestamp = CAST_TIMESTAMP_TO_MS.apply(timestamp);
+      return ZonedDateTime.ofInstant(Instant.ofEpochMilli(timestamp), zoneId);
+    }
+
+    protected ExtractTimeCompareFilter(ByteBuffer buffer) {
+      this(
+          ReadWriteIOUtils.readLong(buffer),
+          Field.values()[ReadWriteIOUtils.readInt(buffer)],
+          ZoneId.of(Objects.requireNonNull(ReadWriteIOUtils.readString(buffer))),
+          TimeUnit.values()[ReadWriteIOUtils.readInt(buffer)]);
+    }
+
+    public long getConstant() {
+      return constant;
+    }
+
+    public Field getField() {
+      return field;
+    }
+
+    public TimeUnit getCurrPrecision() {
+      return currPrecision;
+    }
+
+    public ZoneId getZoneId() {
+      return zoneId;
+    }
+
+    @Override
+    public void serialize(DataOutputStream outputStream) throws IOException {
+      super.serialize(outputStream);
+      ReadWriteIOUtils.write(constant, outputStream);
+      ReadWriteIOUtils.write(field.ordinal(), outputStream);
+      ReadWriteIOUtils.write(zoneId.getId(), outputStream);
+      ReadWriteIOUtils.write(currPrecision.ordinal(), outputStream);
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      ExtractTimeCompareFilter that = (ExtractTimeCompareFilter) o;
+      return constant == that.constant
+          && zoneId.equals(that.zoneId)
+          && currPrecision == that.currPrecision;
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public String toString() {
+      return String.format(
+          OPERATOR_TO_STRING_FORMAT, field, getOperatorType().getSymbol(), constant);
+    }
+  }
+
+  public static final class ExtractTimeEq extends ExtractTimeCompareFilter {
+
+    public ExtractTimeEq(long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      super(constant, field, zoneId, currPrecision);
+    }
+
+    public ExtractTimeEq(ByteBuffer buffer) {
+      super(buffer);
+    }
+
+    @Override
+    public boolean timeSatisfy(long time) {
+      return evaluateFunction.apply(time) == constant;
+    }
+
+    @Override
+    public boolean satisfyStartEndTime(long startTime, long endTime) {
+      return !(truncatedEqualsFunction.apply(startTime, endTime)
+          && (evaluateFunction.apply(endTime) < constant
+              || evaluateFunction.apply(startTime) > constant));
+    }
+
+    @Override
+    public boolean containStartEndTime(long startTime, long endTime) {
+      return truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(startTime) == constant
+          && evaluateFunction.apply(endTime) == constant;
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      if (field == Field.YEAR) {
+        int year = (int) constant;
+        return Collections.singletonList(
+            new TimeRange(GET_YEAR_TIMESTAMP.apply(year), GET_YEAR_TIMESTAMP.apply(year + 1) - 1));
+      }
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractTimeNotEq(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_TIME_EQ;
+    }
+  }
+
+  public static final class ExtractTimeNotEq extends ExtractTimeCompareFilter {
+
+    public ExtractTimeNotEq(long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      super(constant, field, zoneId, currPrecision);
+    }
+
+    public ExtractTimeNotEq(ByteBuffer buffer) {
+      super(buffer);
+    }
+
+    @Override
+    public boolean timeSatisfy(long time) {
+      return evaluateFunction.apply(time) != constant;
+    }
+
+    @Override
+    public boolean satisfyStartEndTime(long startTime, long endTime) {
+      return !(truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(startTime) == constant
+          && evaluateFunction.apply(endTime) == constant);
+    }
+
+    @Override
+    public boolean containStartEndTime(long startTime, long endTime) {
+      return truncatedEqualsFunction.apply(startTime, endTime)
+          && (evaluateFunction.apply(startTime) > constant
+              || evaluateFunction.apply(endTime) < constant);
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      if (field == Field.YEAR) {
+        List<TimeRange> res = new ArrayList<>();
+        int year = (int) constant;
+        res.add(new TimeRange(Long.MIN_VALUE, GET_YEAR_TIMESTAMP.apply(year) - 1));
+        res.add(new TimeRange(GET_YEAR_TIMESTAMP.apply(year + 1), Long.MAX_VALUE));
+        return res;
+      }
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractTimeEq(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_TIME_NEQ;
+    }
+  }
+
+  public static final class ExtractTimeLt extends ExtractTimeCompareFilter {
+
+    public ExtractTimeLt(long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      super(constant, field, zoneId, currPrecision);
+    }
+
+    public ExtractTimeLt(ByteBuffer buffer) {
+      super(buffer);
+    }
+
+    @Override
+    public boolean timeSatisfy(long time) {
+      return evaluateFunction.apply(time) < constant;
+    }
+
+    @Override
+    public boolean satisfyStartEndTime(long startTime, long endTime) {
+      return !(truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(startTime) >= constant);
+    }
+
+    @Override
+    public boolean containStartEndTime(long startTime, long endTime) {
+      return truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(endTime) < constant;
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      if (field == Field.YEAR) {
+        int year = (int) constant;
+        return Collections.singletonList(
+            new TimeRange(Long.MIN_VALUE, GET_YEAR_TIMESTAMP.apply(year) - 1));
+      }
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractTimeGtEq(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_TIME_LT;
+    }
+  }
+
+  public static final class ExtractTimeLtEq extends ExtractTimeCompareFilter {
+
+    public ExtractTimeLtEq(long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      super(constant, field, zoneId, currPrecision);
+    }
+
+    public ExtractTimeLtEq(ByteBuffer buffer) {
+      super(buffer);
+    }
+
+    @Override
+    public boolean timeSatisfy(long time) {
+      return evaluateFunction.apply(time) <= constant;
+    }
+
+    @Override
+    public boolean satisfyStartEndTime(long startTime, long endTime) {
+      return !(truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(startTime) > constant);
+    }
+
+    @Override
+    public boolean containStartEndTime(long startTime, long endTime) {
+      return truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(endTime) <= constant;
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      if (field == Field.YEAR) {
+        int year = (int) constant;
+        return Collections.singletonList(
+            new TimeRange(Long.MIN_VALUE, GET_YEAR_TIMESTAMP.apply(year + 1) - 1));
+      }
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractTimeGt(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_TIME_LTEQ;
+    }
+  }
+
+  public static final class ExtractTimeGt extends ExtractTimeCompareFilter {
+
+    public ExtractTimeGt(long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      super(constant, field, zoneId, currPrecision);
+    }
+
+    public ExtractTimeGt(ByteBuffer buffer) {
+      super(buffer);
+    }
+
+    @Override
+    public boolean timeSatisfy(long time) {
+      return evaluateFunction.apply(time) > constant;
+    }
+
+    @Override
+    public boolean satisfyStartEndTime(long startTime, long endTime) {
+      return !(truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(endTime) <= constant);
+    }
+
+    @Override
+    public boolean containStartEndTime(long startTime, long endTime) {
+      return truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(startTime) > constant;
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      if (field == Field.YEAR) {
+        int year = (int) constant;
+        return Collections.singletonList(
+            new TimeRange(GET_YEAR_TIMESTAMP.apply(year + 1), Long.MAX_VALUE));
+      }
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractTimeLtEq(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_TIME_GT;
+    }
+  }
+
+  public static final class ExtractTimeGtEq extends ExtractTimeCompareFilter {
+
+    public ExtractTimeGtEq(long constant, Field field, ZoneId zoneId, TimeUnit currPrecision) {
+      super(constant, field, zoneId, currPrecision);
+    }
+
+    public ExtractTimeGtEq(ByteBuffer buffer) {
+      super(buffer);
+    }
+
+    @Override
+    public boolean timeSatisfy(long time) {
+      return evaluateFunction.apply(time) >= constant;
+    }
+
+    @Override
+    public boolean satisfyStartEndTime(long startTime, long endTime) {
+      return !(truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(endTime) < constant);
+    }
+
+    @Override
+    public boolean containStartEndTime(long startTime, long endTime) {
+      return truncatedEqualsFunction.apply(startTime, endTime)
+          && evaluateFunction.apply(startTime) >= constant;
+    }
+
+    @Override
+    public List<TimeRange> getTimeRanges() {
+      if (field == Field.YEAR) {
+        int year = (int) constant;
+        return Collections.singletonList(
+            new TimeRange(GET_YEAR_TIMESTAMP.apply(year), Long.MAX_VALUE));
+      }
+      return Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractTimeLt(constant, field, zoneId, currPrecision);
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_TIME_GTEQ;
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/filter/operator/ExtractValueFilterOperators.java b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/operator/ExtractValueFilterOperators.java
new file mode 100644
index 0000000..05a39f0
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/filter/operator/ExtractValueFilterOperators.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.read.filter.operator;
+
+import org.apache.tsfile.file.metadata.statistics.Statistics;
+import org.apache.tsfile.read.filter.basic.Filter;
+import org.apache.tsfile.read.filter.basic.LongFilter;
+import org.apache.tsfile.read.filter.basic.OperatorType;
+import org.apache.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.Serializable;
+import java.nio.ByteBuffer;
+import java.time.ZoneId;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+public final class ExtractValueFilterOperators {
+
+  private ExtractValueFilterOperators() {
+    // forbidden construction
+  }
+
+  private static final String EXTRACT_OPERATOR_TO_STRING_FORMAT =
+      "extract %s from measurements[%s] %s %s";
+
+  // The input type of Extract must be INT64
+  abstract static class ExtractValueCompareFilter extends LongFilter {
+
+    protected final ExtractTimeFilterOperators.ExtractTimeCompareFilter delegate;
+
+    protected ExtractValueCompareFilter(
+        int measurementIndex, ExtractTimeFilterOperators.ExtractTimeCompareFilter delegate) {
+      super(measurementIndex);
+      this.delegate = delegate;
+    }
+
+    @Override
+    public void serialize(DataOutputStream outputStream) throws IOException {
+      super.serialize(outputStream);
+      delegate.serialize(outputStream);
+    }
+
+    @Override
+    public boolean valueSatisfy(Object value) {
+      return valueSatisfy((long) value);
+    }
+
+    @Override
+    public boolean valueSatisfy(long value) {
+      return delegate.timeSatisfy(value);
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public boolean canSkip(Statistics<? extends Serializable> statistics) {
+      if (statistics.isEmpty()) {
+        return false;
+      }
+      // has no intersection
+      return !delegate.satisfyStartEndTime(
+          (Long) statistics.getMinValue(), (Long) statistics.getMaxValue());
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public boolean allSatisfy(Statistics<? extends Serializable> statistics) {
+      if (statistics.isEmpty()) {
+        return false;
+      }
+      // contains all start and end time
+      return delegate.containStartEndTime(
+          (Long) statistics.getMinValue(), (Long) statistics.getMaxValue());
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (this == o) {
+        return true;
+      }
+      if (o == null || getClass() != o.getClass()) {
+        return false;
+      }
+      if (!super.equals(o)) {
+        return false;
+      }
+      ExtractTimeFilterOperators.ExtractTimeCompareFilter thatDelegate =
+          ((ExtractValueCompareFilter) o).delegate;
+      return delegate.equals(thatDelegate);
+    }
+
+    @Override
+    public int hashCode() {
+      return Objects.hash(super.hashCode(), delegate);
+    }
+
+    @Override
+    public String toString() {
+      return String.format(
+          EXTRACT_OPERATOR_TO_STRING_FORMAT,
+          delegate.getField(),
+          measurementIndex,
+          getOperatorType().getSymbol(),
+          delegate.getConstant());
+    }
+  }
+
+  public static final class ExtractValueEq extends ExtractValueCompareFilter {
+
+    public ExtractValueEq(
+        int measurementIndex,
+        long constant,
+        ExtractTimeFilterOperators.Field field,
+        ZoneId zoneId,
+        TimeUnit currPrecision) {
+      super(
+          measurementIndex,
+          new ExtractTimeFilterOperators.ExtractTimeEq(constant, field, zoneId, currPrecision));
+    }
+
+    public ExtractValueEq(ByteBuffer buffer) {
+      super(
+          ReadWriteIOUtils.readInt(buffer),
+          (ExtractTimeFilterOperators.ExtractTimeCompareFilter)
+              ExtractTimeFilterOperators.ExtractTimeEq.deserialize(buffer));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractValueNotEq(
+          measurementIndex,
+          delegate.getConstant(),
+          delegate.getField(),
+          delegate.getZoneId(),
+          delegate.getCurrPrecision());
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_VALUE_EQ;
+    }
+  }
+
+  public static final class ExtractValueNotEq extends ExtractValueCompareFilter {
+
+    public ExtractValueNotEq(
+        int measurementIndex,
+        long constant,
+        ExtractTimeFilterOperators.Field field,
+        ZoneId zoneId,
+        TimeUnit currPrecision) {
+      super(
+          measurementIndex,
+          new ExtractTimeFilterOperators.ExtractTimeNotEq(constant, field, zoneId, currPrecision));
+    }
+
+    public ExtractValueNotEq(ByteBuffer buffer) {
+      super(
+          ReadWriteIOUtils.readInt(buffer),
+          (ExtractTimeFilterOperators.ExtractTimeCompareFilter)
+              ExtractTimeFilterOperators.ExtractTimeNotEq.deserialize(buffer));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractValueEq(
+          measurementIndex,
+          delegate.getConstant(),
+          delegate.getField(),
+          delegate.getZoneId(),
+          delegate.getCurrPrecision());
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_VALUE_NEQ;
+    }
+  }
+
+  public static final class ExtractValueLt extends ExtractValueCompareFilter {
+
+    public ExtractValueLt(
+        int measurementIndex,
+        long constant,
+        ExtractTimeFilterOperators.Field field,
+        ZoneId zoneId,
+        TimeUnit currPrecision) {
+      super(
+          measurementIndex,
+          new ExtractTimeFilterOperators.ExtractTimeLt(constant, field, zoneId, currPrecision));
+    }
+
+    public ExtractValueLt(ByteBuffer buffer) {
+      super(
+          ReadWriteIOUtils.readInt(buffer),
+          (ExtractTimeFilterOperators.ExtractTimeCompareFilter)
+              ExtractTimeFilterOperators.ExtractTimeLt.deserialize(buffer));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractValueGtEq(
+          measurementIndex,
+          delegate.getConstant(),
+          delegate.getField(),
+          delegate.getZoneId(),
+          delegate.getCurrPrecision());
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_VALUE_LT;
+    }
+  }
+
+  public static final class ExtractValueLtEq extends ExtractValueCompareFilter {
+
+    public ExtractValueLtEq(
+        int measurementIndex,
+        long constant,
+        ExtractTimeFilterOperators.Field field,
+        ZoneId zoneId,
+        TimeUnit currPrecision) {
+      super(
+          measurementIndex,
+          new ExtractTimeFilterOperators.ExtractTimeLtEq(constant, field, zoneId, currPrecision));
+    }
+
+    public ExtractValueLtEq(ByteBuffer buffer) {
+      super(
+          ReadWriteIOUtils.readInt(buffer),
+          (ExtractTimeFilterOperators.ExtractTimeCompareFilter)
+              ExtractTimeFilterOperators.ExtractTimeLtEq.deserialize(buffer));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractValueGt(
+          measurementIndex,
+          delegate.getConstant(),
+          delegate.getField(),
+          delegate.getZoneId(),
+          delegate.getCurrPrecision());
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_VALUE_LTEQ;
+    }
+  }
+
+  public static final class ExtractValueGt extends ExtractValueCompareFilter {
+
+    public ExtractValueGt(
+        int measurementIndex,
+        long constant,
+        ExtractTimeFilterOperators.Field field,
+        ZoneId zoneId,
+        TimeUnit currPrecision) {
+      super(
+          measurementIndex,
+          new ExtractTimeFilterOperators.ExtractTimeGt(constant, field, zoneId, currPrecision));
+    }
+
+    public ExtractValueGt(ByteBuffer buffer) {
+      super(
+          ReadWriteIOUtils.readInt(buffer),
+          (ExtractTimeFilterOperators.ExtractTimeCompareFilter)
+              ExtractTimeFilterOperators.ExtractTimeGt.deserialize(buffer));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractValueLtEq(
+          measurementIndex,
+          delegate.getConstant(),
+          delegate.getField(),
+          delegate.getZoneId(),
+          delegate.getCurrPrecision());
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_VALUE_GT;
+    }
+  }
+
+  public static final class ExtractValueGtEq extends ExtractValueCompareFilter {
+
+    public ExtractValueGtEq(
+        int measurementIndex,
+        long constant,
+        ExtractTimeFilterOperators.Field field,
+        ZoneId zoneId,
+        TimeUnit currPrecision) {
+      super(
+          measurementIndex,
+          new ExtractTimeFilterOperators.ExtractTimeGtEq(constant, field, zoneId, currPrecision));
+    }
+
+    public ExtractValueGtEq(ByteBuffer buffer) {
+      super(
+          ReadWriteIOUtils.readInt(buffer),
+          (ExtractTimeFilterOperators.ExtractTimeCompareFilter)
+              ExtractTimeFilterOperators.ExtractTimeGtEq.deserialize(buffer));
+    }
+
+    @Override
+    public Filter reverse() {
+      return new ExtractValueLt(
+          measurementIndex,
+          delegate.getConstant(),
+          delegate.getField(),
+          delegate.getZoneId(),
+          delegate.getCurrPrecision());
+    }
+
+    @Override
+    public OperatorType getOperatorType() {
+      return OperatorType.EXTRACT_VALUE_GTEQ;
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/query/dataset/AbstractResultSet.java b/java/tsfile/src/main/java/org/apache/tsfile/read/query/dataset/AbstractResultSet.java
index 1035279..b4be108 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/query/dataset/AbstractResultSet.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/query/dataset/AbstractResultSet.java
@@ -27,9 +27,9 @@
 
 import java.io.IOException;
 import java.time.LocalDate;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 public abstract class AbstractResultSet implements ResultSet {
 
@@ -41,7 +41,7 @@
     // Add Time at first column
     this.resultSetMetadata = new ResultSetMetadataImpl(columnNameList, tsDataTypeList);
     int columnNum = tsDataTypeList.size() + 1;
-    this.columnNameToColumnIndexMap = new HashMap<>(tsDataTypeList.size());
+    this.columnNameToColumnIndexMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
     for (int columnIndex = 1; columnIndex <= columnNum; columnIndex++) {
       this.columnNameToColumnIndexMap.put(
           resultSetMetadata.getColumnName(columnIndex), columnIndex);
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/IPageReader.java b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/IPageReader.java
index 9d6980b..1b870f8 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/IPageReader.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/IPageReader.java
@@ -43,6 +43,8 @@
 
   boolean isModified();
 
+  void setModified(boolean modified);
+
   void initTsBlockBuilder(List<TSDataType> dataTypes);
 
   void setLimitOffset(PaginationController paginationController);
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/AbstractAlignedPageReader.java b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/AbstractAlignedPageReader.java
index 9230d48..ad5af3b 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/AbstractAlignedPageReader.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/AbstractAlignedPageReader.java
@@ -349,6 +349,11 @@
   }
 
   @Override
+  public void setModified(boolean modified) {
+    this.isModified = modified;
+  }
+
+  @Override
   public void initTsBlockBuilder(List<TSDataType> dataTypes) {
     if (paginationController.hasLimit()) {
       builder =
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/PageReader.java b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/PageReader.java
index ee74f81..7ba234c 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/PageReader.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/PageReader.java
@@ -414,6 +414,11 @@
   }
 
   @Override
+  public void setModified(boolean modified) {
+    pageHeader.setModified(modified);
+  }
+
+  @Override
   public void initTsBlockBuilder(List<TSDataType> dataTypes) {
     // do nothing
   }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/ValuePageReader.java b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/ValuePageReader.java
index fc9dd37..998b211 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/ValuePageReader.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/read/reader/page/ValuePageReader.java
@@ -27,6 +27,7 @@
 import org.apache.tsfile.read.common.BatchData;
 import org.apache.tsfile.read.common.BatchDataFactory;
 import org.apache.tsfile.read.common.TimeRange;
+import org.apache.tsfile.read.common.block.column.BinaryColumnBuilder;
 import org.apache.tsfile.read.filter.basic.Filter;
 import org.apache.tsfile.utils.Binary;
 import org.apache.tsfile.utils.ReadWriteIOUtils;
@@ -318,7 +319,6 @@
           }
           break;
         case INT32:
-        case DATE:
           int anInt = valueDecoder.readInt(valueBuffer);
           if (keepCurrentRow[i]) {
             if (isDeleted[i]) {
@@ -328,6 +328,20 @@
             }
           }
           break;
+        case DATE:
+          int anDate = valueDecoder.readInt(valueBuffer);
+          if (keepCurrentRow[i]) {
+            if (isDeleted[i]) {
+              columnBuilder.appendNull();
+            } else {
+              if (columnBuilder instanceof BinaryColumnBuilder) {
+                ((BinaryColumnBuilder) columnBuilder).writeDate(anDate);
+              } else {
+                columnBuilder.writeInt(anDate);
+              }
+            }
+          }
+          break;
         case INT64:
         case TIMESTAMP:
           long aLong = valueDecoder.readLong(valueBuffer);
@@ -403,12 +417,21 @@
           }
           break;
         case INT32:
-        case DATE:
           int anInt = valueDecoder.readInt(valueBuffer);
           if (keepCurrentRow[i]) {
             columnBuilder.writeInt(anInt);
           }
           break;
+        case DATE:
+          int anDate = valueDecoder.readInt(valueBuffer);
+          if (keepCurrentRow[i]) {
+            if (columnBuilder instanceof BinaryColumnBuilder) {
+              ((BinaryColumnBuilder) columnBuilder).writeDate(anDate);
+            } else {
+              columnBuilder.writeInt(anDate);
+            }
+          }
+          break;
         case INT64:
         case TIMESTAMP:
           long aLong = valueDecoder.readLong(valueBuffer);
@@ -485,7 +508,15 @@
             continue;
           }
           int aInt = valueDecoder.readInt(valueBuffer);
-          columnBuilder.writeInt(aInt);
+          if (dataType == TSDataType.INT32) {
+            columnBuilder.writeInt(aInt);
+          } else {
+            if (columnBuilder instanceof BinaryColumnBuilder) {
+              ((BinaryColumnBuilder) columnBuilder).writeDate(aInt);
+            } else {
+              columnBuilder.writeInt(aInt);
+            }
+          }
         }
         break;
       case INT64:
@@ -584,6 +615,10 @@
     return pageHeader.isModified();
   }
 
+  public void setModified(boolean modified) {
+    pageHeader.setModified(modified);
+  }
+
   public boolean isDeleted(long timestamp) {
     while (deleteIntervalList != null && deleteCursor < deleteIntervalList.size()) {
       if (deleteIntervalList.get(deleteCursor).contains(timestamp)) {
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/utils/RegexUtils.java b/java/tsfile/src/main/java/org/apache/tsfile/utils/RegexUtils.java
new file mode 100644
index 0000000..75c6234
--- /dev/null
+++ b/java/tsfile/src/main/java/org/apache/tsfile/utils/RegexUtils.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.utils;
+
+import java.util.regex.Pattern;
+import java.util.regex.PatternSyntaxException;
+
+public class RegexUtils {
+
+  private RegexUtils() {
+    // util class
+  }
+
+  /**
+   * The main idea of this part comes from
+   * https://codereview.stackexchange.com/questions/36861/convert-sql-like-to-regex/36864
+   */
+  public static String parseLikePatternToRegex(String likePattern) {
+    String unescapeValue = unescapeString(likePattern);
+    String specialRegexStr = ".^$*+?{}[]|()";
+    StringBuilder patternStrBuild = new StringBuilder();
+    patternStrBuild.append("^");
+    for (int i = 0; i < unescapeValue.length(); i++) {
+      String ch = String.valueOf(unescapeValue.charAt(i));
+      if (specialRegexStr.contains(ch)) {
+        ch = "\\" + unescapeValue.charAt(i);
+      }
+      if (i == 0
+          || !"\\".equals(String.valueOf(unescapeValue.charAt(i - 1)))
+          || i >= 2
+              && "\\\\"
+                  .equals(
+                      patternStrBuild.substring(
+                          patternStrBuild.length() - 2, patternStrBuild.length()))) {
+        String replaceStr = ch.replace("%", ".*?").replace("_", ".");
+        patternStrBuild.append(replaceStr);
+      } else {
+        patternStrBuild.append(ch);
+      }
+    }
+    patternStrBuild.append("$");
+    return patternStrBuild.toString();
+  }
+
+  // This Method is for un-escaping strings except '\' before special string '%', '_', '\', because
+  // we need to use '\' to judge whether to replace this to regexp string
+  private static String unescapeString(String value) {
+    StringBuilder stringBuilder = new StringBuilder();
+    int curIndex = 0;
+    while (curIndex < value.length()) {
+      String ch = String.valueOf(value.charAt(curIndex));
+      if ("\\".equals(ch) && curIndex < value.length() - 1) {
+        String nextChar = String.valueOf(value.charAt(curIndex + 1));
+        if ("%".equals(nextChar) || "_".equals(nextChar) || "\\".equals(nextChar)) {
+          stringBuilder.append(ch);
+          if ("\\".equals(nextChar)) {
+            curIndex++;
+          }
+        }
+      } else {
+        stringBuilder.append(ch);
+      }
+      curIndex++;
+    }
+    return stringBuilder.toString();
+  }
+
+  public static Pattern compileRegex(String regex) {
+    try {
+      return Pattern.compile(regex);
+    } catch (PatternSyntaxException e) {
+      throw new PatternSyntaxException("Illegal regex expression: ", regex, e.getIndex());
+    }
+  }
+}
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/TsFileWriter.java b/java/tsfile/src/main/java/org/apache/tsfile/write/TsFileWriter.java
index d1a13e7..ff37612 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/TsFileWriter.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/TsFileWriter.java
@@ -233,6 +233,10 @@
     }
   }
 
+  public void setChunkGroupSizeThreshold(long chunkGroupSizeThreshold) {
+    this.chunkGroupSizeThreshold = chunkGroupSizeThreshold;
+  }
+
   public void registerSchemaTemplate(
       String templateName, Map<String, IMeasurementSchema> template, boolean isAligned) {
     getSchema().registerSchemaTemplate(templateName, new MeasurementGroup(isAligned, template));
@@ -501,7 +505,7 @@
   }
 
   private IChunkGroupWriter tryToInitialGroupWriter(
-      IDeviceID deviceId, boolean isAligned, boolean isTableModel) {
+      IDeviceID deviceId, boolean isAligned, boolean isTableModel) throws IOException {
     IChunkGroupWriter groupWriter = groupWriters.get(deviceId);
     if (groupWriter == null) {
       if (isAligned) {
@@ -509,6 +513,8 @@
             isTableModel
                 ? new TableChunkGroupWriterImpl(deviceId, encryptParam)
                 : new AlignedChunkGroupWriterImpl(deviceId, encryptParam);
+        initAllSeriesWriterForAlignedSeries(
+            (AlignedChunkGroupWriterImpl) groupWriter, deviceId, isTableModel);
         if (!isUnseq) { // Sequence File
           ((AlignedChunkGroupWriterImpl) groupWriter)
               .setLastTime(alignedDeviceLastTimeMap.get(deviceId));
@@ -526,6 +532,21 @@
     return groupWriter;
   }
 
+  private void initAllSeriesWriterForAlignedSeries(
+      AlignedChunkGroupWriterImpl alignedChunkGroupWriter, IDeviceID deviceID, boolean isTableModel)
+      throws IOException {
+    Schema schema = getSchema();
+    if (isTableModel) {
+      alignedChunkGroupWriter.tryToAddSeriesWriter(
+          schema.getTableSchemaMap().get(deviceID.getTableName()).getColumnSchemas());
+    } else {
+      MeasurementGroup deviceSchema = schema.getSeriesSchema(deviceID);
+      for (IMeasurementSchema measurementSchema : deviceSchema.getMeasurementSchemaMap().values()) {
+        alignedChunkGroupWriter.tryToAddSeriesWriterInternal(measurementSchema);
+      }
+    }
+  }
+
   /**
    * write a record in type of T.
    *
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/chunk/AlignedChunkWriterImpl.java b/java/tsfile/src/main/java/org/apache/tsfile/write/chunk/AlignedChunkWriterImpl.java
index 49ec4d7..2ec4bd8 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/chunk/AlignedChunkWriterImpl.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/chunk/AlignedChunkWriterImpl.java
@@ -61,7 +61,7 @@
     timeChunkWriter =
         new TimeChunkWriter(
             schema.getMeasurementName(),
-            schema.getCompressor(),
+            schema.getTimeCompressor(),
             schema.getTimeTSEncoding(),
             schema.getTimeEncoder(),
             this.encryptParam);
@@ -76,7 +76,7 @@
       valueChunkWriterList.add(
           new ValueChunkWriter(
               valueMeasurementIdList.get(i),
-              schema.getCompressor(),
+              schema.getValueCompressor(i),
               valueTSDataTypeList.get(i),
               valueTSEncodingList.get(i),
               valueEncoderList.get(i),
@@ -92,7 +92,7 @@
     timeChunkWriter =
         new TimeChunkWriter(
             schema.getMeasurementName(),
-            schema.getCompressor(),
+            schema.getTimeCompressor(),
             schema.getTimeTSEncoding(),
             schema.getTimeEncoder(),
             this.encryptParam);
@@ -107,7 +107,7 @@
       valueChunkWriterList.add(
           new ValueChunkWriter(
               valueMeasurementIdList.get(i),
-              schema.getCompressor(),
+              schema.getValueCompressor(i),
               valueTSDataTypeList.get(i),
               valueTSEncodingList.get(i),
               valueEncoderList.get(i),
@@ -193,7 +193,8 @@
     TSEncoding timeEncoding =
         TSEncoding.valueOf(TSFileDescriptor.getInstance().getConfig().getTimeEncoder());
     TSDataType timeType = TSFileDescriptor.getInstance().getConfig().getTimeSeriesDataType();
-    CompressionType timeCompression = TSFileDescriptor.getInstance().getConfig().getCompressor();
+    CompressionType timeCompression =
+        TSFileDescriptor.getInstance().getConfig().getCompressor(TSDataType.INT64);
     timeChunkWriter =
         new TimeChunkWriter(
             "",
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/record/Tablet.java b/java/tsfile/src/main/java/org/apache/tsfile/write/record/Tablet.java
index d84cd05..c984b42 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/record/Tablet.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/record/Tablet.java
@@ -42,6 +42,7 @@
 import java.nio.ByteBuffer;
 import java.time.LocalDate;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -57,6 +58,7 @@
  *
  * <p>Notice: The tablet should not have empty cell, please use BitMap to denote null value
  */
+@SuppressWarnings("SuspiciousSystemArraycopy")
 public class Tablet {
 
   private static final int DEFAULT_SIZE = 1024;
@@ -95,7 +97,7 @@
   private int rowSize;
 
   /** The maximum number of rows for this {@link Tablet} */
-  private final int maxRowNumber;
+  private int maxRowNumber;
 
   /**
    * Return a {@link Tablet} with default specified row number. This is the standard constructor
@@ -507,6 +509,9 @@
       throw new IllegalArgumentException(
           "The data type of column index " + columnIndex + " is not TEXT/STRING/BLOB");
     }
+    if (val == null) {
+      return;
+    }
     final Binary[] sensor = (Binary[]) values[columnIndex];
     sensor[rowIndex] = new Binary(val, TSFileConfig.STRING_CHARSET);
     updateBitMap(rowIndex, columnIndex, false);
@@ -524,6 +529,9 @@
       throw new IllegalArgumentException(
           "The data type of column index " + columnIndex + " is not TEXT/STRING/BLOB");
     }
+    if (val == null) {
+      return;
+    }
     final Binary[] sensor = (Binary[]) values[columnIndex];
     sensor[rowIndex] = new Binary(val);
     updateBitMap(rowIndex, columnIndex, false);
@@ -541,6 +549,9 @@
       throw new IllegalArgumentException(
           "The data type of column index " + columnIndex + " is not DATE");
     }
+    if (val == null) {
+      return;
+    }
     final LocalDate[] sensor = (LocalDate[]) values[columnIndex];
     sensor[rowIndex] = val;
     updateBitMap(rowIndex, columnIndex, false);
@@ -623,32 +634,36 @@
   }
 
   private Object createValueColumnOfDataType(TSDataType dataType) {
+    return createValueColumnOfDataType(dataType, maxRowNumber);
+  }
+
+  private Object createValueColumnOfDataType(TSDataType dataType, int capacity) {
 
     Object valueColumn;
     switch (dataType) {
       case INT32:
-        valueColumn = new int[maxRowNumber];
+        valueColumn = new int[capacity];
         break;
       case INT64:
       case TIMESTAMP:
-        valueColumn = new long[maxRowNumber];
+        valueColumn = new long[capacity];
         break;
       case FLOAT:
-        valueColumn = new float[maxRowNumber];
+        valueColumn = new float[capacity];
         break;
       case DOUBLE:
-        valueColumn = new double[maxRowNumber];
+        valueColumn = new double[capacity];
         break;
       case BOOLEAN:
-        valueColumn = new boolean[maxRowNumber];
+        valueColumn = new boolean[capacity];
         break;
       case TEXT:
       case STRING:
       case BLOB:
-        valueColumn = new Binary[maxRowNumber];
+        valueColumn = new Binary[capacity];
         break;
       case DATE:
-        valueColumn = new LocalDate[maxRowNumber];
+        valueColumn = new LocalDate[capacity];
         break;
       default:
         throw new UnSupportedDataTypeException(String.format(NOT_SUPPORT_DATATYPE, dataType));
@@ -1311,4 +1326,156 @@
     }
     return true;
   }
+
+  /**
+   * Append `another` to the tail of this tablet.
+   *
+   * @return true if append successfully, false if the tablets have inconsistent insertTarget of
+   *     schemas.
+   */
+  public boolean append(Tablet another) {
+    return append(another, 0);
+  }
+
+  /**
+   * Append `another` to the tail of this tablet, with a preferred capacity after appending. To
+   * avoid frequent memory copy, it is highly recommended to use this method instead of
+   * `append(Tablet)` when multiple appending could be involved.
+   *
+   * @param preferredCapacity if the total size of the two tablets is below this value, this tablet
+   *     will extend to the capacity.
+   * @return true if append successfully, false if the tablets have inconsistent insertTarget of *
+   *     schemas.
+   */
+  public boolean append(Tablet another, int preferredCapacity) {
+    if (!Objects.equals(insertTargetName, another.insertTargetName)) {
+      return false;
+    }
+
+    if (!Objects.equals(schemas, another.schemas)) {
+      return false;
+    }
+
+    if (!Objects.equals(columnCategories, another.columnCategories)) {
+      return false;
+    }
+
+    int prevCapacity = timestamps.length;
+    appendTimestamps(another, preferredCapacity);
+    appendValues(another, prevCapacity, preferredCapacity);
+    appendBitMaps(another, prevCapacity, preferredCapacity);
+
+    maxRowNumber = Math.max(preferredCapacity, Math.max(maxRowNumber, rowSize + another.rowSize));
+    rowSize = rowSize + another.rowSize;
+    return true;
+  }
+
+  private void appendTimestamps(Tablet another, int preferredCapacity) {
+    int capacity = timestamps.length;
+    int thisSize = rowSize;
+    int thatSize = another.rowSize;
+    int totalSize = Math.max(thisSize + thatSize, preferredCapacity);
+
+    if (thisSize + thatSize <= capacity && capacity >= preferredCapacity) {
+      System.arraycopy(another.timestamps, 0, timestamps, thisSize, thatSize);
+    } else {
+      timestamps = Arrays.copyOf(timestamps, totalSize);
+      System.arraycopy(another.timestamps, 0, timestamps, thisSize, thatSize);
+    }
+  }
+
+  private void appendValues(Tablet another, int prevCapacity, int preferredCapacity) {
+    for (int i = 0; i < schemas.size(); i++) {
+      appendValue(another, prevCapacity, i, schemas.get(i).getType(), preferredCapacity);
+    }
+  }
+
+  private void appendValue(
+      Tablet another,
+      int prevCapacity,
+      int columnIndex,
+      TSDataType dataType,
+      int preferredCapacity) {
+    Object thisCol = values[columnIndex];
+    Object anotherCol = another.values[columnIndex];
+
+    int thisSize = rowSize;
+    int thatSize = another.rowSize;
+    int totalSize = Math.max(thisSize + thatSize, preferredCapacity);
+
+    if (thisSize + thatSize <= prevCapacity && prevCapacity >= preferredCapacity) {
+      System.arraycopy(anotherCol, 0, thisCol, thisSize, thatSize);
+    } else {
+      Object newCol = createValueColumnOfDataType(dataType, totalSize);
+      System.arraycopy(thisCol, 0, newCol, 0, thisSize);
+      System.arraycopy(anotherCol, 0, newCol, thisSize, thatSize);
+      values[columnIndex] = newCol;
+    }
+  }
+
+  private void appendBitMaps(Tablet another, int prevCapacity, int preferredCapacity) {
+    if (bitMaps == null && another.bitMaps == null) {
+      return;
+    }
+
+    if (bitMaps == null) {
+      appendBitMapsWhenThisNull(another, prevCapacity, preferredCapacity);
+    } else if (another.bitMaps == null) {
+      appendBitMapsWhenThatNull(another, prevCapacity, preferredCapacity);
+    } else {
+      appendBitMapsWhenNoNull(another, prevCapacity, preferredCapacity);
+    }
+  }
+
+  private void appendBitMapsWhenThisNull(Tablet another, int prevCapacity, int preferredCapacity) {
+    int thisSize = rowSize;
+    int thatSize = another.rowSize;
+    bitMaps = new BitMap[schemas.size()];
+    int totalSize = Math.max(prevCapacity, Math.max(thisSize + thatSize, preferredCapacity));
+    for (int i = 0; i < bitMaps.length; i++) {
+      if (another.bitMaps[i] != null) {
+        bitMaps[i] = new BitMap(totalSize);
+        bitMaps[i].append(another.bitMaps[i], thisSize, thatSize);
+      }
+    }
+  }
+
+  private void appendBitMapsWhenThatNull(Tablet another, int prevCapacity, int preferredCapacity) {
+    int thisSize = rowSize;
+    int thatSize = another.rowSize;
+    int totalSize = Math.max(prevCapacity, Math.max(thisSize + thatSize, preferredCapacity));
+    for (BitMap bitMap : bitMaps) {
+      if (bitMap != null) {
+        bitMap.extend(totalSize);
+        for (int j = 0; j < thatSize; j++) {
+          bitMap.unmark(j + thisSize);
+        }
+      }
+    }
+  }
+
+  private void appendBitMapsWhenNoNull(Tablet another, int prevCapacity, int preferredCapacity) {
+    int thisSize = rowSize;
+    int thatSize = another.rowSize;
+    int totalSize = Math.max(prevCapacity, Math.max(thisSize + thatSize, preferredCapacity));
+
+    for (int i = 0; i < bitMaps.length; i++) {
+      if (bitMaps[i] == null && another.bitMaps[i] == null) {
+        continue;
+      }
+
+      if (bitMaps[i] == null && another.bitMaps[i] != null) {
+        bitMaps[i] = new BitMap(totalSize);
+        bitMaps[i].append(another.bitMaps[i], thisSize, thatSize);
+      } else if (bitMaps[i] != null && another.bitMaps[i] == null) {
+        bitMaps[i].extend(totalSize);
+        for (int j = 0; j < thatSize; j++) {
+          bitMaps[i].unmark(j + thisSize);
+        }
+      } else if (bitMaps[i] != null && another.bitMaps[i] != null) {
+        bitMaps[i].extend(totalSize);
+        bitMaps[i].append(another.bitMaps[i], thisSize, thatSize);
+      }
+    }
+  }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/schema/MeasurementSchema.java b/java/tsfile/src/main/java/org/apache/tsfile/write/schema/MeasurementSchema.java
index f63c2dc..59b1d01 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/schema/MeasurementSchema.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/schema/MeasurementSchema.java
@@ -67,7 +67,7 @@
         measurementName,
         dataType,
         TSEncoding.valueOf(TSFileDescriptor.getInstance().getConfig().getValueEncoder(dataType)),
-        TSFileDescriptor.getInstance().getConfig().getCompressor(),
+        TSFileDescriptor.getInstance().getConfig().getCompressor(dataType),
         null);
   }
 
@@ -77,7 +77,7 @@
         measurementName,
         dataType,
         encoding,
-        TSFileDescriptor.getInstance().getConfig().getCompressor(),
+        TSFileDescriptor.getInstance().getConfig().getCompressor(dataType),
         null);
   }
 
@@ -465,6 +465,9 @@
   public long ramBytesUsed() {
     return INSTANCE_SIZE
         + RamUsageEstimator.sizeOf(measurementName)
-        + RamUsageEstimator.sizeOfMap(props);
+        + RamUsageEstimator.sizeOfMapWithKnownShallowSize(
+            props,
+            RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP,
+            RamUsageEstimator.SHALLOW_SIZE_OF_HASHMAP_ENTRY);
   }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/schema/TimeseriesSchema.java b/java/tsfile/src/main/java/org/apache/tsfile/write/schema/TimeseriesSchema.java
index e05912d..21c7426 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/schema/TimeseriesSchema.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/schema/TimeseriesSchema.java
@@ -56,7 +56,7 @@
         fullPath,
         tsDataType,
         TSEncoding.valueOf(TSFileDescriptor.getInstance().getConfig().getValueEncoder(tsDataType)),
-        TSFileDescriptor.getInstance().getConfig().getCompressor(),
+        TSFileDescriptor.getInstance().getConfig().getCompressor(tsDataType),
         Collections.emptyMap());
   }
 
@@ -66,7 +66,7 @@
         fullPath,
         type,
         encoding,
-        TSFileDescriptor.getInstance().getConfig().getCompressor(),
+        TSFileDescriptor.getInstance().getConfig().getCompressor(type),
         Collections.emptyMap());
   }
 
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/schema/VectorMeasurementSchema.java b/java/tsfile/src/main/java/org/apache/tsfile/write/schema/VectorMeasurementSchema.java
index c53fee3..777eaf8 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/schema/VectorMeasurementSchema.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/schema/VectorMeasurementSchema.java
@@ -47,13 +47,19 @@
       RamUsageEstimator.shallowSizeOfInstance(VectorMeasurementSchema.class);
   private static final long BUILDER_SIZE =
       RamUsageEstimator.shallowSizeOfInstance(TSEncodingBuilder.class);
+  private static final byte NO_UNIFIED_COMPRESSOR = -1;
 
   private String deviceId;
   private Map<String, Integer> measurementsToIndexMap;
   private byte[] types;
   private byte[] encodings;
   private TSEncodingBuilder[] encodingConverters;
-  private byte compressor;
+
+  /** For compatibility of old versions. */
+  private byte unifiedCompressor;
+
+  /** [0] is for the time column. */
+  private byte[] compressors;
 
   public VectorMeasurementSchema() {}
 
@@ -80,7 +86,34 @@
     }
     this.encodings = encodingsInByte;
     this.encodingConverters = new TSEncodingBuilder[subMeasurements.length];
-    this.compressor = compressionType.serialize();
+    this.unifiedCompressor = compressionType.serialize();
+  }
+
+  public VectorMeasurementSchema(
+      String deviceId,
+      String[] subMeasurements,
+      TSDataType[] types,
+      TSEncoding[] encodings,
+      byte[] compressors) {
+    this.deviceId = deviceId;
+    this.measurementsToIndexMap = new HashMap<>();
+    for (int i = 0; i < subMeasurements.length; i++) {
+      measurementsToIndexMap.put(subMeasurements[i], i);
+    }
+    byte[] typesInByte = new byte[types.length];
+    for (int i = 0; i < types.length; i++) {
+      typesInByte[i] = types[i].serialize();
+    }
+    this.types = typesInByte;
+
+    byte[] encodingsInByte = new byte[encodings.length];
+    for (int i = 0; i < encodings.length; i++) {
+      encodingsInByte[i] = encodings[i].serialize();
+    }
+    this.encodings = encodingsInByte;
+    this.encodingConverters = new TSEncodingBuilder[subMeasurements.length];
+    this.unifiedCompressor = NO_UNIFIED_COMPRESSOR;
+    this.compressors = compressors;
   }
 
   public VectorMeasurementSchema(String deviceId, String[] subMeasurements, TSDataType[] types) {
@@ -101,7 +134,15 @@
               .serialize();
     }
     this.encodingConverters = new TSEncodingBuilder[subMeasurements.length];
-    this.compressor = TSFileDescriptor.getInstance().getConfig().getCompressor().serialize();
+    this.unifiedCompressor = NO_UNIFIED_COMPRESSOR;
+    // the first column is time
+    this.compressors = new byte[subMeasurements.length + 1];
+    compressors[0] =
+        TSFileDescriptor.getInstance().getConfig().getCompressor(TSDataType.INT64).serialize();
+    for (int i = 0; i < types.length; i++) {
+      compressors[i + 1] =
+          TSFileDescriptor.getInstance().getConfig().getCompressor(types[i]).serialize();
+    }
   }
 
   public VectorMeasurementSchema(
@@ -124,9 +165,24 @@
     return deviceId;
   }
 
+  @Deprecated // Aligned series should not invoke this method
   @Override
   public CompressionType getCompressor() {
-    return CompressionType.deserialize(compressor);
+    throw new UnsupportedOperationException("Aligned series should not invoke this method");
+  }
+
+  public CompressionType getTimeCompressor() {
+    if (compressors != null) {
+      return CompressionType.deserialize(compressors[0]);
+    }
+    return CompressionType.deserialize(unifiedCompressor);
+  }
+
+  public CompressionType getValueCompressor(int index) {
+    if (compressors != null) {
+      return CompressionType.deserialize(compressors[index + 1]);
+    }
+    return CompressionType.deserialize(unifiedCompressor);
   }
 
   @Override
@@ -276,7 +332,11 @@
     for (byte encoding : encodings) {
       byteLen += ReadWriteIOUtils.write(encoding, buffer);
     }
-    byteLen += ReadWriteIOUtils.write(compressor, buffer);
+    byteLen += ReadWriteIOUtils.write(unifiedCompressor, buffer);
+    if (unifiedCompressor == NO_UNIFIED_COMPRESSOR) {
+      buffer.put(compressors);
+      byteLen += compressors.length;
+    }
 
     return byteLen;
   }
@@ -297,7 +357,11 @@
     for (byte encoding : encodings) {
       byteLen += ReadWriteIOUtils.write(encoding, outputStream);
     }
-    byteLen += ReadWriteIOUtils.write(compressor, outputStream);
+    byteLen += ReadWriteIOUtils.write(unifiedCompressor, outputStream);
+    if (unifiedCompressor == NO_UNIFIED_COMPRESSOR) {
+      outputStream.write(compressors);
+      byteLen += compressors.length;
+    }
 
     return byteLen;
   }
@@ -348,7 +412,15 @@
     }
     vectorMeasurementSchema.encodings = encodings;
 
-    vectorMeasurementSchema.compressor = ReadWriteIOUtils.readByte(inputStream);
+    vectorMeasurementSchema.unifiedCompressor = ReadWriteIOUtils.readByte(inputStream);
+    if (vectorMeasurementSchema.unifiedCompressor == NO_UNIFIED_COMPRESSOR) {
+      byte[] compressors = new byte[measurementSize + 1];
+      int read = inputStream.read(compressors);
+      if (read != measurementSize) {
+        throw new IOException("Unexpected end of stream when reading compressors");
+      }
+      vectorMeasurementSchema.compressors = compressors;
+    }
     return vectorMeasurementSchema;
   }
 
@@ -375,7 +447,12 @@
     }
     vectorMeasurementSchema.encodings = encodings;
 
-    vectorMeasurementSchema.compressor = ReadWriteIOUtils.readByte(buffer);
+    vectorMeasurementSchema.unifiedCompressor = ReadWriteIOUtils.readByte(buffer);
+    if (vectorMeasurementSchema.unifiedCompressor == NO_UNIFIED_COMPRESSOR) {
+      byte[] compressors = new byte[measurementSize + 1];
+      buffer.get(compressors);
+      vectorMeasurementSchema.compressors = compressors;
+    }
     return vectorMeasurementSchema;
   }
 
@@ -391,12 +468,13 @@
     return Arrays.equals(types, that.types)
         && Arrays.equals(encodings, that.encodings)
         && Objects.equals(deviceId, that.deviceId)
-        && Objects.equals(compressor, that.compressor);
+        && Objects.equals(unifiedCompressor, that.unifiedCompressor)
+        && Objects.equals(compressors, that.compressors);
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(deviceId, types, encodings, compressor);
+    return Objects.hash(deviceId, types, encodings, unifiedCompressor, compressors);
   }
 
   /** compare by vector name */
@@ -424,7 +502,14 @@
           TSEncoding.deserialize(encodings[entry.getValue()]).toString());
       sc.addTail("],");
     }
-    sc.addTail(CompressionType.deserialize(compressor).toString());
+    if (unifiedCompressor != NO_UNIFIED_COMPRESSOR) {
+      sc.addTail(CompressionType.deserialize(unifiedCompressor).toString());
+    } else {
+      for (byte compressor : compressors) {
+        sc.addTail(CompressionType.deserialize(compressor).toString()).addTail(",");
+      }
+    }
+
     return sc.toString();
   }
 
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/v4/AbstractTableModelTsFileWriter.java b/java/tsfile/src/main/java/org/apache/tsfile/write/v4/AbstractTableModelTsFileWriter.java
index 92f4c10..3120bb4 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/v4/AbstractTableModelTsFileWriter.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/v4/AbstractTableModelTsFileWriter.java
@@ -146,7 +146,7 @@
   }
 
   protected IChunkGroupWriter tryToInitialGroupWriter(
-      IDeviceID deviceId, boolean isAligned, boolean isTableModel) {
+      IDeviceID deviceId, boolean isAligned, boolean isTableModel) throws IOException {
     IChunkGroupWriter groupWriter = groupWriters.get(deviceId);
     if (groupWriter == null) {
       if (isAligned) {
@@ -156,6 +156,7 @@
                 : new AlignedChunkGroupWriterImpl(deviceId, encryptParam);
         ((AlignedChunkGroupWriterImpl) groupWriter)
             .setLastTime(alignedDeviceLastTimeMap.get(deviceId));
+        initAllSeriesWriterForAlignedSeries((AlignedChunkGroupWriterImpl) groupWriter);
       } else {
         groupWriter = new NonAlignedChunkGroupWriterImpl(deviceId, encryptParam);
         ((NonAlignedChunkGroupWriterImpl) groupWriter)
@@ -167,6 +168,9 @@
     return groupWriter;
   }
 
+  protected abstract void initAllSeriesWriterForAlignedSeries(
+      AlignedChunkGroupWriterImpl alignedChunkGroupWriter) throws IOException;
+
   /**
    * calculate total memory size occupied by all ChunkGroupWriter instances currently.
    *
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/v4/DeviceTableModelWriter.java b/java/tsfile/src/main/java/org/apache/tsfile/write/v4/DeviceTableModelWriter.java
index 66fca2c..f64f285 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/v4/DeviceTableModelWriter.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/v4/DeviceTableModelWriter.java
@@ -29,6 +29,7 @@
 import org.apache.tsfile.file.metadata.TableSchema;
 import org.apache.tsfile.utils.Pair;
 import org.apache.tsfile.utils.WriteUtils;
+import org.apache.tsfile.write.chunk.AlignedChunkGroupWriterImpl;
 import org.apache.tsfile.write.record.Tablet;
 import org.apache.tsfile.write.schema.IMeasurementSchema;
 
@@ -40,6 +41,7 @@
 public class DeviceTableModelWriter extends AbstractTableModelTsFileWriter {
 
   private String tableName;
+  private TableSchema tableSchema;
   private boolean isTableWriteAligned = true;
 
   public DeviceTableModelWriter(File file, TableSchema tableSchema, long memoryThreshold)
@@ -74,6 +76,12 @@
     checkMemorySizeAndMayFlushChunks();
   }
 
+  @Override
+  protected void initAllSeriesWriterForAlignedSeries(
+      AlignedChunkGroupWriterImpl alignedChunkGroupWriter) throws IOException {
+    alignedChunkGroupWriter.tryToAddSeriesWriter(tableSchema.getColumnSchemas());
+  }
+
   private void checkIsTableExistAndSetColumnCategoryList(Tablet tablet)
       throws WriteProcessException {
     String tabletTableName = tablet.getTableName();
@@ -102,6 +110,7 @@
 
   private void registerTableSchema(TableSchema tableSchema) {
     this.tableName = tableSchema.getTableName();
+    this.tableSchema = tableSchema;
     getSchema().registerTableSchema(tableSchema);
   }
 }
diff --git a/java/tsfile/src/main/java/org/apache/tsfile/write/writer/TsFileIOWriter.java b/java/tsfile/src/main/java/org/apache/tsfile/write/writer/TsFileIOWriter.java
index 7d8dc49..31a6af5 100644
--- a/java/tsfile/src/main/java/org/apache/tsfile/write/writer/TsFileIOWriter.java
+++ b/java/tsfile/src/main/java/org/apache/tsfile/write/writer/TsFileIOWriter.java
@@ -403,6 +403,10 @@
    */
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
   public void endFile() throws IOException {
+    if (!canWrite) {
+      return;
+    }
+
     checkInMemoryPathCount();
     readChunkMetadataAndConstructIndexTree();
 
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/common/bitStream/TestBitStream.java b/java/tsfile/src/test/java/org/apache/tsfile/common/bitStream/TestBitStream.java
new file mode 100644
index 0000000..447f582
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/common/bitStream/TestBitStream.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.common.bitStream;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+
+import static org.apache.tsfile.common.bitStream.BitInputStream.readVarLong;
+import static org.apache.tsfile.common.bitStream.BitOutputStream.writeVarInt;
+import static org.apache.tsfile.common.bitStream.BitOutputStream.writeVarLong;
+
+public class TestBitStream {
+
+  @Test
+  public void testWriteAndReadInt() throws IOException {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+
+    out.writeInt(0, 0); // No-op write
+    out.writeInt(0x78563412, 32); // Full int
+    out.writeInt(2, 4); // Partial int
+    out.writeInt(3, 3);
+    out.writeInt(0, 1);
+    out.writeInt(0xA8, 8); // One byte
+    out.writeInt(0x11, 6); // 6 bits
+    out.close();
+
+    byte[] expected = new byte[] {0x78, 0x56, 0x34, 0x12, 0x26, (byte) 0xA8, 0x44};
+    Assert.assertArrayEquals(expected, bout.toByteArray());
+  }
+
+  @Test
+  public void testBitInputWithMarkAndEOF() throws IOException {
+    byte[] data = new byte[] {0x12, 0x34, 0x56, 0x78, 0x32, (byte) 0xA8, 0x11};
+    BitInputStream in = new BitInputStream(new ByteArrayInputStream(data), data.length * 8);
+
+    Assert.assertTrue(in.markSupported());
+    Assert.assertEquals(56, in.availableBits());
+    Assert.assertEquals(0, in.readInt(0));
+    Assert.assertEquals(0x12345678, in.readInt(32));
+    Assert.assertEquals(3, in.readInt(4));
+
+    in.mark(200);
+    Assert.assertEquals(1, in.readInt(3));
+    Assert.assertEquals(0, in.readInt(1));
+    Assert.assertEquals(0xA8, in.readInt(8));
+
+    in.reset();
+    Assert.assertEquals(2, in.readInt(4));
+    Assert.assertEquals(0xA8, in.readInt(8));
+
+    Assert.assertEquals(8, in.availableBits());
+    Assert.assertEquals(0x110, in.readInt(12));
+
+    try {
+      in.readInt(1);
+      Assert.fail("Expected EOFException");
+    } catch (EOFException ignored) {
+    }
+
+    in.close();
+  }
+
+  @Test
+  public void testWriteAndReadLong() throws IOException {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+
+    long[] values = {0L, 1L, 0xFFFFFFFFL, 0x123456789ABCDEFL, Long.MAX_VALUE, Long.MIN_VALUE};
+    int[] bits = {1, 2, 32, 60, 64, 64};
+
+    for (int i = 0; i < values.length; i++) {
+      out.writeLong(values[i], bits[i]);
+    }
+    out.close();
+
+    BitInputStream in =
+        new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+    for (int i = 0; i < values.length; i++) {
+      long actual = in.readLong(bits[i]);
+      Assert.assertEquals("Mismatch at index " + i, values[i], actual);
+    }
+    in.close();
+  }
+
+  @Test
+  public void testWriteAndReadBits() throws IOException {
+    boolean[] bits = {true, false, true, true, false, false, false, true, false, true};
+
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+    for (boolean b : bits) {
+      out.writeBit(b);
+    }
+    out.close();
+
+    BitInputStream in =
+        new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+    for (int i = 0; i < bits.length; i++) {
+      boolean actual = in.readBit();
+      Assert.assertEquals("Bit mismatch at index " + i, bits[i], actual);
+    }
+
+    try {
+      in.readBit();
+      Assert.fail("Expected EOFException");
+    } catch (EOFException ignored) {
+    }
+
+    in.close();
+  }
+
+  @Test
+  public void testLongBitWidths() throws IOException {
+    for (int bits = 1; bits <= 64; bits++) {
+      long value = (1L << (bits - 1)) | 1L;
+
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      out.writeLong(value, bits);
+      out.close();
+
+      BitInputStream in =
+          new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+      long result = in.readLong(bits);
+      Assert.assertEquals("Failed at bit width = " + bits, value, result);
+      in.close();
+    }
+  }
+
+  @Test
+  public void testAllZerosAndAllOnesLong() throws IOException {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+
+    out.writeLong(0L, 64);
+    out.writeLong(-1L, 64);
+    out.close();
+
+    BitInputStream in =
+        new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+    Assert.assertEquals(0L, in.readLong(64));
+    Assert.assertEquals(-1L, in.readLong(64));
+    in.close();
+  }
+
+  @Test
+  public void testBitBoundaryCrossing() throws IOException {
+    boolean[] bits = {
+      false,
+      true,
+      true,
+      false,
+      true,
+      false,
+      false,
+      true, // first byte
+      true,
+      true,
+      false,
+      true,
+      false,
+      true,
+      true // crosses byte
+    };
+
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+    for (boolean b : bits) {
+      out.writeBit(b);
+    }
+    out.close();
+
+    BitInputStream in =
+        new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+    for (int i = 0; i < bits.length; i++) {
+      Assert.assertEquals("Mismatch at bit index " + i, bits[i], in.readBit());
+    }
+    in.close();
+  }
+
+  @Test
+  public void testMixedLongAndBit() throws IOException {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+
+    out.writeLong(0x1FL, 5); // 11111
+    out.writeBit(true); // 1
+    out.writeBit(false); // 0
+    out.writeBit(true); // 1
+    out.close();
+
+    BitInputStream in =
+        new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+
+    Assert.assertEquals(0x1F, in.readLong(5));
+    Assert.assertTrue(in.readBit());
+    Assert.assertFalse(in.readBit());
+    Assert.assertTrue(in.readBit());
+
+    in.close();
+  }
+
+  @Test
+  public void testVarLongSymmetry() throws IOException {
+    long[] testValues = {
+      0, 1, -1, 63, -63, 64, -64, 128, -128, 1024, -1024, Long.MAX_VALUE, Long.MIN_VALUE
+    };
+
+    for (long original : testValues) {
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      writeVarLong(original, out);
+      out.close();
+      BitInputStream in =
+          new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+      long decoded = readVarLong(in);
+      in.close();
+
+      Assert.assertEquals("Mismatch for value: " + original, original, decoded);
+    }
+  }
+
+  @Test
+  public void testVarLongContinuousRange() throws IOException {
+    for (int value = -10000; value <= 10000; value++) {
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      writeVarLong(value, out);
+      out.close();
+
+      BitInputStream in =
+          new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+      long decoded = readVarLong(in);
+      in.close();
+
+      Assert.assertEquals("Mismatch in range test for: " + value, value, decoded);
+    }
+  }
+
+  @Test
+  public void testVarLongBitLengthGrowth() throws IOException {
+    long[] values = {0, 1, 2, 64, 128, 8192, 1 << 20, Long.MAX_VALUE / 2};
+    int lastBits = 0;
+
+    for (long value : values) {
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      int bits = writeVarLong(value, out);
+      out.close();
+
+      Assert.assertTrue("Bit length didn't increase for " + value, bits >= lastBits);
+      lastBits = bits;
+    }
+  }
+
+  @Test
+  public void testVarIntSymmetry() throws IOException {
+    int[] values = {
+      0,
+      1,
+      -1,
+      63,
+      -63,
+      127,
+      -128,
+      255,
+      -256,
+      1023,
+      -1023,
+      16384,
+      -16384,
+      Integer.MAX_VALUE,
+      Integer.MIN_VALUE
+    };
+
+    for (int value : values) {
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      writeVarInt(value, out);
+      out.close();
+
+      BitInputStream in =
+          new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+      int decoded = BitInputStream.readVarInt(in);
+      in.close();
+
+      Assert.assertEquals("Mismatch for value: " + value, value, decoded);
+    }
+  }
+
+  @Test
+  public void testVarIntRange() throws IOException {
+    for (int value = -10000; value <= 10000; value++) {
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      writeVarInt(value, out);
+      out.close();
+
+      BitInputStream in =
+          new BitInputStream(new ByteArrayInputStream(bout.toByteArray()), out.getBitsWritten());
+      int decoded = in.readVarInt(in);
+      in.close();
+
+      Assert.assertEquals("Mismatch in range for value: " + value, value, decoded);
+    }
+  }
+
+  @Test
+  public void testBitLengthGrowth() throws IOException {
+    int[] values = {0, 1, 2, 64, 128, 1024, 16384, 1 << 20};
+    int lastBits = 0;
+
+    for (int value : values) {
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      BitOutputStream out = new BitOutputStream(bout);
+      int bits = writeVarInt(value, out);
+      out.close();
+
+      Assert.assertTrue("Bit length not increasing for value: " + value, bits >= lastBits);
+      lastBits = bits;
+    }
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/CamelDecoderTest.java b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/CamelDecoderTest.java
new file mode 100644
index 0000000..cb72c19
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/CamelDecoderTest.java
@@ -0,0 +1,281 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.common.bitStream.BitInputStream;
+import org.apache.tsfile.common.bitStream.BitOutputStream;
+import org.apache.tsfile.encoding.encoder.CamelEncoder;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class CamelDecoderTest {
+
+  @Test
+  public void testRandomizedCompressDecompress() throws Exception {
+    Random random = new Random();
+    int sampleSize = 10_000;
+    double[] original = new double[sampleSize];
+
+    // Generate random test data (excluding NaN and ±Infinity)
+    for (int i = 0; i < sampleSize; i++) {
+      double v;
+      do {
+        long bits = random.nextLong();
+        v = Double.longBitsToDouble(bits);
+      } while (Double.isNaN(v) || Double.isInfinite(v));
+      original[i] = v;
+    }
+
+    compressDecompressAndAssert(original, 0);
+  }
+
+  private void compressDecompressAndAssert(double[] original, double tolerance) throws Exception {
+    CamelEncoder encoder = new CamelEncoder();
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    for (double v : original) {
+      encoder.encode(v, bout);
+    }
+    encoder.flush(bout);
+    // Decode and verify
+    CamelDecoder decoder = new CamelDecoder();
+    ByteBuffer buffer = ByteBuffer.wrap(bout.toByteArray());
+
+    int i = 0;
+    while (decoder.hasNext(buffer)) {
+      double actual = decoder.readDouble(buffer);
+      double expected = original[i];
+      if (Double.isNaN(expected)) {
+        assertTrue("Expected NaN at index " + i, Double.isNaN(actual));
+      } else {
+        assertEquals("Mismatch at index " + i, expected, actual, tolerance);
+      }
+      i++;
+    }
+    assertEquals(original.length, i);
+  }
+
+  @Test
+  public void testSpecialFloatingValues() throws Exception {
+    double[] original =
+        new double[] {
+          Double.NaN,
+          Double.POSITIVE_INFINITY,
+          Double.NEGATIVE_INFINITY,
+          +0.0,
+          -0.0,
+          Double.MIN_VALUE,
+          -Double.MIN_VALUE,
+          Double.MIN_NORMAL,
+          -Double.MIN_NORMAL,
+          Double.MAX_VALUE,
+          -Double.MAX_VALUE
+        };
+    compressDecompressAndAssert(original, 0.0);
+  }
+
+  @Test
+  public void testMonotonicSequence() throws Exception {
+    double[] increasing = new double[500];
+    double[] decreasing = new double[500];
+    for (int i = 0; i < 500; i++) {
+      increasing[i] = 100.0 + i * 0.0001;
+      decreasing[i] = 100.0 - i * 0.0001;
+    }
+    compressDecompressAndAssert(increasing, 0);
+    compressDecompressAndAssert(decreasing, 0);
+  }
+
+  @Test
+  public void testPrecisionEdgeCases() throws Exception {
+    double[] original = {
+      9007199254740991.0, // 2^53 - 1
+      9007199254740992.0, // 2^53
+      9007199254740993.0,
+      1.0000000000000001, // Precision loss (equals 1.0)
+      1.0000000000000002,
+      12345,
+      21332213
+    };
+    compressDecompressAndAssert(original, 0.0);
+  }
+
+  @Test
+  public void testAlternatingSignsAndDecimals() throws Exception {
+    double[] original = new double[2];
+    for (int i = 0; i < 2; i++) {
+      double base = i * 0.123456 % 1000;
+      original[i] = (i % 2 == 0) ? base : -base;
+    }
+    compressDecompressAndAssert(original, 0.0);
+  }
+
+  @Test
+  public void testMinimalDeltaSequence() throws Exception {
+    double[] original = new double[64];
+    double base = 100.0;
+    for (int i = 0; i < original.length; i++) {
+      original[i] = base + i * Math.ulp(base);
+    }
+    compressDecompressAndAssert(original, 0.0);
+  }
+
+  @Test
+  public void testRepeatedValues() throws Exception {
+    double repeated = 123.456789;
+    double[] original = new double[1000];
+    Arrays.fill(original, repeated);
+    compressDecompressAndAssert(original, 0.0);
+  }
+
+  private void testGorillaValues(double[] values) throws Exception {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    BitOutputStream out = new BitOutputStream(bout);
+
+    CamelEncoder.GorillaEncoder encoder = new CamelEncoder().getGorillaEncoder();
+    for (double v : values) {
+      encoder.encode(v, out);
+    }
+    encoder.close(out);
+
+    byte[] encoded = bout.toByteArray();
+    BitInputStream in = new BitInputStream(new ByteArrayInputStream(encoded), out.getBitsWritten());
+    InputStream inputStream = new ByteArrayInputStream(encoded);
+    CamelDecoder.GorillaDecoder decoder =
+        new CamelDecoder(inputStream, out.getBitsWritten()).getGorillaDecoder();
+
+    int idx = 0;
+    for (double expected : values) {
+      double actual = decoder.decode(in);
+      Assert.assertEquals("Mismatch decoding: ", expected, actual, 0.0);
+    }
+  }
+
+  @Test
+  public void testGorillaAllZeros() throws Exception {
+    double[] values = new double[100];
+    Arrays.fill(values, 0.0);
+    testGorillaValues(values);
+  }
+
+  @Test
+  public void testGorillaConstantValue() throws Exception {
+    double[] values = new double[200];
+    Arrays.fill(values, 123456.789);
+    testGorillaValues(values);
+  }
+
+  @Test
+  public void testGorillaMinMaxValues() throws Exception {
+    double[] values = {
+      Double.MIN_VALUE, Double.MAX_VALUE, -Double.MAX_VALUE, -Double.MIN_VALUE, 0.0, -0.0
+    };
+    testGorillaValues(values);
+  }
+
+  @Test
+  public void testGorillaMixedSigns() throws Exception {
+    double[] values = {-1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7};
+    testGorillaValues(values);
+  }
+
+  @Test
+  public void testGorillaHighPrecisionValues() throws Exception {
+    double[] values = {0.1, 0.2, 0.3, 0.1 + 0.2, 0.4 - 0.1};
+    testGorillaValues(values);
+  }
+
+  @Test
+  public void testGorillaXorEdgeTrigger() throws Exception {
+    double[] values = {
+      1.00000001,
+      1.00000002,
+      1.00000003,
+      1.00000001, // back to earlier value
+      1.00000009
+    };
+    testGorillaValues(values);
+  }
+
+  @Test
+  public void testLargeSeries() throws Exception {
+    double[] values = new double[1000];
+    for (int i = 0; i < values.length; i++) {
+      values[i] = Math.sin(i / 10.0);
+    }
+    testGorillaValues(values);
+  }
+
+  private static final int[] FLUSH_SIZES = {32, 64, 128, 256, 512, 1000};
+  private static final int TOTAL_VALUES = 1_000_000;
+
+  @Test
+  public void testBatchFlushForVariousBlockSizes() throws IOException {
+    Random random = new Random(42);
+    for (int blockSize : FLUSH_SIZES) {
+      // Prepare encoder and output buffer
+      CamelEncoder encoder = new CamelEncoder();
+      ByteArrayOutputStream bout = new ByteArrayOutputStream();
+      double[] original = new double[TOTAL_VALUES];
+
+      // Generate random data and flush every blockSize values
+      for (int i = 0; i < TOTAL_VALUES; i++) {
+        double v;
+        do {
+          long bits = random.nextLong();
+          v = Double.longBitsToDouble(bits);
+        } while (Double.isNaN(v) || Double.isInfinite(v));
+        original[i] = v;
+        encoder.encode(v, bout);
+        if ((i + 1) % blockSize == 0) {
+          encoder.flush(bout);
+        }
+      }
+      // Final flush to cover trailing values
+      encoder.flush(bout);
+
+      // Decode and verify
+      CamelDecoder decoder = new CamelDecoder();
+      ByteBuffer buffer = ByteBuffer.wrap(bout.toByteArray());
+      for (int i = 0; i < TOTAL_VALUES; i++) {
+        Assert.assertTrue(
+            "Decoder should have next for blockSize=" + blockSize, decoder.hasNext(buffer));
+        double decoded = decoder.readDouble(buffer);
+
+        Assert.assertEquals(
+            "Mismatch at index " + i + " for blockSize=" + blockSize, original[i], decoded, 0);
+      }
+      Assert.assertFalse(
+          "Decoder should be exhausted after reading all values for blockSize=" + blockSize,
+          decoder.hasNext(buffer));
+    }
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/DescendingBitPackingDecoderTest.java b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/DescendingBitPackingDecoderTest.java
new file mode 100644
index 0000000..c6f9981
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/DescendingBitPackingDecoderTest.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.common.conf.TSFileDescriptor;
+import org.apache.tsfile.encoding.encoder.DescendingBitPackingEncoder;
+import org.apache.tsfile.encoding.encoder.Encoder;
+import org.apache.tsfile.enums.ColumnCategory;
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.ColumnSchemaBuilder;
+import org.apache.tsfile.file.metadata.TableSchema;
+import org.apache.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.tsfile.read.query.dataset.ResultSet;
+import org.apache.tsfile.read.query.dataset.ResultSetMetadata;
+import org.apache.tsfile.read.v4.ITsFileReader;
+import org.apache.tsfile.read.v4.TsFileReaderBuilder;
+import org.apache.tsfile.write.record.Tablet;
+import org.apache.tsfile.write.v4.ITsFileWriter;
+import org.apache.tsfile.write.v4.TsFileWriterBuilder;
+
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+public class DescendingBitPackingDecoderTest {
+  protected static long[] getTestData() {
+    return new long[] {
+      0,
+      -1,
+      1,
+      -2,
+      2,
+      0,
+      0,
+      -3,
+      3,
+      0,
+      -4,
+      0,
+      0,
+      0,
+      4,
+      -5,
+      5,
+      Long.MIN_VALUE,
+      Long.MAX_VALUE,
+      Long.MAX_VALUE - 1,
+      Long.MIN_VALUE + 1,
+      0,
+      0,
+      5,
+      -5,
+      10,
+      -2,
+      4,
+      3,
+      2,
+      1,
+      -1,
+      2,
+      -3,
+      1,
+      2,
+      1,
+      1,
+      -1,
+      -1,
+      0,
+      0,
+      0
+    };
+  }
+
+  protected static long[] getEndToEndTestData() {
+    int size = 10000;
+    long[] data = new long[size];
+    for (int i = 0; i < size; i++) {
+      data[i] = i % 2 == 0 ? i : -i;
+    }
+    return data;
+  }
+
+  @Test
+  public void test() throws Exception {
+    long[] original = getTestData();
+    compressDecompressAndAssert(
+        original, new DescendingBitPackingEncoder(), new DescendingBitPackingDecoder());
+  }
+
+  @Test
+  public void endToEndTest() throws Exception {
+    long[] original = getEndToEndTestData();
+    endToEndCompressDecompressAndAssert(original, "DESCENDING_BIT_PACKING");
+  }
+
+  protected static void compressDecompressAndAssert(
+      long[] original, Encoder encoder, Decoder decoder) throws Exception {
+    ByteArrayOutputStream bout = new ByteArrayOutputStream();
+    for (long v : original) {
+      encoder.encode(v, bout);
+    }
+    encoder.flush(bout);
+    // Decode and verify
+    ByteBuffer buffer = ByteBuffer.wrap(bout.toByteArray());
+
+    int i = 0;
+    while (decoder.hasNext(buffer)) {
+      long actual = decoder.readLong(buffer);
+      long expected = original[i];
+      assertEquals("Mismatch at index " + i, expected, actual);
+      i++;
+    }
+    assertEquals(original.length, i);
+  }
+
+  protected static long[] endToEndCompressDecompressAndAssert(long[] original, String encoder)
+      throws Exception {
+    long[] expResult = new long[3];
+    long startTime, endTime;
+
+    int rowNum = original.length;
+
+    TSFileDescriptor.getInstance().getConfig().setInt64Encoding(encoder);
+    String path = "test.tsfile";
+
+    startTime = System.nanoTime();
+    File f = FSFactoryProducer.getFSFactory().getFile(path);
+
+    String tableName = "table1";
+
+    TableSchema tableSchema =
+        new TableSchema(
+            tableName,
+            Arrays.asList(
+                new ColumnSchemaBuilder()
+                    .name("value")
+                    .dataType(TSDataType.INT64)
+                    .category(ColumnCategory.FIELD)
+                    .build()));
+
+    long memoryThreshold = 512;
+
+    ITsFileWriter writer =
+        new TsFileWriterBuilder()
+            .file(f)
+            .tableSchema(tableSchema)
+            .memoryThreshold(memoryThreshold)
+            .build();
+
+    Tablet tablet = new Tablet(Arrays.asList("value"), Arrays.asList(TSDataType.INT64), rowNum);
+
+    for (int row = 0; row < rowNum; row++) {
+      long timestamp = row;
+      tablet.addTimestamp(row, timestamp);
+      tablet.addValue(row, "value", original[row]);
+    }
+
+    writer.write(tablet);
+    writer.close();
+    endTime = System.nanoTime();
+    expResult[1] = endTime - startTime;
+
+    expResult[0] = Files.size(Paths.get(path));
+
+    startTime = System.nanoTime();
+    f = FSFactoryProducer.getFSFactory().getFile(path);
+
+    ITsFileReader reader = new TsFileReaderBuilder().file(f).build();
+
+    ResultSet resultSet = reader.query(tableName, Arrays.asList("value"), 0, rowNum - 1);
+
+    ResultSetMetadata metadata = resultSet.getMetadata();
+    System.out.println(metadata);
+
+    // StringJoiner sj = new StringJoiner(" ");
+    // for (int column = 1; column <= 1; column++) {
+    // sj.add(metadata.getColumnName(column) + "(" + metadata.getColumnType(column)
+    // + ") ");
+    // }
+    // System.out.println(sj.toString());
+
+    List<Long> result = new ArrayList<>();
+    while (resultSet.next()) {
+      Long timeField = resultSet.getLong("Time");
+      Long valueField = resultSet.isNull("value") ? null : resultSet.getLong("value");
+      result.add(valueField);
+    }
+    reader.close();
+    endTime = System.nanoTime();
+    expResult[2] = endTime - startTime;
+
+    assertEquals(original.length, result.size());
+    for (int i = 0; i < original.length; i++) {
+      assertEquals(original[i], result.get(i).longValue());
+    }
+
+    return expResult;
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/FleaDecoderTest.java b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/FleaDecoderTest.java
new file mode 100644
index 0000000..9a6c8ee
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/FleaDecoderTest.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.encoding.encoder.FleaEncoder;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Stream;
+
+public class FleaDecoderTest {
+  @Test
+  public void test() throws Exception {
+    long[] original = DescendingBitPackingDecoderTest.getTestData();
+    DescendingBitPackingDecoderTest.compressDecompressAndAssert(
+        original, new FleaEncoder(), new FleaDecoder());
+  }
+
+  @Test
+  public void endToEndTest() throws Exception {
+    long[] original = DescendingBitPackingDecoderTest.getEndToEndTestData();
+    DescendingBitPackingDecoderTest.endToEndCompressDecompressAndAssert(original, "FLEA");
+  }
+
+  @Test
+  public void fileTest() throws Exception {
+    List<String> files = new ArrayList<>();
+    String[] dataFolderPathList = {
+      "../../../encoding-periodic-ng/data", "../../../encoding-periodic-ng/data_no_period"
+    };
+    String resultPath = "../../../encoding-periodic-ng/exp_results/results_tsfile_integration.csv";
+
+    for (String dataFolderPath : dataFolderPathList) {
+      Path folder = Paths.get(dataFolderPath);
+      try (Stream<Path> paths = Files.walk(folder)) {
+        paths.filter(Files::isRegularFile).forEach(filePath -> files.add(filePath.toString()));
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
+    }
+    // open result file
+    Path resultFile = Paths.get(resultPath);
+    if (!Files.exists(resultFile)) {
+      Files.createFile(resultFile);
+    }
+    // write header (delete old content)
+    Files.write(
+        resultFile, "file,origin_size,compressed_size,encoding_time,decoding_time\n".getBytes());
+
+    for (String file : files) {
+      System.out.println("file: " + file);
+      Path path = Paths.get(file);
+      long[] original = Files.lines(path).skip(1).mapToLong(Long::parseLong).toArray();
+      long[] expResult =
+          DescendingBitPackingDecoderTest.endToEndCompressDecompressAndAssert(original, "FLEA");
+      Files.write(
+          resultFile,
+          (file
+                  + ","
+                  + original.length * 8
+                  + ","
+                  + expResult[0]
+                  + ","
+                  + expResult[1]
+                  + ","
+                  + expResult[2]
+                  + "\n")
+              .getBytes(),
+          java.nio.file.StandardOpenOption.APPEND);
+      System.out.println("OK");
+    }
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/LaminarDecoderTest.java b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/LaminarDecoderTest.java
new file mode 100644
index 0000000..fbb1fc4
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/LaminarDecoderTest.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.encoding.encoder.LaminarEncoder;
+
+import org.junit.Test;
+
+public class LaminarDecoderTest {
+  protected static long[] getPartitionTestData() {
+    return new long[] {
+      10, -9, 8, -7, 6, 5, -4, 3, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0
+    };
+  }
+
+  @Test
+  public void test() throws Exception {
+    long[] original = getPartitionTestData();
+    DescendingBitPackingDecoderTest.compressDecompressAndAssert(
+        original, new LaminarEncoder(), new LaminarDecoder());
+  }
+
+  @Test
+  public void endToEndTest() throws Exception {
+    long[] original = DescendingBitPackingDecoderTest.getEndToEndTestData();
+    DescendingBitPackingDecoderTest.endToEndCompressDecompressAndAssert(original, "LAMINAR");
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/SeparateStorageTest.java b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/SeparateStorageTest.java
new file mode 100644
index 0000000..f17f9d6
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/encoding/decoder/SeparateStorageTest.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.decoder;
+
+import org.apache.tsfile.encoding.encoder.SeparateStorageEncoder;
+
+import org.junit.Test;
+
+public class SeparateStorageTest {
+  @Test
+  public void test() throws Exception {
+    long[] original = DescendingBitPackingDecoderTest.getTestData();
+    DescendingBitPackingDecoderTest.compressDecompressAndAssert(
+        original, new SeparateStorageEncoder(), new SeparateStorageDecoder());
+  }
+
+  @Test
+  public void endToEndTest() throws Exception {
+    long[] original = DescendingBitPackingDecoderTest.getEndToEndTestData();
+    DescendingBitPackingDecoderTest.endToEndCompressDecompressAndAssert(
+        original, "SEPARATE_STORAGE");
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilderTest.java b/java/tsfile/src/test/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilderTest.java
new file mode 100644
index 0000000..7a866a3
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/encoding/encoder/TSEncodingBuilderTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.tsfile.encoding.encoder;
+
+import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.file.metadata.enums.TSEncoding;
+import org.apache.tsfile.write.UnSupportedDataTypeException;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class TSEncodingBuilderTest {
+
+  private static final String ERROR_MSG = "Unsupported dataType: %s doesn't support data type: %s";
+
+  @Test
+  public void testTSEncodingBuilder() {
+    Set<TSDataType> supportedDataTypes =
+        Arrays.stream(TSDataType.values()).collect(Collectors.toSet());
+    supportedDataTypes.remove(TSDataType.VECTOR);
+    supportedDataTypes.remove(TSDataType.UNKNOWN);
+
+    for (TSDataType dataType : supportedDataTypes) {
+      for (TSEncoding encoding : TSEncoding.values()) {
+        if (TSEncoding.isSupported(dataType, encoding)) {
+          try {
+            TSEncodingBuilder.getEncodingBuilder(encoding).getEncoder(dataType);
+          } catch (UnSupportedDataTypeException e) {
+            Assert.fail(e.getMessage());
+          }
+        } else {
+          try {
+            TSEncodingBuilder.getEncodingBuilder(encoding).getEncoder(dataType);
+            Assert.fail(String.format(ERROR_MSG, encoding, dataType));
+          } catch (UnsupportedOperationException e) {
+            Assert.assertEquals("Unsupported encoding: " + encoding, e.getMessage());
+          } catch (UnSupportedDataTypeException e) {
+            Assert.assertEquals(String.format(ERROR_MSG, encoding, dataType), e.getMessage());
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/read/TsFileV4ReadWriteInterfacesTest.java b/java/tsfile/src/test/java/org/apache/tsfile/read/TsFileV4ReadWriteInterfacesTest.java
index d9b7147..51aa64c 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/read/TsFileV4ReadWriteInterfacesTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/read/TsFileV4ReadWriteInterfacesTest.java
@@ -21,10 +21,13 @@
 
 import org.apache.tsfile.enums.ColumnCategory;
 import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.exception.write.WriteProcessException;
+import org.apache.tsfile.file.metadata.AbstractAlignedChunkMetadata;
 import org.apache.tsfile.file.metadata.IDeviceID;
 import org.apache.tsfile.file.metadata.StringArrayDeviceID;
 import org.apache.tsfile.file.metadata.TableSchema;
 import org.apache.tsfile.read.v4.DeviceTableModelReader;
+import org.apache.tsfile.utils.Pair;
 import org.apache.tsfile.utils.TsFileGeneratorForTest;
 import org.apache.tsfile.utils.TsFileGeneratorUtils;
 import org.apache.tsfile.write.record.Tablet;
@@ -37,6 +40,7 @@
 import org.junit.Test;
 
 import java.io.File;
+import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.ArrayList;
@@ -46,6 +50,104 @@
 public class TsFileV4ReadWriteInterfacesTest {
 
   @Test
+  public void testWriteSomeColumns() throws IOException, WriteProcessException {
+    String filePath = TsFileGeneratorForTest.getTestTsFilePath("db", 0, 0, 0);
+
+    TableSchema tableSchema =
+        new TableSchema(
+            "t1",
+            Arrays.asList(
+                new MeasurementSchema("device", TSDataType.STRING),
+                new MeasurementSchema("s1", TSDataType.INT32),
+                new MeasurementSchema("s2", TSDataType.INT32),
+                new MeasurementSchema("s3", TSDataType.INT32)),
+            Arrays.asList(
+                ColumnCategory.TAG,
+                ColumnCategory.FIELD,
+                ColumnCategory.FIELD,
+                ColumnCategory.FIELD));
+    Tablet tablet1 =
+        new Tablet(
+            tableSchema.getTableName(),
+            Arrays.asList("device", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT32),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.FIELD));
+    for (int i = 0; i < 1000; i++) {
+      tablet1.addTimestamp(i, i);
+      tablet1.addValue("device", i, "d1");
+      tablet1.addValue("s1", i, 0);
+    }
+    Tablet tablet2 =
+        new Tablet(
+            tableSchema.getTableName(),
+            IMeasurementSchema.getMeasurementNameList(tableSchema.getColumnSchemas()),
+            IMeasurementSchema.getDataTypeList(tableSchema.getColumnSchemas()),
+            tableSchema.getColumnTypes());
+    for (int i = 0; i < 1000; i++) {
+      tablet2.addTimestamp(i, 1005 + i);
+      tablet2.addValue("device", i, "d1");
+      tablet2.addValue("s1", i, 1);
+      tablet2.addValue("s2", i, 1);
+      tablet2.addValue("s3", i, 1);
+    }
+    try (ITsFileWriter writer =
+        new TsFileWriterBuilder()
+            .file(new File(filePath))
+            .tableSchema(tableSchema)
+            .memoryThreshold(1)
+            .build()) {
+      writer.write(tablet1);
+      writer.write(tablet2);
+    }
+    try (TsFileSequenceReader reader = new TsFileSequenceReader(filePath)) {
+      TsFileDeviceIterator deviceIterator = reader.getAllDevicesIteratorWithIsAligned();
+      while (deviceIterator.hasNext()) {
+        Pair<IDeviceID, Boolean> pair = deviceIterator.next();
+        List<AbstractAlignedChunkMetadata> alignedChunkMetadataList =
+            reader.getAlignedChunkMetadataByMetadataIndexNode(
+                pair.getLeft(), deviceIterator.getFirstMeasurementNodeOfCurrentDevice(), false);
+        Assert.assertFalse(alignedChunkMetadataList.isEmpty());
+        Assert.assertEquals(3, alignedChunkMetadataList.get(0).getValueChunkMetadataList().size());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(0)
+                .getValueChunkMetadataList()
+                .get(0)
+                .getStatistics()
+                .getCount());
+        Assert.assertNull(alignedChunkMetadataList.get(0).getValueChunkMetadataList().get(1));
+        Assert.assertNull(alignedChunkMetadataList.get(0).getValueChunkMetadataList().get(2));
+        Assert.assertEquals(3, alignedChunkMetadataList.get(1).getValueChunkMetadataList().size());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(0)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(1)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(2)
+                .getStatistics()
+                .getCount());
+      }
+    }
+  }
+
+  @Test
   public void testGetTableDeviceMethods() throws Exception {
     String filePath = TsFileGeneratorForTest.getTestTsFilePath("root.testsg", 0, 0, 0);
     try {
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/read/filter/ExtractTimeFilterTest.java b/java/tsfile/src/test/java/org/apache/tsfile/read/filter/ExtractTimeFilterTest.java
new file mode 100644
index 0000000..aff2001
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/read/filter/ExtractTimeFilterTest.java
@@ -0,0 +1,436 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.tsfile.read.filter;
+
+import org.apache.tsfile.read.common.TimeRange;
+import org.apache.tsfile.read.filter.basic.Filter;
+import org.apache.tsfile.read.filter.factory.TimeFilterApi;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators.Field;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.time.ZoneId;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+public class ExtractTimeFilterTest {
+  // 2025/07/08 09:18:51 00:00:00+8:00
+  private final long testTime1 = 1751937531000L;
+  // 2025/07/08 10:18:51 00:00:00+8:00
+  private final long testTime2 = 1751941131000L;
+  private final ZoneId zoneId1 = ZoneId.of("+0000");
+  private final ZoneId zoneId2 = ZoneId.of("+0800");
+
+  private final long DAY_INTERVAL = TimeUnit.DAYS.toMillis(1);
+
+  @Test
+  public void testEq() {
+    // 1751936400000L -> 2025/07/08 09:00:00+8:00
+    // 1751940000000L -> 2025/07/08 10:00:00+8:00
+    Filter extractTimeEq1 =
+        TimeFilterApi.extractTimeEq(1, Field.HOUR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertFalse(extractTimeEq1.satisfy(testTime2, 100));
+    Assert.assertTrue(extractTimeEq1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(extractTimeEq1.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(extractTimeEq1.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(extractTimeEq1.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertTrue(extractTimeEq1.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertTrue(extractTimeEq1.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(extractTimeEq1.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(extractTimeEq1.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(extractTimeEq1.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(extractTimeEq1.containStartEndTime(1751936400000L, 2751936400000L));
+
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    Filter extractTimeEq2 =
+        TimeFilterApi.extractTimeEq(9, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq2.satisfy(testTime1, 100));
+    Assert.assertFalse(extractTimeEq2.satisfy(testTime2, 100));
+    Assert.assertTrue(extractTimeEq2.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(extractTimeEq2.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(extractTimeEq2.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(extractTimeEq2.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertTrue(extractTimeEq2.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertTrue(extractTimeEq2.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(extractTimeEq2.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(extractTimeEq2.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(extractTimeEq2.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(extractTimeEq2.containStartEndTime(1751936400000L, 2751936400000L));
+
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq2.getTimeRanges());
+
+    // test other extracted results
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(2025, Field.YEAR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    // 1735689600000L -> 2025/01/01 00:00:00+00:00
+    // 1767225600000L -> 2026/01/01 00:00:00+00:00
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(1735689600000L, 1767225600000L - 1)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(3, Field.QUARTER, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(7, Field.MONTH, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(27, Field.WEEK, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(8, Field.DAY, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 =
+        TimeFilterApi.extractTimeEq(8, Field.DAY_OF_MONTH, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 =
+        TimeFilterApi.extractTimeEq(2, Field.DAY_OF_WEEK, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 =
+        TimeFilterApi.extractTimeEq(189, Field.DAY_OF_YEAR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(18, Field.MINUTE, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(51, Field.SECOND, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(0, Field.MS, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(0, Field.US, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(0, Field.NS, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(testTime1, 100));
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, Long.MAX_VALUE)),
+        extractTimeEq1.getTimeRanges());
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(0, Field.MS, zoneId1, TimeUnit.MICROSECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(1751937531000025L, 100));
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(25, Field.US, zoneId1, TimeUnit.MICROSECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(1751937531000025L, 100));
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(0, Field.NS, zoneId1, TimeUnit.MICROSECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(1751937531000025L, 100));
+
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(0, Field.MS, zoneId1, TimeUnit.NANOSECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(1751937531000025026L, 100));
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(25, Field.US, zoneId1, TimeUnit.NANOSECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(1751937531000025026L, 100));
+    extractTimeEq1 = TimeFilterApi.extractTimeEq(26, Field.NS, zoneId1, TimeUnit.NANOSECONDS);
+    Assert.assertTrue(extractTimeEq1.satisfy(1751937531000025026L, 100));
+  }
+
+  @Test
+  public void testNotEq() {
+    // 1751936400000L -> 2025/07/08 09:00:00+8:00
+    // 1751940000000L -> 2025/07/08 10:00:00+8:00
+    Filter filter1 = TimeFilterApi.extractTimeNotEq(1, Field.HOUR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter1.satisfy(testTime1, 100));
+    Assert.assertTrue(filter1.satisfy(testTime2, 100));
+    Assert.assertFalse(filter1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 2751936400000L));
+
+    // attention: actual contains, but the method returns false
+    Assert.assertFalse(
+        filter1.containStartEndTime(1751940000000L, 1751936400000L + DAY_INTERVAL - 1));
+
+    // 1735689600000L -> 2025/01/01 00:00:00+00:00
+    // 1767225600000L -> 2026/01/01 00:00:00+00:00
+    filter1 = TimeFilterApi.extractTimeNotEq(2025, Field.YEAR, zoneId1, TimeUnit.MICROSECONDS);
+    Assert.assertEquals(
+        Arrays.asList(
+            new TimeRange(Long.MIN_VALUE, 1735689600000_000L - 1),
+            new TimeRange(1767225600000_000L, Long.MAX_VALUE)),
+        filter1.getTimeRanges());
+
+    Filter filter2 = TimeFilterApi.extractTimeNotEq(9, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter2.satisfy(testTime1, 100));
+    Assert.assertTrue(filter2.satisfy(testTime2, 100));
+    Assert.assertFalse(filter2.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 2751936400000L));
+
+    // attention: actual contains, but the method returns false
+    Assert.assertFalse(
+        filter2.containStartEndTime(1751940000000L, 1751936400000L + DAY_INTERVAL - 1));
+
+    // 1735660800000L -> 2025/01/01 00:00:00+08:00
+    // 1767196800000L -> 2026/01/01 00:00:00+08:00
+    filter2 = TimeFilterApi.extractTimeNotEq(2025, Field.YEAR, zoneId2, TimeUnit.MICROSECONDS);
+    Assert.assertEquals(
+        Arrays.asList(
+            new TimeRange(Long.MIN_VALUE, 1735660800000_000L - 1),
+            new TimeRange(1767196800000_000L, Long.MAX_VALUE)),
+        filter2.getTimeRanges());
+  }
+
+  @Test
+  public void testGt() {
+    // 1751936400000L -> 2025/07/08 09:00:00+8:00
+    // 1751940000000L -> 2025/07/08 10:00:00+8:00
+    Filter filter1 = TimeFilterApi.extractTimeGt(5, Field.HOUR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter1.satisfy(testTime1, 100));
+    Assert.assertFalse(filter1.satisfy(testTime2, 100));
+    Assert.assertFalse(filter1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertFalse(filter1.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 2751936400000L));
+
+    // 1735689600000L -> 2025/01/01 00:00:00+00:00
+    // 1767225600000L -> 2026/01/01 00:00:00+00:00
+    filter1 = TimeFilterApi.extractTimeGt(2025, Field.YEAR, zoneId1, TimeUnit.MICROSECONDS);
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(1767225600000_000L, Long.MAX_VALUE)),
+        filter1.getTimeRanges());
+
+    Filter filter2 = TimeFilterApi.extractTimeGt(5, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(filter2.satisfy(testTime1, 100));
+    Assert.assertTrue(filter2.satisfy(testTime2, 100));
+    Assert.assertTrue(filter2.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertTrue(filter2.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 2751936400000L));
+
+    Filter filter3 = TimeFilterApi.extractTimeGt(9, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter3.satisfy(testTime1, 100));
+    Assert.assertTrue(filter3.satisfy(testTime2, 100));
+    Assert.assertTrue(filter3.satisfyStartEndTime(testTime1, testTime2));
+    Assert.assertFalse(filter3.containStartEndTime(testTime1, testTime2));
+  }
+
+  @Test
+  public void testGtEq() {
+    Filter filter1 = TimeFilterApi.extractTimeGtEq(5, Field.HOUR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter1.satisfy(testTime1, 100));
+    Assert.assertFalse(filter1.satisfy(testTime2, 100));
+    Assert.assertFalse(filter1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertFalse(filter1.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 2751936400000L));
+
+    // 1735689600000L -> 2025/01/01 00:00:00+00:00
+    // 1767225600000L -> 2026/01/01 00:00:00+00:00
+    filter1 = TimeFilterApi.extractTimeGtEq(2025, Field.YEAR, zoneId1, TimeUnit.NANOSECONDS);
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(1735689600000_000_000L, Long.MAX_VALUE)),
+        filter1.getTimeRanges());
+
+    Filter filter2 = TimeFilterApi.extractTimeGtEq(5, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(filter2.satisfy(testTime1, 100));
+    Assert.assertTrue(filter2.satisfy(testTime2, 100));
+    Assert.assertTrue(filter2.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertTrue(filter2.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 2751936400000L));
+
+    Filter filter3 = TimeFilterApi.extractTimeGtEq(9, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(filter3.satisfy(testTime1, 100));
+    Assert.assertTrue(filter3.satisfy(testTime2, 100));
+    Assert.assertTrue(filter3.satisfyStartEndTime(testTime1, testTime2));
+    Assert.assertTrue(filter3.containStartEndTime(testTime1, testTime2));
+  }
+
+  @Test
+  public void testLt() {
+    Filter filter1 = TimeFilterApi.extractTimeLt(5, Field.HOUR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(filter1.satisfy(testTime1, 100));
+    Assert.assertTrue(filter1.satisfy(testTime2, 100));
+    Assert.assertTrue(filter1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertTrue(filter1.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 2751936400000L));
+
+    // 1735689600000L -> 2025/01/01 00:00:00+00:00
+    // 1767225600000L -> 2026/01/01 00:00:00+00:00
+    filter1 = TimeFilterApi.extractTimeLt(2025, Field.YEAR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, 1735689600000L - 1)),
+        filter1.getTimeRanges());
+
+    Filter filter2 = TimeFilterApi.extractTimeLt(5, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter2.satisfy(testTime1, 100));
+    Assert.assertFalse(filter2.satisfy(testTime2, 100));
+    Assert.assertFalse(filter2.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertFalse(filter2.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 2751936400000L));
+
+    Filter filter3 = TimeFilterApi.extractTimeGtEq(9, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(filter3.satisfy(testTime1, 100));
+    Assert.assertTrue(filter3.satisfy(testTime2, 100));
+    Assert.assertTrue(filter3.satisfyStartEndTime(testTime1, testTime2));
+    Assert.assertTrue(filter3.containStartEndTime(testTime1, testTime2));
+  }
+
+  @Test
+  public void testLtEq() {
+    // 1751936400000L -> 2025/07/08 09:00:00+8:00
+    // 1751940000000L -> 2025/07/08 10:00:00+8:00
+    Filter filter1 = TimeFilterApi.extractTimeLtEq(5, Field.HOUR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertTrue(filter1.satisfy(testTime1, 100));
+    Assert.assertTrue(filter1.satisfy(testTime2, 100));
+    Assert.assertTrue(filter1.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertTrue(filter1.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertTrue(filter1.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter1.containStartEndTime(1751936400000L, 2751936400000L));
+
+    // 1735689600000L -> 2025/01/01 00:00:00+00:00
+    // 1767225600000L -> 2026/01/01 00:00:00+00:00
+    filter1 = TimeFilterApi.extractTimeLtEq(2025, Field.YEAR, zoneId1, TimeUnit.MILLISECONDS);
+    Assert.assertEquals(
+        Collections.singletonList(new TimeRange(Long.MIN_VALUE, 1767225600000L - 1)),
+        filter1.getTimeRanges());
+
+    Filter filter2 = TimeFilterApi.extractTimeLt(5, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter2.satisfy(testTime1, 100));
+    Assert.assertFalse(filter2.satisfy(testTime2, 100));
+    Assert.assertFalse(filter2.satisfyStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertTrue(filter2.satisfyStartEndTime(1751936400000L, 2751936400000L));
+    Assert.assertFalse(filter2.satisfyStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.satisfyStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 1751940000000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(testTime1 - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 1, 1751940000000L));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 1, testTime1 + 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L - 2, 1751936400000L - 1));
+    Assert.assertFalse(filter2.containStartEndTime(1751936400000L, 2751936400000L));
+
+    Filter filter3 = TimeFilterApi.extractTimeGt(9, Field.HOUR, zoneId2, TimeUnit.MILLISECONDS);
+    Assert.assertFalse(filter3.satisfy(testTime1, 100));
+    Assert.assertTrue(filter3.satisfy(testTime2, 100));
+    Assert.assertTrue(filter3.satisfyStartEndTime(testTime1, testTime2));
+    Assert.assertFalse(filter3.containStartEndTime(testTime1, testTime2));
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/read/filter/ExtractValueFilterTest.java b/java/tsfile/src/test/java/org/apache/tsfile/read/filter/ExtractValueFilterTest.java
new file mode 100644
index 0000000..63cb0b0
--- /dev/null
+++ b/java/tsfile/src/test/java/org/apache/tsfile/read/filter/ExtractValueFilterTest.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.tsfile.read.filter;
+
+import org.apache.tsfile.file.metadata.IMetadata;
+import org.apache.tsfile.file.metadata.statistics.LongStatistics;
+import org.apache.tsfile.read.filter.basic.ValueFilter;
+import org.apache.tsfile.read.filter.factory.ValueFilterApi;
+import org.apache.tsfile.read.filter.operator.ExtractTimeFilterOperators;
+import org.apache.tsfile.read.filter.operator.ExtractValueFilterOperators;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.time.ZoneId;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.tsfile.read.filter.FilterTestUtil.newMetadata;
+
+public class ExtractValueFilterTest {
+  private final ZoneId zoneId1 = ZoneId.of("+0000");
+  private final ZoneId zoneId2 = ZoneId.of("+0800");
+
+  private final LongStatistics statistics = new LongStatistics();
+  private final IMetadata metadata = newMetadata(statistics);
+
+  // Test delegate logic is right
+  @Test
+  public void test() {
+    statistics.setEmpty(false);
+    // 2025/07/08 09:18:51 00:00:00+8:00
+    long testTime1 = 1751937531000L;
+    // 2025/07/08 10:18:51 00:00:00+8:00
+    long testTime2 = 1751941131000L;
+
+    // 1751936400000L -> 2025/07/08 09:00:00+8:00
+    // 1751940000000L -> 2025/07/08 10:00:00+8:00
+    ValueFilter extractValueEq1 =
+        new ExtractValueFilterOperators.ExtractValueEq(
+            ValueFilterApi.DEFAULT_MEASUREMENT_INDEX,
+            1,
+            ExtractTimeFilterOperators.Field.HOUR,
+            zoneId1,
+            TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractValueEq1.satisfy(testTime1, testTime1));
+    Assert.assertFalse(extractValueEq1.satisfy(testTime2, testTime2));
+    statistics.initializeStats(testTime1 - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.canSkip(metadata));
+    statistics.initializeStats(1751936400000L, 2751936400000L, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.canSkip(metadata));
+    statistics.initializeStats(1751936400000L - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.canSkip(metadata));
+    statistics.initializeStats(1751936400000L - 2, 1751936400000L - 1, 0, 0, 0);
+    Assert.assertTrue(extractValueEq1.canSkip(metadata));
+
+    statistics.initializeStats(1751936400000L, 1751940000000L - 1, 0, 0, 0);
+    Assert.assertTrue(extractValueEq1.allSatisfy(metadata));
+    statistics.initializeStats(testTime1 - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertTrue(extractValueEq1.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L - 1, 1751940000000L, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L - 2, 1751936400000L - 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L, 2751936400000L, 0, 0, 0);
+    Assert.assertFalse(extractValueEq1.allSatisfy(metadata));
+
+    ValueFilter extractValueEq2 =
+        new ExtractValueFilterOperators.ExtractValueEq(
+            ValueFilterApi.DEFAULT_MEASUREMENT_INDEX,
+            9,
+            ExtractTimeFilterOperators.Field.HOUR,
+            zoneId2,
+            TimeUnit.MILLISECONDS);
+    Assert.assertTrue(extractValueEq2.satisfy(testTime1, testTime1));
+    Assert.assertFalse(extractValueEq2.satisfy(testTime2, testTime2));
+    statistics.initializeStats(testTime1 - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.canSkip(metadata));
+    statistics.initializeStats(1751936400000L, 2751936400000L, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.canSkip(metadata));
+    statistics.initializeStats(1751936400000L - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.canSkip(metadata));
+    statistics.initializeStats(1751936400000L - 2, 1751936400000L - 1, 0, 0, 0);
+    Assert.assertTrue(extractValueEq2.canSkip(metadata));
+
+    statistics.initializeStats(1751936400000L, 1751940000000L - 1, 0, 0, 0);
+    Assert.assertTrue(extractValueEq2.allSatisfy(metadata));
+    statistics.initializeStats(testTime1 - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertTrue(extractValueEq2.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L - 1, 1751940000000L, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L - 1, testTime1 + 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L - 2, 1751936400000L - 1, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.allSatisfy(metadata));
+    statistics.initializeStats(1751936400000L, 2751936400000L, 0, 0, 0);
+    Assert.assertFalse(extractValueEq2.allSatisfy(metadata));
+  }
+}
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/tableview/TableViewTest.java b/java/tsfile/src/test/java/org/apache/tsfile/tableview/TableViewTest.java
index 361cd3f..17552af 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/tableview/TableViewTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/tableview/TableViewTest.java
@@ -35,11 +35,15 @@
 import org.apache.tsfile.read.controller.MetadataQuerierByFileImpl;
 import org.apache.tsfile.read.expression.QueryExpression;
 import org.apache.tsfile.read.query.dataset.QueryDataSet;
+import org.apache.tsfile.read.query.dataset.ResultSet;
+import org.apache.tsfile.read.query.dataset.ResultSetMetadata;
 import org.apache.tsfile.read.query.executor.QueryExecutor;
 import org.apache.tsfile.read.query.executor.TableQueryExecutor;
 import org.apache.tsfile.read.query.executor.TableQueryExecutor.TableQueryOrdering;
 import org.apache.tsfile.read.query.executor.TsFileExecutor;
 import org.apache.tsfile.read.reader.block.TsBlockReader;
+import org.apache.tsfile.read.v4.ITsFileReader;
+import org.apache.tsfile.read.v4.TsFileReaderBuilder;
 import org.apache.tsfile.utils.Binary;
 import org.apache.tsfile.utils.TsFileSketchTool;
 import org.apache.tsfile.write.TsFileWriter;
@@ -154,6 +158,29 @@
   }
 
   @Test
+  public void testReadCaseSensitivity() throws Exception {
+    final File testFile = new File(testDir, "testFile");
+    writeTsFile(testTableSchema, testFile);
+
+    ArrayList<String> columns = new ArrayList<>(Arrays.asList("ID1", "ID2", "S1", "S2"));
+    try (ITsFileReader reader = new TsFileReaderBuilder().file(testFile).build();
+        ResultSet resultSet = reader.query(testTableSchema.getTableName(), columns, 2, 8)) {
+      // first column is Time
+      ResultSetMetadata metadata = resultSet.getMetadata();
+      for (int column = 2; column <= 5; column++) {
+        assertEquals(metadata.getColumnName(column), columns.get(column - 2));
+      }
+      while (resultSet.next()) {
+        Long timeField = resultSet.getLong("Time");
+        assertFalse(resultSet.isNull("ID1"));
+        assertFalse(resultSet.isNull("id2"));
+        assertFalse(resultSet.isNull("s1"));
+        assertFalse(resultSet.isNull("S2"));
+      }
+    }
+  }
+
+  @Test
   public void testDeviceIdWithNull() throws Exception {
     final File testFile = new File(testDir, "testFile");
     TableSchema tableSchema;
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/utils/BitMapTest.java b/java/tsfile/src/test/java/org/apache/tsfile/utils/BitMapTest.java
index 3aca8e6..2b6e10c 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/utils/BitMapTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/utils/BitMapTest.java
@@ -20,6 +20,9 @@
 
 import org.junit.Test;
 
+import java.util.Arrays;
+import java.util.Random;
+
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -104,4 +107,110 @@
 
     assertEquals((byte) 0b00001000, truncatedArray[0]);
   }
+
+  @Test
+  public void exhaustiveMergeTest() {
+    int maxLen = 96;
+    int maxSize = 128;
+    for (int i = 1; i <= maxLen; i++) {
+      for (int j = i; j <= maxSize; j++) {
+        for (int k = 0; k <= j - i; k++) {
+          for (int m = 0; m <= maxSize - i; m++) {
+            runOneCase(j, k, maxSize, m, i);
+          }
+        }
+      }
+    }
+  }
+
+  private static void runOneCase(int srcSize, int srcStart, int destSize, int destStart, int len) {
+    Random r = new Random();
+    BitMap src = new BitMap(srcSize);
+    BitMap dst = new BitMap(destSize);
+
+    for (int i = 0; i < src.getSize(); i++) {
+      if (r.nextBoolean()) {
+        src.mark(i);
+      }
+    }
+
+    for (int i = 0; i < dst.getSize(); i++) {
+      if (r.nextBoolean()) {
+        dst.mark(i);
+      }
+    }
+
+    BitMap copy =
+        new BitMap(src.getSize(), Arrays.copyOf(dst.getByteArray(), dst.getByteArray().length));
+
+    for (int i = 0; i < len; i++) {
+      if (src.isMarked(srcStart + i)) {
+        copy.mark(destStart + i);
+      }
+    }
+
+    dst.merge(src, srcStart, destStart, len);
+    assertArrayEquals(copy.getByteArray(), dst.getByteArray());
+  }
+
+  @Test
+  public void emptyRange() {
+    BitMap map = new BitMap(1);
+    map.markRange(0, 0);
+    assertEquals((byte) 0x00, map.getByteArray()[0]);
+  }
+
+  @Test
+  public void singleByteAllBits() {
+    for (int i = 0; i < 8; i++) {
+      for (int j = 0; j <= 8 - i; j++) {
+        doTest(8, i, j);
+      }
+    }
+  }
+
+  @Test
+  public void twoBytesHeadTail() {
+    for (int i = 0; i < 64; i++) {
+      for (int j = 0; j <= 64 - i; j += 8) {
+        doTest(64, i, j);
+      }
+    }
+  }
+
+  @Test
+  public void twoBytesPartialHead() {
+    for (int i = 0; i < 64; i += 8) {
+      for (int j = 0; j <= 64 - i; j += 8) {
+        doTest(64, i, j);
+      }
+    }
+  }
+
+  @Test
+  public void twoBytesPartialTail() {
+    int size = 64;
+    for (int i = 0; i < size; i += 8) {
+      for (int j = 1; j <= size - i; j++) {
+        doTest(size, i, j);
+      }
+    }
+  }
+
+  private void doTest(int size, int start, int length) {
+    BitMap map = new BitMap(size);
+    BitMap bitMap = new BitMap(size);
+    map.markRange(start, length);
+    for (int i = start; i < start + length; i++) {
+      bitMap.mark(i);
+    }
+    assertArrayEquals(bitMap.getByteArray(), map.getByteArray());
+
+    map.unmarkRange(start, length);
+    for (int i = start; i < start + length; i++) {
+      bitMap.unmark(i);
+    }
+    System.out.println(start + "        " + length);
+    assertArrayEquals(bitMap.getByteArray(), map.getByteArray());
+  }
 }
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/utils/TsFileUtilsTest.java b/java/tsfile/src/test/java/org/apache/tsfile/utils/TsFileUtilsTest.java
index caf5401..b6e79fc 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/utils/TsFileUtilsTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/utils/TsFileUtilsTest.java
@@ -20,6 +20,7 @@
 
 import org.apache.tsfile.common.conf.TSFileConfig;
 import org.apache.tsfile.constant.TestConstant;
+import org.apache.tsfile.read.TsFileSequenceReader;
 import org.apache.tsfile.write.writer.LocalTsFileOutput;
 import org.apache.tsfile.write.writer.TsFileIOWriter;
 
@@ -67,6 +68,9 @@
   @Test
   public void isTsFileCompleteTest() throws IOException {
     Assert.assertTrue(TsFileUtils.isTsFileComplete(new File(COMPLETE_FILE_PATH)));
-    Assert.assertFalse(TsFileUtils.isTsFileComplete(new File(INCOMPLETE_FILE_PATH)));
+    try (TsFileSequenceReader reader = new TsFileSequenceReader(INCOMPLETE_FILE_PATH)) {
+      Assert.fail();
+    } catch (Exception ignored) {
+    }
   }
 }
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/utils/TypeCastTest.java b/java/tsfile/src/test/java/org/apache/tsfile/utils/TypeCastTest.java
index 10d26db..4d1d000 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/utils/TypeCastTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/utils/TypeCastTest.java
@@ -24,6 +24,7 @@
 import org.junit.Test;
 
 import java.nio.charset.StandardCharsets;
+import java.time.LocalDate;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
@@ -46,7 +47,21 @@
       for (TSDataType to : dataTypes) {
         Object src = genValue(from);
         if (to.isCompatible(from)) {
-          assertEquals(genValue(to), to.castFromSingleValue(from, src));
+          if (to == TSDataType.STRING || to == TSDataType.TEXT) {
+            if (from == TSDataType.DATE) {
+              assertEquals(
+                  new Binary(LocalDate.ofEpochDay((int) src).toString(), StandardCharsets.UTF_8),
+                  new Binary(
+                      LocalDate.ofEpochDay(Long.parseLong(genValue(to).toString())).toString(),
+                      StandardCharsets.UTF_8));
+            } else {
+              assertEquals(
+                  new Binary(src.toString(), StandardCharsets.UTF_8),
+                  to.castFromSingleValue(from, src));
+            }
+          } else {
+            assertEquals(genValue(to), to.castFromSingleValue(from, src));
+          }
         } else {
           assertThrows(ClassCastException.class, () -> to.castFromSingleValue(from, src));
         }
@@ -66,7 +81,7 @@
         Object array = genValueArray(from);
         if (!to.isCompatible(from)) {
           assertThrows(ClassCastException.class, () -> to.castFromArray(from, array));
-          return;
+          continue;
         }
         switch (to) {
           case INT32:
@@ -84,8 +99,66 @@
           case STRING:
           case BLOB:
           case TEXT:
-            assertArrayEquals(
-                (Binary[]) genValueArray(to), (Binary[]) to.castFromArray(from, array));
+            switch (from) {
+              case BLOB:
+              case STRING:
+                assertArrayEquals((Binary[]) array, (Binary[]) to.castFromArray(from, array));
+                break;
+              case INT32:
+                int[] tmpInt = (int[]) array;
+                Binary[] intResult = new Binary[tmpInt.length];
+                for (int i = 0; i < tmpInt.length; i++) {
+                  intResult[i] = new Binary(String.valueOf(tmpInt[i]), StandardCharsets.UTF_8);
+                }
+                assertArrayEquals(intResult, (Binary[]) to.castFromArray(from, array));
+                break;
+              case DATE:
+                int[] tmpDate = (int[]) array;
+                Binary[] dateResult = new Binary[tmpDate.length];
+                for (int i = 0; i < tmpDate.length; i++) {
+                  dateResult[i] =
+                      new Binary(TSDataType.getDateStringValue(tmpDate[i]), StandardCharsets.UTF_8);
+                }
+                assertArrayEquals(dateResult, (Binary[]) to.castFromArray(from, array));
+                break;
+              case INT64:
+              case TIMESTAMP:
+                long[] tmpLong = (long[]) array;
+                Binary[] longResult = new Binary[tmpLong.length];
+                for (int i = 0; i < tmpLong.length; i++) {
+                  longResult[i] = new Binary(String.valueOf(tmpLong[i]), StandardCharsets.UTF_8);
+                }
+                assertArrayEquals(longResult, (Binary[]) to.castFromArray(from, array));
+                break;
+              case FLOAT:
+                float[] tmpFloat = (float[]) array;
+                Binary[] floatResult = new Binary[tmpFloat.length];
+                for (int i = 0; i < tmpFloat.length; i++) {
+                  floatResult[i] = new Binary(String.valueOf(tmpFloat[i]), StandardCharsets.UTF_8);
+                }
+                assertArrayEquals(floatResult, (Binary[]) to.castFromArray(from, array));
+                break;
+              case DOUBLE:
+                double[] tmpDouble = (double[]) array;
+                Binary[] doubleResult = new Binary[tmpDouble.length];
+                for (int i = 0; i < tmpDouble.length; i++) {
+                  doubleResult[i] =
+                      new Binary(String.valueOf(tmpDouble[i]), StandardCharsets.UTF_8);
+                }
+                assertArrayEquals(doubleResult, (Binary[]) to.castFromArray(from, array));
+                break;
+              case BOOLEAN:
+                boolean[] tmpBoolean = (boolean[]) array;
+                Binary[] booleanResult = new Binary[tmpBoolean.length];
+                for (int i = 0; i < tmpBoolean.length; i++) {
+                  booleanResult[i] =
+                      new Binary(String.valueOf(tmpBoolean[i]), StandardCharsets.UTF_8);
+                }
+                assertArrayEquals(booleanResult, (Binary[]) to.castFromArray(from, array));
+                break;
+              default:
+                break;
+            }
             break;
           case FLOAT:
             assertArrayEquals(
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/write/ChunkRewriteTest.java b/java/tsfile/src/test/java/org/apache/tsfile/write/ChunkRewriteTest.java
index 90215cc..30e3605 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/write/ChunkRewriteTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/write/ChunkRewriteTest.java
@@ -385,7 +385,7 @@
             measurementSchema.getMeasurementName(),
             newChunkData.capacity(),
             TSDataType.VECTOR,
-            measurementSchema.getCompressor(),
+            measurementSchema.getTimeCompressor(),
             measurementSchema.getTimeTSEncoding(),
             timeChunkWriter.getNumOfPages());
     return new Chunk(newChunkHeader, newChunkData, null, timeChunkWriter.getStatistics());
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileIOWriterTest.java b/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileIOWriterTest.java
index 1dd2eff..51a8b80 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileIOWriterTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileIOWriterTest.java
@@ -19,22 +19,32 @@
 package org.apache.tsfile.write;
 
 import org.apache.tsfile.common.conf.TSFileConfig;
+import org.apache.tsfile.common.conf.TSFileDescriptor;
 import org.apache.tsfile.common.constant.TsFileConstant;
 import org.apache.tsfile.constant.TestConstant;
+import org.apache.tsfile.enums.ColumnCategory;
 import org.apache.tsfile.enums.TSDataType;
+import org.apache.tsfile.exception.write.WriteProcessException;
 import org.apache.tsfile.file.MetaMarker;
 import org.apache.tsfile.file.header.ChunkGroupHeader;
 import org.apache.tsfile.file.header.ChunkHeader;
+import org.apache.tsfile.file.metadata.ChunkMetadata;
+import org.apache.tsfile.file.metadata.ColumnSchema;
 import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.file.metadata.IDeviceID.Factory;
 import org.apache.tsfile.file.metadata.MetadataIndexNode;
+import org.apache.tsfile.file.metadata.TableSchema;
 import org.apache.tsfile.file.metadata.TimeseriesMetadata;
 import org.apache.tsfile.file.metadata.TsFileMetadata;
+import org.apache.tsfile.file.metadata.enums.CompressionType;
 import org.apache.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.tsfile.file.metadata.statistics.Statistics;
 import org.apache.tsfile.file.metadata.utils.TestHelper;
 import org.apache.tsfile.read.TsFileSequenceReader;
+import org.apache.tsfile.read.common.Chunk;
 import org.apache.tsfile.read.common.Path;
 import org.apache.tsfile.utils.MeasurementGroup;
+import org.apache.tsfile.write.record.Tablet;
 import org.apache.tsfile.write.schema.IMeasurementSchema;
 import org.apache.tsfile.write.schema.MeasurementSchema;
 import org.apache.tsfile.write.schema.Schema;
@@ -49,11 +59,14 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import static org.junit.Assert.assertEquals;
+
 public class TsFileIOWriterTest {
 
   private static final String FILE_PATH =
@@ -100,13 +113,93 @@
   }
 
   @Test
+  public void changeTypeCompressionTest() throws IOException, WriteProcessException {
+    TSFileConfig config = TSFileDescriptor.getInstance().getConfig();
+    CompressionType prevInt32Compression = config.getCompressor(TSDataType.INT32);
+    CompressionType prevTextCompression = config.getCompressor(TSDataType.TEXT);
+    config.setInt32Compression("UNCOMPRESSED");
+    config.setTextCompression("GZIP");
+
+    try (TsFileIOWriter ioWriter =
+            new TsFileIOWriter(
+                new File(
+                    TestConstant.BASE_OUTPUT_PATH.concat("changeTypeCompressionTest.tsfile")));
+        TsFileWriter fileWriter = new TsFileWriter(ioWriter)) {
+      fileWriter.registerTimeseries(
+          Factory.DEFAULT_FACTORY.create("root.db1.d1"),
+          new MeasurementSchema("s1", TSDataType.INT32));
+      fileWriter.registerTimeseries(
+          Factory.DEFAULT_FACTORY.create("root.db1.d1"),
+          new MeasurementSchema("s2", TSDataType.TEXT));
+      TableSchema tableSchema =
+          new TableSchema(
+              "t1",
+              Arrays.asList(
+                  new ColumnSchema("s1", TSDataType.INT32, ColumnCategory.FIELD),
+                  new ColumnSchema("s2", TSDataType.TEXT, ColumnCategory.FIELD)));
+      fileWriter.registerTableSchema(tableSchema);
+
+      Tablet treeTablet =
+          new Tablet(
+              "root.db1.d1",
+              Arrays.asList(
+                  new MeasurementSchema("s1", TSDataType.INT32),
+                  new MeasurementSchema("s2", TSDataType.TEXT)));
+      treeTablet.addTimestamp(0, 0);
+      treeTablet.addValue(0, 0, 0);
+      treeTablet.addValue(0, 1, "0");
+      fileWriter.writeTree(treeTablet);
+
+      Tablet tableTablet =
+          new Tablet(
+              "t1",
+              Arrays.asList("s1", "s2"),
+              Arrays.asList(TSDataType.INT32, TSDataType.TEXT),
+              Arrays.asList(ColumnCategory.FIELD, ColumnCategory.FIELD));
+      tableTablet.addTimestamp(0, 0);
+      tableTablet.addValue(0, 0, 0);
+      tableTablet.addValue(0, 1, "0");
+      fileWriter.writeTable(tableTablet);
+      fileWriter.flush();
+
+      ChunkMetadata s1TreeChunkMeta =
+          ioWriter.getChunkGroupMetadataList().get(0).getChunkMetadataList().get(0);
+      ChunkMetadata s2TreeChunkMeta =
+          ioWriter.getChunkGroupMetadataList().get(0).getChunkMetadataList().get(1);
+      ChunkMetadata s1TableChunkMeta =
+          ioWriter.getChunkGroupMetadataList().get(1).getChunkMetadataList().get(1);
+      ChunkMetadata s2TableChunkMeta =
+          ioWriter.getChunkGroupMetadataList().get(1).getChunkMetadataList().get(2);
+
+      fileWriter.close();
+
+      try (TsFileSequenceReader sequenceReader =
+          new TsFileSequenceReader(
+              TestConstant.BASE_OUTPUT_PATH.concat("changeTypeCompressionTest.tsfile"))) {
+        Chunk chunk = sequenceReader.readMemChunk(s1TreeChunkMeta);
+        assertEquals(CompressionType.UNCOMPRESSED, chunk.getHeader().getCompressionType());
+        chunk = sequenceReader.readMemChunk(s2TreeChunkMeta);
+        assertEquals(CompressionType.GZIP, chunk.getHeader().getCompressionType());
+        chunk = sequenceReader.readMemChunk(s1TableChunkMeta);
+        assertEquals(CompressionType.UNCOMPRESSED, chunk.getHeader().getCompressionType());
+        chunk = sequenceReader.readMemChunk(s2TableChunkMeta);
+        assertEquals(CompressionType.GZIP, chunk.getHeader().getCompressionType());
+      }
+
+    } finally {
+      config.setInt32Compression(prevInt32Compression.name());
+      config.setTextCompression(prevTextCompression.name());
+    }
+  }
+
+  @Test
   public void endFileTest() throws IOException {
     TsFileSequenceReader reader = new TsFileSequenceReader(FILE_PATH);
 
     // magic_string
-    Assert.assertEquals(TSFileConfig.MAGIC_STRING, reader.readHeadMagic());
-    Assert.assertEquals(TSFileConfig.VERSION_NUMBER, reader.readVersionNumber());
-    Assert.assertEquals(TSFileConfig.MAGIC_STRING, reader.readTailMagic());
+    assertEquals(TSFileConfig.MAGIC_STRING, reader.readHeadMagic());
+    assertEquals(TSFileConfig.VERSION_NUMBER, reader.readVersionNumber());
+    assertEquals(TSFileConfig.MAGIC_STRING, reader.readTailMagic());
 
     reader.position(TSFileConfig.MAGIC_STRING.getBytes().length + 1);
 
@@ -114,39 +207,39 @@
     ChunkGroupHeader chunkGroupHeader;
     for (int i = 0; i < CHUNK_GROUP_NUM; i++) {
       // chunk group header
-      Assert.assertEquals(MetaMarker.CHUNK_GROUP_HEADER, reader.readMarker());
+      assertEquals(MetaMarker.CHUNK_GROUP_HEADER, reader.readMarker());
       chunkGroupHeader = reader.readChunkGroupHeader();
-      Assert.assertEquals(DEVICE_1, chunkGroupHeader.getDeviceID());
+      assertEquals(DEVICE_1, chunkGroupHeader.getDeviceID());
       // ordinary chunk header
-      Assert.assertEquals(MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER, reader.readMarker());
+      assertEquals(MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER, reader.readMarker());
       header = reader.readChunkHeader(MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER);
-      Assert.assertEquals(SENSOR_1, header.getMeasurementID());
+      assertEquals(SENSOR_1, header.getMeasurementID());
     }
 
     for (int i = 0; i < CHUNK_GROUP_NUM; i++) {
       // chunk group header
-      Assert.assertEquals(MetaMarker.CHUNK_GROUP_HEADER, reader.readMarker());
+      assertEquals(MetaMarker.CHUNK_GROUP_HEADER, reader.readMarker());
       chunkGroupHeader = reader.readChunkGroupHeader();
-      Assert.assertEquals(DEVICE_2, chunkGroupHeader.getDeviceID());
+      assertEquals(DEVICE_2, chunkGroupHeader.getDeviceID());
       // vector chunk header (time)
-      Assert.assertEquals(MetaMarker.ONLY_ONE_PAGE_TIME_CHUNK_HEADER, reader.readMarker());
+      assertEquals(MetaMarker.ONLY_ONE_PAGE_TIME_CHUNK_HEADER, reader.readMarker());
       header = reader.readChunkHeader(MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER);
-      Assert.assertEquals("", header.getMeasurementID());
+      assertEquals("", header.getMeasurementID());
       // vector chunk header (values)
-      Assert.assertEquals(MetaMarker.ONLY_ONE_PAGE_VALUE_CHUNK_HEADER, reader.readMarker());
+      assertEquals(MetaMarker.ONLY_ONE_PAGE_VALUE_CHUNK_HEADER, reader.readMarker());
       header = reader.readChunkHeader(MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER);
-      Assert.assertEquals("s1", header.getMeasurementID());
-      Assert.assertEquals(MetaMarker.ONLY_ONE_PAGE_VALUE_CHUNK_HEADER, reader.readMarker());
+      assertEquals("s1", header.getMeasurementID());
+      assertEquals(MetaMarker.ONLY_ONE_PAGE_VALUE_CHUNK_HEADER, reader.readMarker());
       header = reader.readChunkHeader(MetaMarker.ONLY_ONE_PAGE_CHUNK_HEADER);
-      Assert.assertEquals("s2", header.getMeasurementID());
+      assertEquals("s2", header.getMeasurementID());
     }
 
-    Assert.assertEquals(MetaMarker.OPERATION_INDEX_RANGE, reader.readMarker());
+    assertEquals(MetaMarker.OPERATION_INDEX_RANGE, reader.readMarker());
     reader.readPlanIndex();
-    Assert.assertEquals(100, reader.getMinPlanIndex());
-    Assert.assertEquals(10000, reader.getMaxPlanIndex());
+    assertEquals(100, reader.getMinPlanIndex());
+    assertEquals(10000, reader.getMaxPlanIndex());
 
-    Assert.assertEquals(MetaMarker.SEPARATOR, reader.readMarker());
+    assertEquals(MetaMarker.SEPARATOR, reader.readMarker());
 
     // make sure timeseriesMetadata is only
     Map<IDeviceID, List<TimeseriesMetadata>> deviceTimeseriesMetadataMap =
@@ -167,7 +260,7 @@
     for (MetadataIndexNode node : metaData.getTableMetadataIndexNodeMap().values()) {
       cnt += node.getChildren().size();
     }
-    Assert.assertEquals(2, cnt);
+    assertEquals(2, cnt);
   }
 
   private void writeChunkGroup(TsFileIOWriter writer, IMeasurementSchema measurementSchema)
@@ -200,7 +293,7 @@
       // vector chunk (time)
       writer.startFlushChunk(
           vectorMeasurementSchema.getMeasurementName(),
-          vectorMeasurementSchema.getCompressor(),
+          vectorMeasurementSchema.getTimeCompressor(),
           vectorMeasurementSchema.getType(),
           vectorMeasurementSchema.getTimeTSEncoding(),
           Statistics.getStatsByType(vectorMeasurementSchema.getType()),
@@ -216,7 +309,7 @@
         subStatistics.updateStats(0L, 0L);
         writer.startFlushChunk(
             vectorMeasurementSchema.getSubMeasurementsList().get(j),
-            vectorMeasurementSchema.getCompressor(),
+            vectorMeasurementSchema.getValueCompressor(j),
             vectorMeasurementSchema.getSubMeasurementsTSDataTypeList().get(j),
             vectorMeasurementSchema.getSubMeasurementsTSEncodingList().get(j),
             subStatistics,
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileReadWriteTest.java b/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileReadWriteTest.java
index 7f74d16..015a8fb 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileReadWriteTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileReadWriteTest.java
@@ -42,6 +42,7 @@
 
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.File;
@@ -86,7 +87,6 @@
             TSEncoding.PLAIN,
             TSEncoding.RLE,
             TSEncoding.TS_2DIFF,
-            TSEncoding.REGULAR,
             TSEncoding.GORILLA,
             TSEncoding.ZIGZAG);
     for (TSEncoding encoding : encodings) {
@@ -102,12 +102,7 @@
   @Test
   public void longTest() throws IOException, WriteProcessException {
     List<TSEncoding> encodings =
-        Arrays.asList(
-            TSEncoding.PLAIN,
-            TSEncoding.RLE,
-            TSEncoding.TS_2DIFF,
-            TSEncoding.REGULAR,
-            TSEncoding.GORILLA);
+        Arrays.asList(TSEncoding.PLAIN, TSEncoding.RLE, TSEncoding.TS_2DIFF, TSEncoding.GORILLA);
     for (TSEncoding encoding : encodings) {
       longTest(encoding);
     }
@@ -151,7 +146,8 @@
             TSEncoding.RLE,
             TSEncoding.TS_2DIFF,
             TSEncoding.GORILLA_V1,
-            TSEncoding.GORILLA);
+            TSEncoding.GORILLA,
+            TSEncoding.CAMEL);
     for (TSEncoding encoding : encodings) {
       doubleTest(encoding);
     }
@@ -201,6 +197,7 @@
   }
 
   @Test
+  @Ignore
   public void readMeasurementWithRegularEncodingTest() throws IOException, WriteProcessException {
     TSFileDescriptor.getInstance().getConfig().setTimeEncoder("REGULAR");
     writeDataByTSRecord(
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileWriteApiTest.java b/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileWriteApiTest.java
index 17a0d0f..264bf9e 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileWriteApiTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/write/TsFileWriteApiTest.java
@@ -26,12 +26,15 @@
 import org.apache.tsfile.file.MetaMarker;
 import org.apache.tsfile.file.header.ChunkHeader;
 import org.apache.tsfile.file.header.PageHeader;
+import org.apache.tsfile.file.metadata.AbstractAlignedChunkMetadata;
 import org.apache.tsfile.file.metadata.ChunkMetadata;
 import org.apache.tsfile.file.metadata.ColumnSchema;
 import org.apache.tsfile.file.metadata.IDeviceID;
+import org.apache.tsfile.file.metadata.StringArrayDeviceID;
 import org.apache.tsfile.file.metadata.TableSchema;
 import org.apache.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.tsfile.fileSystem.FSFactoryProducer;
+import org.apache.tsfile.read.TsFileDeviceIterator;
 import org.apache.tsfile.read.TsFileReader;
 import org.apache.tsfile.read.TsFileSequenceReader;
 import org.apache.tsfile.read.common.Chunk;
@@ -41,6 +44,7 @@
 import org.apache.tsfile.read.query.dataset.ResultSet;
 import org.apache.tsfile.read.v4.ITsFileReader;
 import org.apache.tsfile.read.v4.TsFileReaderBuilder;
+import org.apache.tsfile.utils.Pair;
 import org.apache.tsfile.utils.TsFileGeneratorUtils;
 import org.apache.tsfile.write.chunk.AlignedChunkWriterImpl;
 import org.apache.tsfile.write.chunk.ChunkWriterImpl;
@@ -900,6 +904,181 @@
   }
 
   @Test
+  public void testWriteSomeColumnsOfTree() throws IOException, WriteProcessException {
+    List<IMeasurementSchema> fullMeasurementSchemas =
+        Arrays.asList(
+            new MeasurementSchema("s1", TSDataType.INT32),
+            new MeasurementSchema("s2", TSDataType.INT32),
+            new MeasurementSchema("s3", TSDataType.INT32));
+    List<IMeasurementSchema> measurementSchemas1 =
+        Arrays.asList(new MeasurementSchema("s1", TSDataType.INT32));
+    IDeviceID device = new StringArrayDeviceID("root.test.d1");
+    Tablet tablet1 =
+        new Tablet(
+            device,
+            IMeasurementSchema.getMeasurementNameList(fullMeasurementSchemas),
+            IMeasurementSchema.getDataTypeList(fullMeasurementSchemas));
+    Tablet tablet2 =
+        new Tablet(
+            device,
+            IMeasurementSchema.getMeasurementNameList(measurementSchemas1),
+            IMeasurementSchema.getDataTypeList(measurementSchemas1));
+    for (int i = 0; i < 1000; i++) {
+      tablet1.addTimestamp(i, i);
+      tablet1.addValue("s1", i, 1);
+      tablet1.addValue("s2", i, 1);
+      tablet1.addValue("s3", i, 1);
+    }
+    for (int i = 0; i < 1000; i++) {
+      tablet2.addTimestamp(i, i + 1005);
+      tablet2.addValue("s1", i, 0);
+    }
+    try (TsFileWriter writer = new TsFileWriter(f)) {
+      writer.registerAlignedTimeseries(device, fullMeasurementSchemas);
+      writer.setChunkGroupSizeThreshold(1);
+      writer.writeTree(tablet1);
+      writer.writeTree(tablet2);
+    }
+    try (TsFileSequenceReader reader = new TsFileSequenceReader(f.getPath())) {
+      TsFileDeviceIterator deviceIterator = reader.getAllDevicesIteratorWithIsAligned();
+      while (deviceIterator.hasNext()) {
+        Pair<IDeviceID, Boolean> pair = deviceIterator.next();
+        List<AbstractAlignedChunkMetadata> alignedChunkMetadataList =
+            reader.getAlignedChunkMetadataByMetadataIndexNode(
+                pair.getLeft(), deviceIterator.getFirstMeasurementNodeOfCurrentDevice(), false);
+        Assert.assertFalse(alignedChunkMetadataList.isEmpty());
+        Assert.assertEquals(3, alignedChunkMetadataList.get(0).getValueChunkMetadataList().size());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(0)
+                .getValueChunkMetadataList()
+                .get(0)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(0)
+                .getValueChunkMetadataList()
+                .get(1)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(0)
+                .getValueChunkMetadataList()
+                .get(2)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(3, alignedChunkMetadataList.get(1).getValueChunkMetadataList().size());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(0)
+                .getStatistics()
+                .getCount());
+        Assert.assertNull(alignedChunkMetadataList.get(1).getValueChunkMetadataList().get(1));
+        Assert.assertNull(alignedChunkMetadataList.get(1).getValueChunkMetadataList().get(2));
+      }
+    }
+  }
+
+  @Test
+  public void testWriteSomeColumnsOfTable() throws IOException, WriteProcessException {
+    TableSchema tableSchema =
+        new TableSchema(
+            "t1",
+            Arrays.asList(
+                new MeasurementSchema("device", TSDataType.STRING),
+                new MeasurementSchema("s1", TSDataType.INT32),
+                new MeasurementSchema("s2", TSDataType.INT32),
+                new MeasurementSchema("s3", TSDataType.INT32)),
+            Arrays.asList(
+                ColumnCategory.TAG,
+                ColumnCategory.FIELD,
+                ColumnCategory.FIELD,
+                ColumnCategory.FIELD));
+    Tablet tablet1 =
+        new Tablet(
+            tableSchema.getTableName(),
+            Arrays.asList("device", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT32),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.FIELD));
+    for (int i = 0; i < 1000; i++) {
+      tablet1.addTimestamp(i, i);
+      tablet1.addValue("s1", i, 0);
+    }
+    Tablet tablet2 =
+        new Tablet(
+            tableSchema.getTableName(),
+            IMeasurementSchema.getMeasurementNameList(tableSchema.getColumnSchemas()),
+            IMeasurementSchema.getDataTypeList(tableSchema.getColumnSchemas()),
+            tableSchema.getColumnTypes());
+    for (int i = 0; i < 1000; i++) {
+      tablet2.addTimestamp(i, 1005 + i);
+      tablet2.addValue("s1", i, 1);
+      tablet2.addValue("s2", i, 1);
+      tablet2.addValue("s3", i, 1);
+    }
+    try (TsFileWriter writer = new TsFileWriter(f)) {
+      writer.registerTableSchema(tableSchema);
+      writer.setChunkGroupSizeThreshold(1);
+      writer.writeTable(tablet1);
+      writer.writeTable(tablet2);
+    }
+    try (TsFileSequenceReader reader = new TsFileSequenceReader(f.getPath())) {
+      TsFileDeviceIterator deviceIterator = reader.getAllDevicesIteratorWithIsAligned();
+      while (deviceIterator.hasNext()) {
+        Pair<IDeviceID, Boolean> pair = deviceIterator.next();
+        List<AbstractAlignedChunkMetadata> alignedChunkMetadataList =
+            reader.getAlignedChunkMetadataByMetadataIndexNode(
+                pair.getLeft(), deviceIterator.getFirstMeasurementNodeOfCurrentDevice(), false);
+        Assert.assertFalse(alignedChunkMetadataList.isEmpty());
+        Assert.assertEquals(3, alignedChunkMetadataList.get(0).getValueChunkMetadataList().size());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(0)
+                .getValueChunkMetadataList()
+                .get(0)
+                .getStatistics()
+                .getCount());
+        Assert.assertNull(alignedChunkMetadataList.get(0).getValueChunkMetadataList().get(1));
+        Assert.assertNull(alignedChunkMetadataList.get(0).getValueChunkMetadataList().get(2));
+        Assert.assertEquals(3, alignedChunkMetadataList.get(1).getValueChunkMetadataList().size());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(0)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(1)
+                .getStatistics()
+                .getCount());
+        Assert.assertEquals(
+            1000,
+            alignedChunkMetadataList
+                .get(1)
+                .getValueChunkMetadataList()
+                .get(2)
+                .getStatistics()
+                .getCount());
+      }
+    }
+  }
+
+  @Test
   public void writeTableTsFileWithUpperCaseColumns() throws IOException, WriteProcessException {
     setEnv(100 * 1024 * 1024, 10 * 1024);
     Tablet tablet =
diff --git a/java/tsfile/src/test/java/org/apache/tsfile/write/record/TabletTest.java b/java/tsfile/src/test/java/org/apache/tsfile/write/record/TabletTest.java
index 07aedad..2582840 100644
--- a/java/tsfile/src/test/java/org/apache/tsfile/write/record/TabletTest.java
+++ b/java/tsfile/src/test/java/org/apache/tsfile/write/record/TabletTest.java
@@ -20,10 +20,12 @@
 package org.apache.tsfile.write.record;
 
 import org.apache.tsfile.common.conf.TSFileConfig;
+import org.apache.tsfile.enums.ColumnCategory;
 import org.apache.tsfile.enums.TSDataType;
 import org.apache.tsfile.file.metadata.enums.TSEncoding;
 import org.apache.tsfile.utils.Binary;
 import org.apache.tsfile.utils.BitMap;
+import org.apache.tsfile.utils.Pair;
 import org.apache.tsfile.write.schema.IMeasurementSchema;
 import org.apache.tsfile.write.schema.MeasurementSchema;
 
@@ -32,13 +34,19 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.time.LocalDate;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Random;
+import java.util.Set;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TabletTest {
@@ -297,4 +305,310 @@
     Assert.assertEquals(tablet.getValue(1, 1), deserializeTablet.getValue(1, 1));
     Assert.assertTrue(deserializeTablet.isNull(1, 0));
   }
+
+  @Test
+  public void testAppendInconsistent() {
+    Tablet t1 =
+        new Tablet(
+            "table1",
+            Arrays.asList("tag1", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT32),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.FIELD));
+
+    Tablet tWrongTable =
+        new Tablet(
+            "table2",
+            Arrays.asList("tag1", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT32),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.FIELD));
+    assertFalse(t1.append(tWrongTable));
+
+    Tablet tWrongColName =
+        new Tablet(
+            "table1",
+            Arrays.asList("tag2", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT32),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.FIELD));
+    assertFalse(t1.append(tWrongColName));
+
+    Tablet tWrongColType =
+        new Tablet(
+            "table1",
+            Arrays.asList("tag1", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT64),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.FIELD));
+    assertFalse(t1.append(tWrongColType));
+
+    Tablet tWrongColCategory =
+        new Tablet(
+            "table1",
+            Arrays.asList("tag1", "s1"),
+            Arrays.asList(TSDataType.STRING, TSDataType.INT32),
+            Arrays.asList(ColumnCategory.TAG, ColumnCategory.TAG));
+    assertFalse(t1.append(tWrongColCategory));
+  }
+
+  private void fillTablet(Tablet t, int valueOffset, int length) {
+    for (int i = 0; i < length; i++) {
+      t.addTimestamp(i, i + valueOffset);
+      for (int j = 0; j < t.getSchemas().size(); j++) {
+        switch (t.getSchemas().get(j).getType()) {
+          case INT32:
+            t.addValue(i, j, i + valueOffset);
+            break;
+          case TIMESTAMP:
+          case INT64:
+            t.addValue(i, j, (long) (i + valueOffset));
+            break;
+          case FLOAT:
+            t.addValue(i, j, (i + valueOffset) * 1.0f);
+            break;
+          case DOUBLE:
+            t.addValue(i, j, (i + valueOffset) * 1.0);
+            break;
+          case BOOLEAN:
+            t.addValue(i, j, (i + valueOffset) % 2 == 0);
+            break;
+          case TEXT:
+          case STRING:
+          case BLOB:
+            t.addValue(i, j, String.valueOf(i + valueOffset));
+            break;
+          case DATE:
+            t.addValue(i, j, LocalDate.of(i + valueOffset, 1, 1));
+            break;
+        }
+      }
+    }
+  }
+
+  private final List<String> colNamesForAppendTest =
+      Arrays.asList(
+          "tag1",
+          TSDataType.INT32.name(),
+          TSDataType.INT64.name(),
+          TSDataType.FLOAT.name(),
+          TSDataType.DOUBLE.name(),
+          TSDataType.BOOLEAN.name(),
+          TSDataType.TEXT.name(),
+          TSDataType.STRING.name(),
+          TSDataType.BLOB.name(),
+          TSDataType.TIMESTAMP.name(),
+          TSDataType.DATE.name());
+  private final List<TSDataType> dataTypesForAppendTest =
+      Arrays.asList(
+          TSDataType.STRING,
+          TSDataType.INT32,
+          TSDataType.INT64,
+          TSDataType.FLOAT,
+          TSDataType.DOUBLE,
+          TSDataType.BOOLEAN,
+          TSDataType.TEXT,
+          TSDataType.STRING,
+          TSDataType.BLOB,
+          TSDataType.TIMESTAMP,
+          TSDataType.DATE);
+  private final List<ColumnCategory> categoriesForAppendTest =
+      Arrays.asList(
+          ColumnCategory.TAG,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD,
+          ColumnCategory.FIELD);
+
+  @Test
+  public void testAppendNoNull() {
+    Tablet t1 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+
+    int t1Size = 100;
+    fillTablet(t1, 0, t1Size);
+
+    int t2Size = 100;
+    Tablet t2 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+    fillTablet(t2, t1Size, t2Size);
+
+    assertTrue(t1.append(t2));
+    checkAppendedTablet(t1, t1Size + t2Size, null);
+  }
+
+  @Test
+  public void testPreferredCapacity() {
+    Tablet t1 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+
+    int t1Size = 100;
+    fillTablet(t1, 0, t1Size);
+
+    int t2Size = 100;
+    Tablet t2 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+    fillTablet(t2, t1Size, t2Size);
+
+    assertTrue(t1.append(t2, 10000));
+    checkAppendedTablet(t1, t1Size + t2Size, null);
+    assertEquals(10000, t1.getMaxRowNumber());
+  }
+
+  @Test
+  public void testAppendNullPoints() {
+    Set<Pair<Integer, Integer>> nullPositions = new HashSet<>();
+    int nullPointNum = 10;
+    Random random = new Random();
+
+    Tablet t1 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+
+    int t1Size = 100;
+    fillTablet(t1, 0, t1Size);
+    for (int i = 0; i < nullPointNum; i++) {
+      int rowIndex = random.nextInt(t1Size);
+      int columnIndex = random.nextInt(colNamesForAppendTest.size());
+      nullPositions.add(new Pair<>(rowIndex, columnIndex));
+      t1.getBitMaps()[columnIndex].mark(rowIndex);
+    }
+
+    int t2Size = 100;
+    Tablet t2 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+    fillTablet(t2, t1Size, t2Size);
+    for (int i = 0; i < nullPointNum; i++) {
+      int rowIndex = random.nextInt(t1Size);
+      int columnIndex = random.nextInt(colNamesForAppendTest.size());
+      nullPositions.add(new Pair<>(rowIndex + t1Size, columnIndex));
+      t2.getBitMaps()[columnIndex].mark(rowIndex);
+    }
+
+    assertTrue(t1.append(t2));
+    checkAppendedTablet(t1, t1Size + t2Size, nullPositions);
+  }
+
+  @Test
+  public void testAppendNullBitMapColumn() {
+    int nullBitMapNum = 5;
+    Random random = new Random();
+
+    Tablet t1 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+
+    int t1Size = 100;
+    fillTablet(t1, 0, t1Size);
+    for (int i = 0; i < nullBitMapNum; i++) {
+      int columnIndex = random.nextInt(colNamesForAppendTest.size());
+      t1.getBitMaps()[columnIndex] = null;
+    }
+
+    int t2Size = 100;
+    Tablet t2 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+    fillTablet(t2, t1Size, t2Size);
+    for (int i = 0; i < nullBitMapNum; i++) {
+      int columnIndex = random.nextInt(colNamesForAppendTest.size());
+      t2.getBitMaps()[columnIndex] = null;
+    }
+
+    assertTrue(t1.append(t2));
+    assertEquals(t1Size + t2Size, t1.getRowSize());
+
+    checkAppendedTablet(t1, t1Size + t2Size, null);
+  }
+
+  @Test
+  public void testAppendThisNullBitMap() {
+
+    Tablet t1 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+
+    int t1Size = 100;
+    fillTablet(t1, 0, t1Size);
+    t1.setBitMaps(null);
+
+    int t2Size = 100;
+    Tablet t2 =
+        new Tablet(
+            "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+    fillTablet(t2, t1Size, t2Size);
+
+    assertTrue(t1.append(t2));
+    checkAppendedTablet(t1, t1Size + t2Size, null);
+  }
+
+  @Test
+  public void testMultipleAppend() {
+    List<Tablet> tablets = new ArrayList<>();
+    int tabletNum = 10;
+    int singleTabletSize = 100;
+    for (int i = 0; i < tabletNum; i++) {
+      Tablet tablet =
+          new Tablet(
+              "table1", colNamesForAppendTest, dataTypesForAppendTest, categoriesForAppendTest);
+      fillTablet(tablet, i * singleTabletSize, singleTabletSize);
+      tablets.add(tablet);
+    }
+    for (int i = 1; i < tabletNum; i++) {
+      assertTrue(tablets.get(0).append(tablets.get(i)));
+    }
+    checkAppendedTablet(tablets.get(0), singleTabletSize * tabletNum, null);
+  }
+
+  private void checkAppendedTablet(
+      Tablet result, int totalSize, Set<Pair<Integer, Integer>> nullPositions) {
+    assertEquals(totalSize, result.getRowSize());
+
+    for (int i = 0; i < totalSize; i++) {
+      assertEquals(i, result.getTimestamp(i));
+      for (int j = 0; j < result.getSchemas().size(); j++) {
+        if (nullPositions != null && nullPositions.contains(new Pair<>(i, j))) {
+          assertTrue(result.isNull(i, j));
+          continue;
+        }
+
+        assertFalse(result.isNull(i, j));
+        switch (result.getSchemas().get(j).getType()) {
+          case INT32:
+            assertEquals(i, result.getValue(i, j));
+            break;
+          case TIMESTAMP:
+          case INT64:
+            assertEquals((long) i, result.getValue(i, j));
+            break;
+          case FLOAT:
+            assertEquals(i * 1.0f, (float) result.getValue(i, j), 0.0001f);
+            break;
+          case DOUBLE:
+            assertEquals(i * 1.0, (double) result.getValue(i, j), 0.0001);
+            break;
+          case BOOLEAN:
+            assertEquals(i % 2 == 0, result.getValue(i, j));
+            break;
+          case TEXT:
+          case BLOB:
+          case STRING:
+            assertEquals(
+                new Binary(String.valueOf(i).getBytes(StandardCharsets.UTF_8)),
+                result.getValue(i, j));
+            break;
+          case DATE:
+            assertEquals(LocalDate.of(i, 1, 1), result.getValue(i, j));
+            break;
+        }
+      }
+    }
+  }
 }
diff --git a/pom.xml b/pom.xml
index 08fb365..51fbe31 100644
--- a/pom.xml
+++ b/pom.xml
@@ -28,7 +28,7 @@
     </parent>
     <groupId>org.apache.tsfile</groupId>
     <artifactId>tsfile-parent</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <packaging>pom</packaging>
     <name>Apache TsFile Project Parent POM</name>
     <properties>
@@ -37,8 +37,9 @@
         <argLine/>
         <spotless.skip>false</spotless.skip>
         <cmake.version>3.30.2-b1</cmake.version>
-        <spotless.version>2.43.0</spotless.version>
+        <spotless.version>2.44.3</spotless.version>
         <google.java.format.version>1.22.0</google.java.format.version>
+        <clang.format.version>17.0.6</clang.format.version>
         <drill.freemarker.maven.plugin.version>1.21.1</drill.freemarker.maven.plugin.version>
         <groovy.version>4.0.22</groovy.version>
     </properties>
@@ -138,33 +139,6 @@
                     </configuration>
                 </plugin>
                 <plugin>
-                    <groupId>com.diffplug.spotless</groupId>
-                    <artifactId>spotless-maven-plugin</artifactId>
-                    <version>${spotless.version}</version>
-                    <configuration>
-                        <java>
-                            <googleJavaFormat>
-                                <version>${google.java.format.version}</version>
-                                <style>GOOGLE</style>
-                            </googleJavaFormat>
-                            <importOrder>
-                                <order>org.apache.tsfile,,javax,java,\#</order>
-                            </importOrder>
-                            <removeUnusedImports/>
-                        </java>
-                        <lineEndings>UNIX</lineEndings>
-                    </configuration>
-                    <executions>
-                        <execution>
-                            <id>spotless-check</id>
-                            <goals>
-                                <goal>check</goal>
-                            </goals>
-                            <phase>validate</phase>
-                        </execution>
-                    </executions>
-                </plugin>
-                <plugin>
                     <groupId>org.eluder.coveralls</groupId>
                     <artifactId>coveralls-maven-plugin</artifactId>
                     <version>4.3.0</version>
@@ -226,6 +200,7 @@
             <plugin>
                 <groupId>com.diffplug.spotless</groupId>
                 <artifactId>spotless-maven-plugin</artifactId>
+                <version>${spotless.version}</version>
                 <configuration>
                     <skip>${spotless.skip}</skip>
                 </configuration>
diff --git a/python/README.md b/python/README.md
index dcfd24e..23af6eb 100644
--- a/python/README.md
+++ b/python/README.md
@@ -26,7 +26,7 @@
 \__    ___/____\_   _____/|__|  |   ____  
   |    | /  ___/|    __)  |  |  | _/ __ \ 
   |    | \___ \ |     \   |  |  |_\  ___/ 
-  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0-SNAPSHOT
+  |____|/____  >\___  /   |__|____/\___  >  version 2.1.0
              \/     \/                 \/  
 </pre>
 
diff --git a/python/examlpes.py b/python/examlpes.py
new file mode 100644
index 0000000..00ced4e
--- /dev/null
+++ b/python/examlpes.py
@@ -0,0 +1,82 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import numpy as np
+import pandas as pd
+import os
+
+import tsfile as ts
+
+
+# test writing data
+data_dir = os.path.join(os.path.dirname(__file__), "test.tsfile")
+DEVICE_NAME = "test_table"
+
+# 1000 rows data
+time = np.arange(1, 1001, dtype=np.int64)
+level = np.linspace(2000, 3000, num=1000, dtype=np.float32)
+num = np.arange(10000, 11000, dtype=np.int64)
+df = pd.DataFrame({"Time": time, "level": level, "num": num})
+
+if os.path.exists(data_dir):
+    os.remove(data_dir)
+ts.write_tsfile(data_dir, DEVICE_NAME, df)
+
+
+# read data we already wrote
+# with 20 chunksize
+tsfile_ret = ts.read_tsfile(data_dir, DEVICE_NAME, ["level", "num"], chunksize=20)
+print(tsfile_ret.shape)
+
+# with 100 chunksize
+tsfile_ret = ts.read_tsfile(data_dir, DEVICE_NAME, ["level", "num"], chunksize=100)
+print(tsfile_ret.shape)
+
+# get all data
+tsfile_ret = ts.read_tsfile(data_dir, DEVICE_NAME, ["level", "num"])
+print(tsfile_ret.shape)
+
+# with iterator
+with ts.read_tsfile(
+    data_dir, DEVICE_NAME, ["level", "num"], iterator=True, chunksize=100
+) as reader:
+    for chunk in reader:
+        print(chunk.shape)
+
+# with time scale and chunksize
+tsfile_ret = ts.read_tsfile(
+    data_dir, DEVICE_NAME, ["level"], start_time=50, end_time=100, chunksize=10
+)
+print(tsfile_ret.shape)
+
+# with time scale
+tsfile_ret = ts.read_tsfile(data_dir, DEVICE_NAME, ["num"], start_time=50, end_time=100)
+print(tsfile_ret.shape)
+
+# with time scale, iterator and chunksize
+with ts.read_tsfile(
+    data_dir,
+    DEVICE_NAME,
+    ["level", "num"],
+    iterator=True,
+    start_time=100,
+    end_time=500,
+    chunksize=100,
+) as reader:
+    for chunk in reader:
+        print(chunk.shape)
diff --git a/python/pom.xml b/python/pom.xml
index 88e4881..765084f 100644
--- a/python/pom.xml
+++ b/python/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <groupId>org.apache.tsfile</groupId>
         <artifactId>tsfile-parent</artifactId>
-        <version>2.1.0-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>tsfile-python</artifactId>
     <packaging>pom</packaging>
diff --git a/python/test.py b/python/test.py
new file mode 100644
index 0000000..29fb6c2
--- /dev/null
+++ b/python/test.py
@@ -0,0 +1,165 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import platform
+import shutil
+
+import unittest as ut
+import numpy as np
+import pandas as pd
+
+
+import tsfile as ts
+from tsfile.tsfile import EmptyFileError
+
+TABLE_NAME = "test_table"
+DATA_PATH = os.path.join(os.path.dirname(__file__), "target")
+
+
+# test writing data
+def test_write_tsfile():
+    # test write empty data
+    df = pd.DataFrame()
+    ts.write_tsfile(DATA_PATH + "/empty.tsfile", TABLE_NAME, df)
+    assert not os.path.exists(DATA_PATH + "/empty.tsfile")
+
+    # data without Time
+    # 1000 rows data
+    level = np.linspace(2000, 3000, num=1000, dtype=np.float32)
+    num = np.arange(10000, 11000, dtype=np.int64)
+    df = pd.DataFrame({"level": level, "num": num})
+    with ut.TestCase().assertRaises(AttributeError):
+        ts.write_tsfile(DATA_PATH + "/no_time.tsfile", TABLE_NAME, df)
+
+    # time with wrong type
+    time = np.arange(1, 1001, dtype=np.float32)
+    df = pd.DataFrame({"Time": time, "level": level, "num": num})
+    with ut.TestCase().assertRaises(TypeError):
+        ts.write_tsfile(DATA_PATH + "/wrong_time_type.tsfile", TABLE_NAME, df)
+    # TXT is not support yet
+    time = np.arange(1, 1001, dtype=np.int64)
+    text = np.random.choice(["a", "b", "c"], 1000)
+    df = pd.DataFrame({"Time": time, "text": text})
+    with ut.TestCase().assertRaises(TypeError):
+        ts.write_tsfile(DATA_PATH + "/txt.tsfile", TABLE_NAME, df)
+
+    # full datatypes test
+    time = np.arange(1, 1001, dtype=np.int64)  # int64
+    level = np.linspace(2000, 3000, num=1000, dtype=np.float32)  # float32
+    num = np.arange(10000, 11000, dtype=np.int64)  # int64
+    bools = np.random.choice([True, False], 1000)  # bool
+    double = np.random.rand(1000)  # double
+    df = pd.DataFrame(
+        {"Time": time, "level": level, "num": num, "bools": bools, "double": double}
+    )
+    ts.write_tsfile(DATA_PATH + "/full_datatypes.tsfile", TABLE_NAME, df)
+
+
+# test reading data
+def test_read_tsfile():
+    # skip test on windows because of the bug in the tsfile library
+    if platform.system() == "Windows":
+        return
+    # test read a non-existent file
+    with ut.TestCase().assertRaises(FileNotFoundError):
+        ts.read_tsfile(DATA_PATH + "/notexist.tsfile", TABLE_NAME, ["level", "num"])
+
+    # test read empty file
+    with open(DATA_PATH + "/empty.tsfile", "w", encoding="utf-8") as f:
+        pass
+
+    with ut.TestCase().assertRaises(EmptyFileError):
+        ts.read_tsfile(DATA_PATH + "/empty.tsfile", TABLE_NAME, ["level", "num"])
+
+    FILE_NAME = DATA_PATH + "/full_datatypes.tsfile"
+    # test read data
+    ## 1. read all data
+    df, _ = ts.read_tsfile(FILE_NAME, TABLE_NAME, ["level", "num", "bools", "double"])
+    assert df.shape == (1000, 5)
+    assert df["level"].dtype == np.float32
+    assert df["Time"].dtype == np.int64
+    assert df["num"].dtype == np.int64
+    assert df["bools"].dtype == np.bool_
+    assert df["double"].dtype == np.float64
+
+    ## 2. read with chunksize
+    df, _ = ts.read_tsfile(FILE_NAME, TABLE_NAME, ["level", "num"], chunksize=100)
+    assert df.shape == (100, 3)
+    assert df["level"].dtype == np.float32
+    assert df["Time"].sum() == np.arange(1, 101).sum()
+
+    ## 3. read with iterator
+    chunk_num = 0
+    with ts.read_tsfile(
+        FILE_NAME, TABLE_NAME, ["level", "num"], iterator=True, chunksize=100
+    ) as reader:
+        for chunk, _ in reader:
+            assert chunk.shape == (100, 3)
+            assert chunk["level"].dtype == np.float32
+            assert (
+                chunk["Time"].sum()
+                == np.arange(1 + chunk_num * 100, 101 + chunk_num * 100).sum()
+            )
+            chunk_num += 1
+    assert chunk_num == 10
+
+    ## 4. read with time scale
+    df, _ = ts.read_tsfile(FILE_NAME, TABLE_NAME, ["num"], start_time=50, end_time=99)
+    assert df.shape == (50, 2)
+    assert df["num"][0] == 10049
+    assert df["num"][9] == 10058
+
+    ## 5. read with time scale and chunksize
+    df, _ = ts.read_tsfile(
+        FILE_NAME, TABLE_NAME, ["num"], start_time=50, end_time=99, chunksize=10
+    )
+    assert df.shape == (10, 2)
+    assert df["num"][0] == 10049
+    assert df["num"][9] == 10058
+
+    ## 6. read with time scale and iterator
+    chunk_num = 0
+    with ts.read_tsfile(
+        FILE_NAME,
+        TABLE_NAME,
+        ["num"],
+        start_time=50,
+        end_time=99,
+        iterator=True,
+        chunksize=10,
+    ) as reader:
+        for chunk, _ in reader:
+            assert chunk.shape == (10, 2)
+            assert chunk["num"][0] == 10049 + chunk_num * 10
+            assert chunk["num"][9] == 10058 + chunk_num * 10
+            chunk_num += 1
+    assert chunk_num == 5
+
+
+if __name__ == "__main__":
+    if os.path.exists(DATA_PATH):
+        print("Remove old data")
+        shutil.rmtree(DATA_PATH)
+        os.makedirs(DATA_PATH)
+    else:
+        os.makedirs(DATA_PATH)
+    test_write_tsfile()
+    test_read_tsfile()
+    print("All tests passed")
+    shutil.rmtree(DATA_PATH)
diff --git a/python/tsfile/tsfile.pxd b/python/tsfile/tsfile.pxd
new file mode 100644
index 0000000..a41794e
--- /dev/null
+++ b/python/tsfile/tsfile.pxd
@@ -0,0 +1,104 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#cython: language_level=3
+cdef extern from "limits.h":
+    long long LLONG_MAX
+    long long LLONG_MIN
+
+cdef extern from "./TsFile-cwrapper.h":
+    # common
+    ctypedef int ErrorCode
+    ctypedef long long timestamp
+    ctypedef long long SchemaInfo
+
+
+
+    # for read data from tsfile
+    ctypedef void* CTsFileReader
+    ctypedef void* TsFileRowData
+    ctypedef void* QueryDataRetINTERNAL
+    ctypedef void* TimeFilterExpression
+
+    cdef struct query_data_ret:
+        char** column_names
+        int column_num
+        QueryDataRetINTERNAL data
+
+    ctypedef query_data_ret* QueryDataRet
+
+
+    # for writer data to tsfile
+    ctypedef void* CTsFileWriter
+    cdef struct column_schema:
+            char* name
+            SchemaInfo column_def
+    ctypedef column_schema ColumnSchema
+
+    cdef struct TableSchema:
+        char* table_name
+        ColumnSchema** column_schema
+        int column_num
+
+    cdef struct Tablet:
+        char* table_name
+        ColumnSchema** column_schema
+        int column_num
+        timestamp* times
+        bint** bitmap 
+        void** value
+        int cur_num
+        int max_capacity
+    
+    ctypedef Tablet DataResult
+    
+    # Function Declarations
+    # reader:tsfile reader
+    CTsFileReader ts_reader_open(const char* path, ErrorCode* err_code)
+    ErrorCode ts_reader_close(CTsFileReader reader)
+
+    # writer:tsfile writer
+    CTsFileWriter ts_writer_open(const char* path, ErrorCode* err_code)
+    ErrorCode ts_writer_close(CTsFileWriter writer)
+
+
+    # read tsfile data
+    QueryDataRet ts_reader_begin_end(CTsFileReader reader, const char* table_name,
+                                char** columns, int colum_num, timestamp start_time, timestamp end_time)
+    QueryDataRet ts_reader_read(CTsFileReader reader, const char* table_name,
+                                char** columns, int colum_num)
+    DataResult* ts_next(QueryDataRet data, int expect_line_count)
+    ErrorCode destory_query_dataret(QueryDataRet query_data_set)
+    ErrorCode destory_tablet(Tablet* tablet)
+
+    # writer tsfile data
+    ErrorCode tsfile_register_table(CTsFileWriter writer, TableSchema* schema)
+    ErrorCode tsfile_register_table_column(CTsFileWriter writer, const char* table_name, ColumnSchema* schema)
+    TsFileRowData create_tsfile_row(const char* table_name, timestamp timestamp, int column_length)
+    ErrorCode insert_data_into_tsfile_row_int32(TsFileRowData row_data, char* column_name, int value)
+    ErrorCode insert_data_into_tsfile_row_int64(TsFileRowData row_data, char* column_name, long long value)
+    ErrorCode insert_data_into_tsfile_row_float(TsFileRowData row_data,  char* column_name, float value)
+    ErrorCode insert_data_into_tsfile_row_double(TsFileRowData row_data, char* column_name, double value)
+    ErrorCode insert_data_into_tsfile_row_boolean(TsFileRowData row_data,  char* column_name, bint value)
+    ErrorCode tsfile_write_row_data(CTsFileWriter writer, TsFileRowData data);
+    ErrorCode tsfile_flush_data(CTsFileWriter writer)
+    ErrorCode destory_tsfile_row(TsFileRowData data)
+
+
+
+    
\ No newline at end of file
diff --git a/python/tsfile/tsfile.py b/python/tsfile/tsfile.py
new file mode 100644
index 0000000..3e92f05
--- /dev/null
+++ b/python/tsfile/tsfile.py
@@ -0,0 +1,145 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import os
+import platform
+import ctypes
+
+if platform.system() == "Windows":
+    ctypes.CDLL(os.path.join(os.path.dirname(__file__), "libtsfile.dll"), winmode=0)
+from .tsfile_pywrapper import tsfile_reader, tsfile_writer
+from typing import overload, Union
+from pandas import DataFrame
+
+TIMESTAMP_STR = "Time"
+
+class EmptyFileError(Exception):
+    def __init__(self, message="File is empty"):
+        self.message = message
+        super().__init__(self.message)
+
+
+# default case -> Dataframe
+@overload
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+) -> DataFrame: ...
+
+
+# case with filter -> Dataframe
+@overload
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+    filter: str,
+    start_time: int,
+    end_time: int,
+) -> DataFrame: ...
+
+
+# chunksize = int -> Dataframe
+@overload
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+    chunksize: int,
+) -> DataFrame: ...
+
+
+@overload
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+    filter: str,
+    start_time: int,
+    end_time: int,
+    chunksize: int,
+) -> DataFrame: ...
+
+
+# iterator = True -> Iterator
+@overload
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+    iterator: bool,
+    chunksize: int,
+) -> tsfile_reader: ...
+
+
+@overload
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+    start_time: int,
+    end_time: int,
+    iterator: bool,
+    chunksize: int,
+) -> tsfile_reader: ...
+
+
+def read_tsfile(
+    file_path: str,
+    table_name: str,
+    columns: Union[list[str], str],
+    start_time: int = None,
+    end_time: int = None,
+    chunksize: int = None,
+    iterator: bool = False,
+) -> Union[DataFrame, tsfile_reader]:
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File '{file_path}' does not exist")
+    if os.path.getsize(file_path) == 0:
+        raise EmptyFileError(f"File '{file_path}' is empty")
+    reader = tsfile_reader(
+        file_path, table_name, columns, start_time, end_time, chunksize
+    )
+    if iterator:
+        return reader
+    else:
+        return reader.read_tsfile()
+
+
+def write_tsfile(
+    file_path: str,
+    table_name: str,
+    data: DataFrame,
+):
+    if data.empty:
+        return
+    column_names = data.columns.tolist()
+    column_types = data.dtypes
+
+    if TIMESTAMP_STR not in column_names:
+        raise AttributeError("Time column is missing")
+    if column_types[TIMESTAMP_STR] != "int64":
+        raise TypeError("Time column must be of type int64")
+    allowed_types = {"int64", "int32", "bool", "float32", "float64"}
+
+    for col, dtype in column_types.items():
+        if dtype.name not in allowed_types:
+            raise TypeError(f"Column '{col}' has an invalid type '{dtype}'.")
+
+    writer = tsfile_writer(file_path)
+    writer.write_tsfile(table_name, data)
diff --git a/python/tsfile/tsfile_pywrapper.pyx b/python/tsfile/tsfile_pywrapper.pyx
new file mode 100644
index 0000000..a910a49
--- /dev/null
+++ b/python/tsfile/tsfile_pywrapper.pyx
@@ -0,0 +1,348 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#cython: language_level=3
+from libc.string cimport strcpy
+from libc.stdlib cimport malloc, free
+import pandas as pd
+from cpython.bytes cimport PyBytes_AsString
+cimport numpy as cnp
+import numpy as np
+from .tsfile cimport *
+
+TIMESTAMP_STR = "Time"
+TS_TYPE_INT32 = 1 << 8
+TS_TYPE_BOOLEAN = 1 << 9
+TS_TYPE_FLOAT = 1 << 10
+TS_TYPE_DOUBLE = 1 << 11
+TS_TYPE_INT64 = 1 << 12
+TS_TYPE_TEXT = 1 << 13
+
+type_mapping = {
+    'int32': TS_TYPE_INT32,
+    'bool': TS_TYPE_BOOLEAN,
+    'float32': TS_TYPE_FLOAT,
+    'float64': TS_TYPE_DOUBLE,
+    'int64': TS_TYPE_INT64
+}
+
+cdef class tsfile_reader:
+    
+    cdef CTsFileReader reader
+    cdef QueryDataRet ret
+    cdef int batch_size
+    cdef bint read_all_at_once
+    
+    
+
+    def __init__(self, pathname, table_name, columns, start_time=None, end_time=None, batch_size=None):
+        self.open_reader(pathname)
+        self.query_data_ret(table_name, columns, start_time, end_time)
+
+        if batch_size is not None:
+            self.batch_size = batch_size
+            self.read_all_at_once = False
+        else:
+            self.batch_size = 1024
+            self.read_all_at_once = True
+
+    cdef open_reader(self, pathname):
+        cdef ErrorCode err_code
+        err_code = 0
+        self.reader = ts_reader_open(pathname.encode('utf-8'), &err_code)
+        if (err_code != 0):
+            raise Exception("Failed to open tsfile: %s, %s" %( pathname, err_code))
+    
+    cdef query_data_ret(self, table_name, columns, start_time = None, end_time=None):
+        cdef bytes py_table_name
+        cdef char** c_columns
+        py_table_name = table_name.encode('utf-8')
+        c_table_name = PyBytes_AsString(py_table_name)
+        if isinstance(columns, str):
+            columns = [columns]
+        
+        c_columns = <char**>malloc(len(columns) * sizeof(char*))
+        if not c_columns:
+            raise MemoryError("Failed to allocate memory for columns")
+
+        for i in range(len(columns)):
+            c_columns[i] = <char*>malloc(len(columns[i]) + 1)
+            if not c_columns[i]:
+                for j in range(i):
+                    free(c_columns[j])
+                free(c_columns)
+                raise MemoryError("Failed to allocate memory for columns")
+            column_binary = columns[i].encode('utf-8')
+            column = PyBytes_AsString(column_binary)
+            strcpy(c_columns[i], column)
+        
+        # query data from tsfile
+        if start_time is not None or end_time is not None:
+            if start_time is None:
+                start_time = LLONG_MIN
+            if end_time is None:
+                end_time = LLONG_MAX
+            self.ret = ts_reader_begin_end(self.reader, c_table_name, c_columns, len(columns), start_time, end_time)
+        else:
+            self.ret = ts_reader_read(self.reader, table_name.encode('utf-8'), c_columns, len(columns))
+
+        for i in range(len(columns)):
+            free(c_columns[i])
+        free(c_columns)
+        
+        
+    def read_tsfile(self):
+        # open tsfile to read
+        res = pd.DataFrame()
+        not_null_maps = []
+        if self.read_all_at_once:
+            while True:
+                chunk, not_null_map = self.get_next_dataframe()
+                if chunk is not None:
+                    res = pd.concat([res, chunk])
+                    not_null_maps.append(not_null_map)
+                else:
+                    break
+        else:
+            res, not_null_map = self.get_next_dataframe()
+            not_null_maps.append(not_null_map)
+
+        self.free_resources()
+        not_null_map_all = None
+        if (not_null_maps != []):
+            not_null_map_all = np.vstack(not_null_maps)
+        return res, not_null_map_all
+
+    def __iter__(self):
+        return self
+    
+    def __next__(self):
+        res, not_null_map = self.get_next_dataframe()
+        if res is None:
+            raise StopIteration
+        return res, not_null_map
+
+    def get_next_dataframe(self):
+        cdef:
+            DataResult* result
+            ColumnSchema* schema = NULL
+            cnp.ndarray[cnp.int64_t, ndim=1, mode='c'] np_array_i64
+            cnp.ndarray[cnp.int32_t, ndim=1, mode='c'] np_array_i32
+            cnp.ndarray[cnp.float32_t, ndim=1, mode='c'] np_array_float
+            cnp.ndarray[cnp.float64_t, ndim=1, mode='c'] np_array_double
+            cnp.ndarray[bint, ndim=1, mode='c'] np_array_bool
+            cnp.npy_intp length 
+            bint has_null
+            bytes pystr
+            str py_string  
+        
+        res = {}
+        column_order = []
+        not_null_map = []
+
+        # Time column will be the first column
+        column_order.append(TIMESTAMP_STR)
+
+        for i in range(self.ret.column_num):
+            pystr = self.ret.column_names[i]
+            py_string = pystr.decode('utf-8', 'ignore')
+            column_order.append(py_string)
+            res[py_string] = []
+        
+        res[TIMESTAMP_STR] = []
+
+        if self.ret.data == NULL:
+            return None, None
+
+        result = ts_next(self.ret, self.batch_size)
+
+        # there is no data meet our requirement
+        if result.column_schema == NULL:
+            # free memory
+            if (destory_tablet(result) != 0):
+                raise Exception("Failed to destroy tablet")
+            return None, None
+
+        # time column
+        length = result.cur_num + 1
+        cdef cnp.ndarray[cnp.int64_t, ndim=1, mode='c'] data_array = \
+            cnp.PyArray_SimpleNewFromData(1, &length, cnp.NPY_INT64, result.times)
+        res[TIMESTAMP_STR] = np.array(data_array, dtype = np.int64)
+        
+        for i in range(result.column_num):
+
+            # column name
+            schema = result.column_schema[i]
+            pystr = schema.name
+            column_name = pystr.decode('utf-8')
+
+            # column bitmap
+            is_not_null = np.empty(length, dtype = bool)
+            bool_ptr = <char*> result.bitmap[i]
+            has_null = False
+            for j in range(length):
+                is_not_null[j] = bool_ptr[j]  != 0
+                if bool_ptr[j]  == 0 and ~has_null:
+                    has_null = True
+
+        
+            if schema.column_def == TS_TYPE_INT32:
+                np_array_i32 = cnp.PyArray_SimpleNewFromData(1, &length, cnp.NPY_INT32, result.value[i])
+                arr = np.array(np_array_i32, dtype = np.int32)
+
+            elif schema.column_def == TS_TYPE_BOOLEAN:
+                arr_bool_ = np.empty(length, dtype=np.bool_)
+                bool_ptr = <char*> result.value[i]
+                for j in range(length):
+                    arr_bool_[j] = bool_ptr[j] != 0
+                arr = np.array(arr_bool_, dtype = np.bool_)
+
+            elif schema.column_def  == TS_TYPE_FLOAT:
+                np_array_float = cnp.PyArray_SimpleNewFromData(1, &length, cnp.NPY_FLOAT32, result.value[i])
+                arr = np.array(np_array_float, dtype = np.float32)
+                arr = np.where(is_not_null, arr, np.nan)
+                res[column_name]=arr
+                continue
+
+            elif schema.column_def == TS_TYPE_DOUBLE:
+                np_array_double = cnp.PyArray_SimpleNewFromData(1, &length, cnp.NPY_FLOAT64, result.value[i])
+                arr= np.array(np_array_double, dtype = np.float64)
+                arr = np.where(is_not_null, arr, np.nan)
+                res[column_name]=arr
+                continue
+
+            elif schema.column_def == TS_TYPE_INT64:
+                np_array_i64 = cnp.PyArray_SimpleNewFromData(1, &length, cnp.NPY_INT64, result.value[i])
+                arr = np.array(np_array_i64, dtype = np.int64)
+            else:
+                raise Exception("UnSupport column type")
+            
+            if has_null:
+                tmp_array = np.full(length, np.nan, np.float64)
+                tmp_array[is_not_null] = arr[is_not_null]
+                if schema.column_def == TS_TYPE_INT32:
+                    arr = pd.Series(tmp_array).astype('Int32')
+                elif schema.column_def == TS_TYPE_BOOLEAN:
+                    arr = pd.Series(tmp_array).astype(np.bool_)
+                elif schema.column_def == TS_TYPE_INT64:
+                    arr = pd.Series(tmp_array).astype('Int64')
+
+            res[column_name] = arr
+            not_null_map.append(is_not_null)
+        if (destory_tablet(result) != 0):
+            raise Exception("Failed to destroy tablet")
+        return pd.DataFrame(res, columns = column_order), not_null_map
+
+    def __dealloc__(self):
+        self.free_resources()
+
+    def __enter__(self):
+        return self
+        
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.free_resources()
+
+    cdef free_resources(self):
+        if self.reader:
+            if ts_reader_close(self.reader) != 0 :
+                raise Exception("Failed to close tsfile")
+        if self.ret:
+            if destory_query_dataret(self.ret) != 0:
+                raise Exception("Failed to free query data ret")
+        self.reader = NULL
+        self.ret = NULL
+
+cdef class tsfile_writer:
+    cdef CTsFileWriter writer
+    cdef TsFileRowData row_data
+
+    def __init__(self, pathname):
+        self.open_writer(pathname)
+
+
+    cdef open_writer(self, pathname):
+        cdef ErrorCode err_code
+        err_code = 0
+        self.writer = ts_writer_open(pathname.encode('utf-8'), &err_code)
+        if (err_code != 0):
+            raise Exception("Failed to open tsfile: %s, %s" %( pathname, err_code))
+    
+    def resister_timeseries(self, table_name, column_name, data_type):
+        cdef char* c_columns
+        cdef bytes py_table_name
+        cdef ColumnSchema schema
+        cdef bytes encoded_column_name = column_name.encode('utf-8')
+        py_table_name = table_name.encode('utf-8')
+        c_table_name = PyBytes_AsString(py_table_name)
+        schema.name = encoded_column_name
+        schema.column_def = data_type
+        if tsfile_register_table_column(self.writer, c_table_name, &schema) != 0:
+            raise Exception("Failed to register timeseries")
+    cdef create_row_data(self, table_name, time, column_length):
+        self.row_data = create_tsfile_row(table_name.encode('utf-8'), time, column_length)
+    def write_into_row_data(self, column_name, value, type):
+        cdef char* c_column_name = PyBytes_AsString(column_name.encode('utf-8'))
+        if type == TS_TYPE_INT32:
+            insert_data_into_tsfile_row_int32(self.row_data, c_column_name, value)
+        elif type == TS_TYPE_BOOLEAN:
+            insert_data_into_tsfile_row_boolean(self.row_data, c_column_name, value)
+        elif type == TS_TYPE_FLOAT:
+            insert_data_into_tsfile_row_float(self.row_data, c_column_name, value)
+        elif type == TS_TYPE_DOUBLE:
+            insert_data_into_tsfile_row_double(self.row_data, c_column_name, value)
+        elif type == TS_TYPE_INT64:
+            insert_data_into_tsfile_row_int64(self.row_data, c_column_name,  value)
+        else:
+            raise TypeError("Unknown column type")
+    def write_tsfile(self, table_name, df):
+        column_names = df.columns.tolist()
+        column_types = df.dtypes
+        column_ctypes = []
+        for i in range(len(column_names)):
+            column_type = column_types[i].name
+            if column_type in type_mapping:
+                column_ctypes.append(type_mapping[column_type])
+            else:
+                raise TypeError("Unknown column type")
+            
+            if (column_names[i] != TIMESTAMP_STR):
+                self.resister_timeseries(table_name, column_names[i], column_ctypes[i])
+
+                
+        for i in range(len(df)):
+            time = df.iloc[i][TIMESTAMP_STR]
+            self.create_row_data(table_name, time, len(column_names))
+            for j in range(1, len(column_names)):
+                column_name = column_names[j]
+                column_value = df.iloc[i][column_name]
+                column_ctype = column_ctypes[j]
+                self.write_into_row_data(column_name, column_value, column_ctype)
+            if tsfile_write_row_data(self.writer, self.row_data) != 0:
+                raise Exception("Failed to write row data")
+
+        if tsfile_flush_data(self.writer) != 0:
+            raise Exception("Failed to flush data")
+        self.row_data = NULL
+        self.free_resources()
+    def free_resources(self):
+        if self.writer != NULL:
+            if ts_writer_close(self.writer) != 0:
+                raise Exception("Failed to close tsfile")
+        self.writer = NULL
+    def __dealloc__(self):
+        self.free_resources()