MOD: move code to github
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index c6b42b1..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,40 +0,0 @@
-# Prerequisites
-*.d
-
-# Compiled Object files
-*.slo
-*.lo
-*.o
-*.obj
-
-# Precompiled Headers
-*.gch
-*.pch
-
-# Compiled Dynamic libraries
-*.so
-*.dylib
-*.dll
-
-# Fortran module files
-*.mod
-*.smod
-
-# Compiled Static libraries
-*.lai
-*.la
-*.a
-*.lib
-
-# Executables
-*.exe
-*.out
-*.app
-
-.idea/
-cmake-build-debug
-*.o
-*.pyc
-*.swp
-*.swo
-.DS_Store
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 614eba3..11d2dcc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,19 +1,30 @@
 image: centos7-for-kvrocks
 # image: centos6-for-kvrocks
 stages:
+    - fetch_code
     - analysis
     - build
     - test
     - pkg
     - release
 
+fetch_code:
+    tags:
+        - dev
+    stage: fetch_code
+    script:
+        - git submodule init && git submodule update --remote
+
 code_analysis:
     tags:
         - dev
     stage: analysis
     script:
+        - cd kvrocks
         - sh cpplint.sh
         - sh cppcheck.sh
+    dependencies:
+        - fetch_code
 
 build:
     stage: build
@@ -21,13 +32,15 @@
         - dev
     script:
         - rm -rf /cache/unittest
+        - cd kvrocks
         - git submodule init && git submodule update
-        - mkdir build/
+        - mkdir -p build/
         - cd build && cmake -DCMAKE_BUILD_DIRECTORY=/cache/build -DCMAKE_BUILD_TYPE=Release .. && make -j4 && cd ..
-        - mkdir -p _build/bin && cp build/kvrocks _build/bin
-        - mkdir -p _build/conf && cp kvrocks.conf _build/conf
         - cp build/unittest /cache # reuse unittest in test stage
         - cp build/kvrocks /cache # reuse kvrocks in test stage
+        - cd $CI_PROJECT_DIR
+        - mkdir -p _build/bin && cp kvrocks/build/kvrocks _build/bin
+        - mkdir -p _build/conf && cp kvrocks/kvrocks.conf _build/conf
     artifacts:
         name: "kvrocks-${CI_PIPELINE_ID}-build"
         paths:
@@ -55,7 +68,9 @@
 
     script:
         - pip install nose && pip install git+https://github.com/andymccurdy/redis-py.git@2.10.6
+        - yum install -y nc
         - mkdir /data
+        - cd kvrocks
         - sh tests/scripts/setup-env.sh /cache
         - cd tests/functional && nosetests -v
 
diff --git a/.gitmodules b/.gitmodules
index 75684d0..67d19a9 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,21 +1,3 @@
-[submodule "external/glog"]
-	path = external/glog
-	url = https://github.com/google/glog.git
-[submodule "external/gflags"]
-	path = external/gflags
-	url = https://github.com/gflags/gflags.git
-[submodule "external/rocksdb"]
-	path = external/rocksdb
-	url = https://github.com/facebook/rocksdb.git
-[submodule "external/snappy"]
-	path = external/snappy
-	url = https://github.com/google/snappy.git
-[submodule "external/libevent"]
-	path = external/libevent
-	url = https://github.com/libevent/libevent.git
-[submodule "external/googletest"]
-	path = external/googletest
-	url = https://github.com/abseil/googletest.git
-[submodule "external/jemalloc"]
-	path = external/jemalloc
-	url = https://github.com/jemalloc/jemalloc.git
+[submodule "kvrocks"]
+	path = kvrocks
+	url = https://github.com/meitu/kvrocks
diff --git a/CMakeLists.txt b/CMakeLists.txt
deleted file mode 100644
index f410069..0000000
--- a/CMakeLists.txt
+++ /dev/null
@@ -1,268 +0,0 @@
-cmake_minimum_required(VERSION 3.10)
-project(kvrocks
-        VERSION 0.3.9
-        DESCRIPTION "Redis on rocksdb"
-        LANGUAGES CXX)
-
-# External Dependences
-include(ExternalProject)
-
-set(default_build_type "Release")
-if (NOT CMAKE_BUILD_DIRECTORY)
-    set(CMAKE_BUILD_DIRECTORY ${CMAKE_BINARY_DIR})
-endif()
-# GLIBC < 2.17 should explicitly specify the real-time library when using clock_*
-find_library(REALTIME_LIB rt)
-if (REALTIME_LIB)
-    list(APPEND EXTERNAL_LIBS PRIVATE rt)
-endif()
-#list(APPEND EXTERNAL_LIBS PRIVATE jemalloc)
-
-include(cmake/jemalloc.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${jemalloc_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${jemalloc_INCLUDE_DIRS})
-
-include(cmake/gflags.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${gflags_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${gflags_INCLUDE_DIRS})
-
-include(cmake/glog.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${glog_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${glog_INCLUDE_DIRS})
-
-include(cmake/snappy.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${snappy_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${snappy_INCLUDE_DIRS})
-
-include(cmake/rocksdb.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${rocksdb_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${rocksdb_INCLUDE_DIRS})
-
-include(cmake/libevent.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${libevent_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${libevent_INCLUDE_DIRS})
-
-include(cmake/gtest.cmake)
-list(APPEND EXTERNAL_LIBS PRIVATE ${gtest_LIBRARIES})
-list(APPEND EXTERNAL_INCS PRIVATE ${gtest_INCLUDE_DIRS})
-# End dependences
-
-# Add git sha to version.h
-find_package(Git REQUIRED)
-execute_process(COMMAND git rev-parse --short HEAD OUTPUT_VARIABLE GIT_SHA)
-string(STRIP ${GIT_SHA} GIT_SHA)
-configure_file(src/version.h.in ${PROJECT_BINARY_DIR}/version.h)
-
-# Main target
-add_executable(kvrocks)
-target_compile_features(kvrocks PRIVATE cxx_std_11)
-target_compile_options(kvrocks PRIVATE -Wall -Wpedantic -g -Wsign-compare -Wreturn-type -fno-omit-frame-pointer)
-option(ENABLE_ASAN "enable ASAN sanitizer" OFF)
-if(ENABLE_ASAN)
-    target_compile_options(kvrocks PRIVATE -fsanitize=address)
-    target_link_libraries(kvrocks PRIVATE -fsanitize=address)
-endif()
-add_dependencies(kvrocks jemalloc libevent gflags glog snappy rocksdb gtest)
-target_include_directories(kvrocks PRIVATE ${PROJECT_BINARY_DIR})
-target_include_directories(kvrocks ${EXTERNAL_INCS})
-target_link_libraries(kvrocks PRIVATE -fno-omit-frame-pointer)
-target_link_libraries(kvrocks ${EXTERNAL_LIBS} -pthread)
-target_sources(kvrocks PRIVATE
-        src/redis_db.cc
-        src/redis_db.h
-        src/redis_connection.cc
-        src/redis_connection.h
-        src/compact_filter.cc
-        src/compact_filter.h
-        src/worker.cc
-        src/worker.h
-        src/main.cc
-        src/redis_request.cc
-        src/redis_request.h
-        src/redis_cmd.cc
-        src/redis_cmd.h
-        src/util.cc
-        src/util.h
-        src/storage.cc
-        src/storage.h
-        src/status.h
-        src/redis_reply.h
-        src/redis_reply.cc
-        src/replication.cc
-        src/replication.h
-        src/task_runner.cc
-        src/task_runner.h
-        src/encoding.h
-        src/encoding.cc
-        src/redis_metadata.h
-        src/redis_metadata.cc
-        src/redis_string.h
-        src/redis_string.cc
-        src/redis_hash.h
-        src/redis_hash.cc
-        src/redis_list.h
-        src/redis_list.cc
-        src/redis_set.h
-        src/redis_set.cc
-        src/redis_zset.cc
-        src/redis_zset.h
-        src/redis_bitmap.cc
-        src/redis_bitmap.h
-        src/redis_pubsub.cc
-        src/redis_pubsub.h
-        src/lock_manager.cc
-        src/rocksdb_crc32c.h
-        src/config.cc
-        src/config.h
-        src/stats.cc
-        src/stats.h
-        src/server.cc
-        src/server.h
-        src/cron.cc
-        src/cron.h
-        src/event_listener.h
-        src/event_listener.cc)
-
-# kvrocks2redis sync tool
-add_executable(kvrocks2redis)
-target_compile_features(kvrocks2redis PRIVATE cxx_std_11)
-target_compile_options(kvrocks2redis PRIVATE -Wall -Wpedantic -g -Wsign-compare -Wreturn-type)
-option(ENABLE_ASAN "enable ASAN sanitizer" OFF)
-if(ENABLE_ASAN)
-    target_compile_options(kvrocks2redis PRIVATE -fno-omit-frame-pointer -fsanitize=address)
-    target_link_libraries(kvrocks2redis PRIVATE -fno-omit-frame-pointer -fsanitize=address)
-endif()
-add_dependencies(kvrocks2redis libevent gflags glog rocksdb gtest)
-target_include_directories(kvrocks2redis PRIVATE ${PROJECT_BINARY_DIR})
-target_include_directories(kvrocks2redis ${EXTERNAL_INCS})
-target_link_libraries(kvrocks2redis ${EXTERNAL_LIBS} -pthread)
-target_sources(kvrocks2redis PRIVATE
-        src/redis_db.cc
-        src/redis_db.h
-        src/compact_filter.cc
-        src/compact_filter.h
-        src/worker.cc
-        src/worker.h
-        src/util.cc
-        src/util.h
-        src/redis_connection.cc
-        src/redis_connection.h
-        src/redis_request.cc
-        src/redis_request.h
-        src/redis_cmd.cc
-        src/redis_cmd.h
-        src/storage.cc
-        src/storage.h
-        src/status.h
-        src/redis_reply.h
-        src/redis_reply.cc
-        src/task_runner.cc
-        src/task_runner.h
-        src/encoding.h
-        src/encoding.cc
-        src/redis_metadata.h
-        src/redis_metadata.cc
-        src/redis_string.h
-        src/redis_string.cc
-        src/redis_hash.h
-        src/redis_hash.cc
-        src/redis_list.h
-        src/redis_list.cc
-        src/redis_set.h
-        src/redis_set.cc
-        src/redis_zset.cc
-        src/redis_zset.h
-        src/redis_bitmap.cc
-        src/redis_bitmap.h
-        src/redis_pubsub.cc
-        src/redis_pubsub.h
-        src/replication.cc
-        src/replication.h
-        src/lock_manager.cc
-        src/rocksdb_crc32c.h
-        src/config.cc
-        src/config.h
-        src/stats.cc
-        src/stats.h
-        src/server.cc
-        src/server.h
-        src/cron.cc
-        src/cron.h
-        src/event_listener.h
-        src/event_listener.cc
-        tools/kvrocks2redis/config.cc
-        tools/kvrocks2redis/config.h
-        tools/kvrocks2redis/main.cc
-        tools/kvrocks2redis/sync.cc
-        tools/kvrocks2redis/sync.h
-        tools/kvrocks2redis/util.cc
-        tools/kvrocks2redis/util.h
-        tools/kvrocks2redis/redis_writer.cc
-        tools/kvrocks2redis/redis_writer.h
-        tools/kvrocks2redis/writer.cc
-        tools/kvrocks2redis/writer.h
-        tools/kvrocks2redis/parser.cc
-        tools/kvrocks2redis/parser.h)
-
-add_executable(unittest
-        src/server.cc
-        src/server.h
-        src/config.cc
-        src/config.h
-        src/worker.cc
-        src/worker.h
-        src/redis_connection.cc
-        src/redis_connection.h
-        src/redis_pubsub.cc
-        src/redis_pubsub.h
-        src/redis_cmd.cc
-        src/redis_cmd.h
-        src/redis_request.cc
-        src/redis_request.h
-        src/replication.cc
-        src/replication.h
-        src/redis_reply.cc
-        src/redis_reply.h
-        src/redis_bitmap.cc
-        src/redis_bitmap.h
-        src/redis_metadata.cc
-        src/encoding.cc
-        src/redis_string.cc
-        src/redis_hash.cc
-        src/redis_list.cc
-        src/redis_set.cc
-        src/redis_zset.cc
-        src/util.cc
-        src/storage.cc
-        src/lock_manager.cc
-        src/stats.cc
-        src/event_listener.cc
-        src/task_runner.cc
-        src/cron.cc
-        src/compact_filter.cc
-        src/redis_db.cc
-        src/redis_db.h
-        tests/main.cc
-        tests/test_base.h
-        tests/t_string_test.cc
-        tests/t_encoding_test.cc
-        tests/t_list_test.cc
-        tests/t_hash_test.cc
-        tests/t_set_test.cc
-        tests/t_zset_test.cc
-        tests/t_metadata_test.cc
-        tests/string_util_test.cc
-        tests/rwlock_test.cc
-        tests/stats_test.cc
-        tests/cron_test.cc
-        tests/config_test.cc
-        tests/task_runner_test.cc
-        tests/t_bitmap_test.cc
-        tests/compact_test.cc)
-
-add_dependencies(unittest gflags glog rocksdb gtest)
-target_compile_features(unittest PRIVATE cxx_std_11)
-target_link_libraries(unittest ${EXTERNAL_LIBS})
-target_include_directories(unittest PRIVATE ${PROJECT_BINARY_DIR})
-target_include_directories(unittest ${EXTERNAL_INCS})
-target_include_directories(unittest PRIVATE src)
diff --git a/CONTRIBUTING b/CONTRIBUTING
deleted file mode 100644
index cdaae9c..0000000
--- a/CONTRIBUTING
+++ /dev/null
@@ -1,16 +0,0 @@
-# How to write a commit message
-
-1. Should start with the prefix DOC|TST|MOD|ADD|FIX, followed by a colon
-2. Present-tense summary under 50 characters
-3. Add the issue number if one exists
-
-For example:
-
-```
-FIX: remove unnecessary lines in readme. Fixes #12
-
-* More information about commit (under 72 characters)  
-* More information about commit (under 72 characters)
-```
-
-Thanks!
diff --git a/Changelog b/Changelog
deleted file mode 100644
index 6f4b255..0000000
--- a/Changelog
+++ /dev/null
@@ -1,126 +0,0 @@
-* Version 0.3.9(@2019-08-12)
-	- fix list trim with a range larger than the real list length may write an incorrect list length
-
-* Version 0.3.8(@2019-08-07)
-	- fix delete cf handlers should wait for rocksdb's compact threads, or it
-	may cause core
-
-* Version 0.3.7(@2019-08-07)
-	- fix zrevrangebyscore,zrevrank,zrevrange,zpopmax would return incorrect result
-	- fix zrangebyscore,zrangebylex would return incorrect result
-
-* Version 0.3.6(@2019-08-02)
-	- fix return error when the protocol is invalid
-	- lock the key to prevent the data race in set command
-
-* Version 0.3.5(@2019-07-30)
-	- fix ZREVRANK and ZRANK should return a nil string instead of 0 if the member doesn't exist
-
-* Version 0.3.4(@2019-07-29)
-	- fix concurrent write command stats which may cause core
-
-* Version 0.3.3(@2019-07-05)
-	- add io limit for compaction and flush
-	- allow some rocksdb options to be modified in-flight
-	- add filter/index cache into the block cache 
-	- fix cleanup master host and port when replication thread was stopped
-
-* Version 0.3.2(@2019-06-18)
-	- fix disconnect all slaves and wait for inusing db_ pointer before restore the db from backup
-	- fix checkWALBoundary return incorrect status when iter was out of range
-	- fix allow auth command when loading
-
-* Version 0.3.1(@2019-06-17)
-	- fix open file with O_CREAT flag need to specify open mode
-	- fix backward compatible with old version that doesn't support replconf cmd
-
-* Version 0.3.0(@2019-06-13)
-	- supports redis sentinel
-	- add jemalloc submodule and use static
-
-* Version 0.2.3(@2019-06-06)
-	- use jemalloc as memory allocator in rocksdb
-	- supports modify the dump stats period online
-
-* Version 0.2.2(@2019-05-22)
-	- supports makefile
-
-* Version 0.2.1(@2019-05-22)
-	- fix subkey compact filter may delete the living data
-	- supports setxx command
-
-* Version 0.2.0(@2019-05-21)
-	- supports one replication thread per slave
-	- supports limit speed of fetching file in master
-
-* Version 0.1.18(@2019-05-17)
-	- supports pubsub/monitor commands
-	- fix some memory/fd leak
-
-* Version 0.1.17(@2019-05-07)
-	- supports brpop/blpop/randomkey command
-	- fix client kill didn't work when the connection was doing the streaming batch
-
-* Version 0.1.16(@2019-04-25)
-	- fix core dump when execute `client kill id=3`
-
-* Version 0.1.15(@2019-04-25)
-	- use no-omit-frame-pointer when compile
-
-* Version 0.1.14(@2019-04-23)
-	- fix Guard against discrete WAL log sequence in replication
-	- increase the sleep time from 1ms to 100ms
-
-* Version 0.1.13(@2019-04-18)
-	- supports configure slowlog-log-slower-than and slowlog-max-len through config command
-	- print the flush information in the flush event listener
-
-* Version 0.1.12(@2019-04-17)
-	- supports configure the compression type
-	- print the segmentation fault coredump stacktrace to the log file
-	- fix some memory leak when shutdown
-
-* Version 0.1.11(@2019-04-15)
-	- supports limit the size of db
-	- supports configuring the metadata/subkey block cache size
-	- fix config set maxclients would reset the timeout with maxclients 
-
-* Version 0.1.10(@2019-04-04)
-    - supports linsert/lrem commands
-	- add compact/pipeline test in CI
-	- fix unexpected expired behavior
-	- fix warning
-
-* Version 0.1.9(@2019-04-01)
-	- add replication test in CI
-	- fix parameter should not be converted to lower case when loading from config file
-
-* Version 0.1.8(@2019-03-19)
-	- remove the pidfile option in config
-
-* Version 0.1.7(@2019-03-19)
-	- fix memory leak which detected by valgrind
-
-* Version 0.1.6(@2019-03-18)
-	- supports append/getrange/expireat/pexpire/pexpireat/pttl commands
-	- fix trivial logic bugs in redis commands
-	- add functional test in CI
-
-* Version 0.1.5(@2019-03-08)
-	- speedup the gitlab ci build
-	- fix return error bit value in getbit
-	- fix config rewrite wrong master port
-
-* Version 0.1.4(@2019-03-06)
-	- supports pid file to store the pid
-
-* Version 0.1.3(@2019-03-05)
-	- fix some redis commands bug
-	- supports bitmap
-
-* Version 0.1.2 (@2019-01-18)
-	- supports bits command
-
-* Version 0.1.0 (@2019-01-18)
-	- init
-
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 21214f6..0000000
--- a/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-BUILD_DIR=./build
-INSTALL_DIR=/usr/local
-BIN_DIR=$(INSTALL_DIR)/bin
-INSTALL=/usr/bin/install
-
-all: kvrocks
-.PHONY: all test
-
-kvrocks:
-	@mkdir -p $(BUILD_DIR)
-	@sh ./build.sh $(BUILD_DIR)
-	@echo "" 
-	@echo "Hint: It's a good idea to run 'make test' ;)"
-	@echo ""
-
-test:
-	@./$(BUILD_DIR)/unittest
-
-clean:
-	@cd $(BUILD_DIR) && make clean
-
-distclean:
-	@rm -rf $(BUILD_DIR)/*
-
-install:
-	mkdir -p $(BIN_DIR)
-	$(INSTALL) $(BUILD_DIR)/kvrocks $(BIN_DIR)
-	$(INSTALL) $(BUILD_DIR)/kvrocks2redis $(BIN_DIR)
-	@echo ""
-	@echo "Installed successfully, everything is OK!"
-	@echo ""
diff --git a/README.md b/README.md
index 1c6d178..87b0888 100644
--- a/README.md
+++ b/README.md
@@ -1,37 +1,3 @@
 # kvrocks
 
-kvrocks (kv-rocks) is an SSD NoSQL store based on rocksdb, compatible with the Redis protocol, intended to decrease the cost of memory and increase the capacity.
-
-***Features:***
-* Redis API, see the [docs/support-commands](https://gitlab.meitu.com/platform/kvrocks/blob/master/docs/support-commands.md)
-* Replication, similar to redis's master-slave
-* Compatible with redis-sentinel
-
-## Dependencies
-
-* gcc-g++ (required by c++11, version >= 4.8)
-* autoconf (required by jemalloc)
-
-## How to install
-
-```shell
-$ git clone --recursive https://gitlab.meitu.com/platform/kvrocks.git
-$ cd kvrocks
-$ mkdir build; cd build
-$ cmake ..
-$ make -j2
-```
-
-## Docs
-
-* [support commands](https://gitlab.meitu.com/platform/kvrocks/blob/master/docs/support-commands.md)
-* [replication design](https://gitlab.meitu.com/platform/kvrocks/blob/master/docs/replication-design.md)
-* [metadata design](https://gitlab.meitu.com/platform/kvrocks/blob/master/docs/metadata-design.md)
-
-## Benchmark
-
-## Tools
-
-* kvrocks2redis - migrate key value from kvrocks to redis online
-
-## Performance
+kvrocks gitlab ci packager
diff --git a/build.sh b/build.sh
deleted file mode 100755
index 3dbf51e..0000000
--- a/build.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env bash
-
-if [ "$#" -ne 1 ]; then
-  echo "Usage: $0 BUILD_DIR" >&2
-  exit 1
-fi
-
-BUILD_DIR=$1
-WORKING_DIR=$(pwd)
-CMAKE_INSTALL_DIR=$WORKING_DIR/$BUILD_DIR/cmake
-CMAKE_REQUIRE_VERSION="3.13.0"
-
-RED='\033[0;31m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-if [ ! -x "$(command -v autoconf)" ]; then
-    printf ${RED}"autoconf is required to build jemalloc\n"${NC}
-    printf ${YELLOW}"Please use 'yum install -y autoconf automake libtool' in centos/redhat, or use 'apt-get install autoconf automake libtool' in debian/ubuntu"${NC}"\n"
-    exit 1
-fi
-
-if [ -x "$(command -v cmake)" ]; then
-    CMAKE_BIN=$(command -v cmake)
-fi
-
-if [ -x "$CMAKE_INSTALL_DIR/bin/cmake" ]; then
-    CMAKE_BIN=$CMAKE_INSTALL_DIR/bin/cmake
-fi
-
-if [ -f "$CMAKE_BIN" ]; then
-    CMAKE_VERSION=`$CMAKE_BIN -version | head -n 1 | sed 's/[^0-9.]*//g'`
-else
-    CMAKE_VERSION=0
-fi
-
-if [ "$(printf '%s\n' "$CMAKE_REQUIRE_VERSION" "$CMAKE_VERSION" | sort -V | head -n1)" != "$CMAKE_REQUIRE_VERSION" ]; then
-    printf ${YELLOW}"CMake $CMAKE_REQUIRE_VERSION or higher is required. Trying to install CMake $CMAKE_REQUIRE_VERSION ..."${NC}"\n"
-    if [ ! -x "$(command -v curl)" ]; then
-        printf ${RED}"Please install the curl first to download the cmake"${NC}"\n"
-    fi
-    mkdir -p $BUILD_DIR/cmake
-    cd $BUILD_DIR
-    curl -O -L https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2.tar.gz
-    tar -zxf cmake-3.13.2.tar.gz && cd cmake-3.13.2
-    ./bootstrap --prefix=$CMAKE_INSTALL_DIR && make && make install && cd ../..
-    CMAKE_BIN=$CMAKE_INSTALL_DIR/bin/cmake
-fi
-
-cd $BUILD_DIR && $CMAKE_BIN -DCMAKE_BUILD_TYPE=Release .. && make -j4
diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake
deleted file mode 100644
index 6b82d07..0000000
--- a/cmake/gflags.cmake
+++ /dev/null
@@ -1,43 +0,0 @@
-if (NOT __GFLAGS_INCLUDED) # guard against multiple includes
-    set(__GFLAGS_INCLUDED TRUE)
-
-    # gflags will use pthreads if it's available in the system, so we must link with it
-    find_package(Threads)
-
-    # build directory
-    set(gflags_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/gflags-prefix)
-    # install directory
-    set(gflags_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/gflags-install)
-
-    if (UNIX)
-        set(GFLAGS_EXTRA_COMPILER_FLAGS "-fPIC")
-    endif()
-
-    set(GFLAGS_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS})
-    set(GFLAGS_C_FLAGS ${CMAKE_C_FLAGS} ${GFLAGS_EXTRA_COMPILER_FLAGS})
-
-    ExternalProject_Add(gflags
-        PREFIX ${gflags_PREFIX}
-        #GIT_REPOSITORY "https://github.com/gflags/gflags.git"
-        #GIT_TAG "v2.2.1"
-        SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/gflags
-        INSTALL_DIR ${gflags_INSTALL}
-        CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-                   -DCMAKE_INSTALL_PREFIX=${gflags_INSTALL}
-                   -DBUILD_SHARED_LIBS=OFF
-                   -DBUILD_STATIC_LIBS=ON
-                   -DBUILD_PACKAGING=OFF
-                   -DBUILD_TESTING=OFF
-                   -DBUILD_NC_TESTS=OFF
-                   -DBUILD_CONFIG_TESTS=OFF
-                   -DINSTALL_HEADERS=ON
-                   -DCMAKE_C_FLAGS=${GFLAGS_C_FLAGS}
-                   -DCMAKE_CXX_FLAGS=${GFLAGS_CXX_FLAGS}
-        LOG_DOWNLOAD 1
-        LOG_INSTALL 1
-        )
-
-    set(gflags_FOUND TRUE)
-    set(gflags_INCLUDE_DIRS ${gflags_INSTALL}/include)
-    set(gflags_LIBRARIES ${gflags_INSTALL}/lib/libgflags.a ${CMAKE_THREAD_LIBS_INIT})
-endif()
diff --git a/cmake/glog.cmake b/cmake/glog.cmake
deleted file mode 100644
index 0bae263..0000000
--- a/cmake/glog.cmake
+++ /dev/null
@@ -1,46 +0,0 @@
-# glog depends on gflags
-include("cmake/gflags.cmake")
-
-if (NOT __GLOG_INCLUDED)
-    set(__GLOG_INCLUDED TRUE)
-    # build directory
-    set(glog_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/glog-prefix)
-    # install directory
-    set(glog_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/glog-install)
-
-    if (UNIX)
-        set(GLOG_EXTRA_COMPILER_FLAGS "-fPIC")
-    endif()
-
-    set(GLOG_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS}")
-    set(GLOG_C_FLAGS "${CMAKE_C_FLAGS} ${GLOG_EXTRA_COMPILER_FLAGS}")
-
-    ExternalProject_Add(glog
-        DEPENDS gflags
-        PREFIX ${glog_PREFIX}
-        #GIT_REPOSITORY "https://github.com/google/glog"
-        #GIT_TAG "v0.3.5"
-        SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/glog
-        INSTALL_DIR ${glog_INSTALL}
-        CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-                   -DCMAKE_INSTALL_PREFIX=${glog_INSTALL}
-                   -DBUILD_SHARED_LIBS=OFF
-                   -DBUILD_STATIC_LIBS=ON
-                   -DBUILD_PACKAGING=OFF
-                   -DBUILD_TESTING=OFF
-                   -DBUILD_NC_TESTS=OFF
-                   -DBUILD_CONFIG_TESTS=OFF
-                   -DINSTALL_HEADERS=ON
-                   -DCMAKE_C_FLAGS=${GLOG_C_FLAGS}
-                   -DCMAKE_CXX_FLAGS=${GLOG_CXX_FLAGS}
-                   -DCMAKE_PREFIX_PATH=${gflags_INSTALL}
-        LOG_DOWNLOAD 1
-        LOG_CONFIGURE 1
-        LOG_INSTALL 1
-        )
-
-    set(glog_FOUND TRUE)
-    set(glog_INCLUDE_DIRS ${glog_INSTALL}/include)
-    set(glog_LIBRARIES ${glog_INSTALL}/lib/libglog.a)
-endif()
-
diff --git a/cmake/gtest.cmake b/cmake/gtest.cmake
deleted file mode 100644
index 30ca321..0000000
--- a/cmake/gtest.cmake
+++ /dev/null
@@ -1,35 +0,0 @@
-if (NOT __GTEST_INCLUDED) # guard against multiple includes
-    set(__GTEST_INCLUDED TRUE)
-
-    # gtest will use pthreads if it's available in the system, so we must link with it
-    find_package(Threads)
-
-    # build directory
-    set(gtest_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/gtest-prefix)
-    # install directory
-    set(gtest_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/gtest-install)
-
-    if (UNIX)
-        set(GTEST_EXTRA_COMPILER_FLAGS "-fPIC")
-    endif()
-
-    set(GTEST_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${GTEST_EXTRA_COMPILER_FLAGS})
-    set(GTEST_C_FLAGS ${CMAKE_C_FLAGS} ${GTEST_EXTRA_COMPILER_FLAGS})
-
-    ExternalProject_Add(gtest
-        PREFIX ${gtest_PREFIX}
-        SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/googletest
-        INSTALL_DIR ${gtest_INSTALL}
-        CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
-                   -DCMAKE_INSTALL_PREFIX=${gtest_INSTALL}
-                   -DCMAKE_C_FLAGS=${GTEST_C_FLAGS}
-                   -DCMAKE_CXX_FLAGS=${GTEST_CXX_FLAGS}
-                   -DBUILD_GMOCK=OFF
-        LOG_DOWNLOAD 1
-        LOG_INSTALL 1
-        )
-
-    set(gtest_FOUND TRUE)
-    set(gtest_INCLUDE_DIRS ${gtest_INSTALL}/include)
-    set(gtest_LIBRARIES ${gtest_INSTALL}/${CMAKE_INSTALL_LIBDIR}/libgtest.a ${CMAKE_THREAD_LIBS_INIT})
-endif()
diff --git a/cmake/jemalloc.cmake b/cmake/jemalloc.cmake
deleted file mode 100644
index a655d07..0000000
--- a/cmake/jemalloc.cmake
+++ /dev/null
@@ -1,34 +0,0 @@
-if (NOT __JEMALLOC_INCLUDED) # guard against multiple includes
-    set(__JEMALLOC_INCLUDED TRUE)
-
-    # build directory
-    set(jemalloc_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/jemalloc-prefix)
-    # install directory
-    set(jemalloc_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/jemalloc-install)
-    set(JEMALLOC_SOURCE_DIR "${PROJECT_SOURCE_DIR}/external/jemalloc")
-
-    if (UNIX)
-        set(JEMALLOC_EXTRA_COMPILER_FLAGS "-fPIC")
-    endif()
-
-    set(JEMALLOC_CXX_FLAGS ${CMAKE_CXX_FLAGS} ${JEMALLOC_EXTRA_COMPILER_FLAGS})
-    set(JEMALLOC_C_FLAGS ${CMAKE_C_FLAGS} ${JEMALLOC_EXTRA_COMPILER_FLAGS})
-
-    ExternalProject_Add(jemalloc
-        SOURCE_DIR ${JEMALLOC_SOURCE_DIR}
-        PREFIX ${jemalloc_PREFIX}
-        INSTALL_DIR ${jemalloc_INSTALL}
-        CONFIGURE_COMMAND ${JEMALLOC_SOURCE_DIR}/configure --enable-autogen --disable-libdl --prefix=${jemalloc_INSTALL}
-        BUILD_COMMAND make
-        INSTALL_COMMAND make dist COMMAND make install
-    )
-    ExternalProject_Add_Step(jemalloc autoconf
-        COMMAND autoconf
-        WORKING_DIRECTORY ${JEMALLOC_SOURCE_DIR}
-        COMMENT  "Jemalloc autoconf"
-        LOG 1
-    )
-    set(jemalloc_FOUND TRUE)
-    set(jemalloc_INCLUDE_DIRS ${jemalloc_INSTALL}/include)
-    set(jemalloc_LIBRARIES ${jemalloc_INSTALL}/lib/libjemalloc.a ${CMAKE_THREAD_LIBS_INIT})
-endif()
diff --git a/cmake/libevent.cmake b/cmake/libevent.cmake
deleted file mode 100644
index 3209558..0000000
--- a/cmake/libevent.cmake
+++ /dev/null
@@ -1,36 +0,0 @@
-if (NOT __LIBEVENT_INCLUDED)
-    set(__LIBEVENT_INCLUDED TRUE)
-    # build directory
-    set(libevent_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/libevent-prefix)
-    # install directory
-    set(libevent_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/libevent-install)
-
-    if (UNIX)
-        set(LIBEVENT_EXTRA_COMPILER_FLAGS "-fPIC")
-    endif()
-
-    set(LIBEVENT_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${LIBEVENT_EXTRA_COMPILER_FLAGS}")
-    set(LIBEVENT_C_FLAGS "${CMAKE_C_FLAGS} ${LIBEVENT_EXTRA_COMPILER_FLAGS}")
-
-    ExternalProject_Add(libevent
-        PREFIX ${libevent_PREFIX}
-        SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/libevent
-        INSTALL_DIR ${libevent_INSTALL}
-        CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-                   -DCMAKE_INSTALL_PREFIX=${libevent_INSTALL}
-                   -DCMAKE_C_FLAGS=${LIBEVENT_C_FLAGS}
-                   -DCMAKE_CXX_FLAGS=${LIBEVENT_CXX_FLAGS}
-                   -DEVENT__DISABLE_TESTS=ON
-                   -DEVENT__DISABLE_REGRESS=ON
-                   -DEVENT__DISABLE_SAMPLES=ON
-                   -DEVENT__DISABLE_OPENSSL=ON
-                   -DEVENT__LIBRARY_TYPE=STATIC
-        LOG_DOWNLOAD 1
-        LOG_CONFIGURE 1
-        LOG_INSTALL 1
-        )
-
-    set(libevent_FOUND TRUE)
-    set(libevent_INCLUDE_DIRS ${libevent_INSTALL}/include)
-    set(libevent_LIBRARIES ${libevent_INSTALL}/lib/libevent.a ${libevent_INSTALL}/lib/libevent_pthreads.a)
-endif()
diff --git a/cmake/rocksdb.cmake b/cmake/rocksdb.cmake
deleted file mode 100644
index a5a6a3e..0000000
--- a/cmake/rocksdb.cmake
+++ /dev/null
@@ -1,51 +0,0 @@
-if (NOT __ROCKSDB_INCLUDED)
-  set(__ROCKSDB_INCLUDED TRUE)
-  # build directory
-  set(rocksdb_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/rocksdb-prefix)
-  # install directory
-  set(rocksdb_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/rocksdb-install)
-
-  if (UNIX)
-      set(ROCKSDB_EXTRA_COMPILER_FLAGS "-fPIC")
-  endif()
-
-  set(ROCKSDB_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${ROCKSDB_EXTRA_COMPILER_FLAGS}")
-  set(ROCKSDB_C_FLAGS "${CMAKE_C_FLAGS} ${ROCKSDB_EXTRA_COMPILER_FLAGS}")
-  set(JEMALLOC_ROOT_DIR ${jemalloc_INSTALL})
-  ExternalProject_Add(rocksdb
-      DEPENDS gflags jemalloc snappy
-      PREFIX ${rocksdb_PREFIX}
-      #GIT_REPOSITORY "https://github.com/facebook/rocksdb"
-      #GIT_TAG "v5.15.10"
-      SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/rocksdb
-      INSTALL_DIR ${rocksdb_INSTALL}
-      CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-                 -DCMAKE_INSTALL_PREFIX=${rocksdb_INSTALL}
-                 -DBUILD_SHARED_LIBS=OFF
-                 -DBUILD_STATIC_LIBS=ON
-                 -DBUILD_PACKAGING=OFF
-                 -DBUILD_TESTING=OFF
-                 -DBUILD_NC_TESTS=OFF
-                 -DBUILD_CONFIG_TESTS=OFF
-                 -DINSTALL_HEADERS=ON
-                 -DCMAKE_C_FLAGS=${ROCKSDB_C_FLAGS}
-                 -DCMAKE_CXX_FLAGS=${ROCKSDB_CXX_FLAGS}
-                 -DCMAKE_PREFIX_PATH=${snappy_INSTALL}
-                 -DJEMALLOC_ROOT_DIR=${JEMALLOC_ROOT_DIR}
-                 -DFAIL_ON_WARNINGS=OFF
-                 -DWITH_TESTS=OFF
-                 -DWITH_SNAPPY=ON
-                 -DWITH_TOOLS=OFF
-                 -DUSE_RTTI=ON
-                 -DWITH_JEMALLOC=ON
-      LOG_DOWNLOAD 1
-      LOG_CONFIGURE 1
-      LOG_INSTALL 1
-      )
-
-  include(GNUInstallDirs)
-  set(rocksdb_FOUND TRUE)
-  set(rocksdb_INCLUDE_DIRS ${rocksdb_INSTALL}/include)
-  set(rocksdb_LIBRARIES ${rocksdb_INSTALL}/${CMAKE_INSTALL_LIBDIR}/librocksdb.a)
-endif()
-
diff --git a/cmake/snappy.cmake b/cmake/snappy.cmake
deleted file mode 100644
index 1f3bbd6..0000000
--- a/cmake/snappy.cmake
+++ /dev/null
@@ -1,43 +0,0 @@
-if (NOT __SNAPPY_INCLUDED)
-  set(__SNAPPY_INCLUDED TRUE)
-  # build directory
-  set(snappy_PREFIX ${CMAKE_BUILD_DIRECTORY}/external/snappy-prefix)
-  # install directory
-  set(snappy_INSTALL ${CMAKE_BUILD_DIRECTORY}/external/snappy-install)
-
-  if (UNIX)
-      set(SNAPPY_EXTRA_COMPILER_FLAGS "-fPIC")
-  endif()
-
-  set(SNAPPY_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SNAPPY_EXTRA_COMPILER_FLAGS}")
-  set(SNAPPY_C_FLAGS "${CMAKE_C_FLAGS} ${SNAPPY_EXTRA_COMPILER_FLAGS}")
-
-  ExternalProject_Add(snappy
-      PREFIX ${snappy_PREFIX}
-      #GIT_REPOSITORY "https://github.com/google/snappy"
-      #GIT_TAG "1.1.7"
-      SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/snappy
-      INSTALL_DIR ${snappy_INSTALL}
-      CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-                 -DCMAKE_INSTALL_PREFIX=${snappy_INSTALL}
-                 -DBUILD_SHARED_LIBS=OFF
-                 -DBUILD_STATIC_LIBS=ON
-                 -DBUILD_PACKAGING=OFF
-                 -DBUILD_TESTING=OFF
-                 -DBUILD_NC_TESTS=OFF
-                 -DBUILD_CONFIG_TESTS=OFF
-                 -DINSTALL_HEADERS=ON
-                 -DCMAKE_C_FLAGS=${SNAPPY_C_FLAGS}
-                 -DCMAKE_CXX_FLAGS=${SNAPPY_CXX_FLAGS}
-                 -DSNAPPY_BUILD_TESTS=OFF
-      LOG_DOWNLOAD 1
-      LOG_CONFIGURE 1
-      LOG_INSTALL 1
-      )
-
-  include(GNUInstallDirs)
-  set(snappy_FOUND TRUE)
-  set(snappy_INCLUDE_DIRS ${snappy_INSTALL}/include)
-  set(snappy_LIBRARIES ${snappy_INSTALL}/${CMAKE_INSTALL_LIBDIR}/libsnappy.a)
-endif()
-
diff --git a/cppcheck.sh b/cppcheck.sh
deleted file mode 100755
index 5ab18eb..0000000
--- a/cppcheck.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-CHECK_TYPES="warning,performance,portability,information,missingInclude"
-STANDARD=c++11
-ERROR_EXITCODE=1
-LANG=c++
-cppcheck --enable=${CHECK_TYPES} -U__GNUC__ -x ${LANG}  src --std=${STANDARD} --error-exitcode=${ERROR_EXITCODE}
diff --git a/cpplint.sh b/cpplint.sh
deleted file mode 100755
index 9c519af..0000000
--- a/cpplint.sh
+++ /dev/null
@@ -1 +0,0 @@
-cpplint --linelength=120 --filter=-build/include_subdir,-legal/copyright,-build/c++11 src/*.h src/*.cc
diff --git a/docs/images/chart-commands.png b/docs/images/chart-commands.png
deleted file mode 100644
index d9a250a..0000000
--- a/docs/images/chart-commands.png
+++ /dev/null
Binary files differ
diff --git a/docs/images/chart-threads.png b/docs/images/chart-threads.png
deleted file mode 100644
index 5912636..0000000
--- a/docs/images/chart-threads.png
+++ /dev/null
Binary files differ
diff --git a/docs/images/chart-values.png b/docs/images/chart-values.png
deleted file mode 100644
index f7e0d70..0000000
--- a/docs/images/chart-values.png
+++ /dev/null
Binary files differ
diff --git a/docs/metadata-design.md b/docs/metadata-design.md
deleted file mode 100644
index 23e95b8..0000000
--- a/docs/metadata-design.md
+++ /dev/null
@@ -1,168 +0,0 @@
-# Design Complex Structure On Rocksdb
-
-kvrocks uses rocksdb as its storage engine; rocksdb is developed by Facebook and built on LevelDB with many extra features, like column families, transactions, and backups, see the rocksdb wiki: [Features Not In LevelDB](https://github.com/facebook/rocksdb/wiki/Features-Not-in-LevelDB). the basic operations in rocksdb are `Put(key, value)`, `Get(key)`, `Delete(key)`; other complex structures aren't supported. the main goal of this doc is to explain how we built the Redis hash/list/set/zset/bitmap on rocksdb. most of the design was derived from Qihoo360 `Blackwidow`, with small modifications, like the bitmap design, which is a really interesting part.
-
-## String
-
-Redis string is key-value with expire time, so it's very easy to translate the Redis string into rocksdb key-value. 
-
-```shell
-        +----------+------------+--------------------+
-key =>  |  flags   |  expire    |       payload      |
-        | (1byte)  | (4byte)    |       (Nbyte)      |
-        +----------+------------+--------------------+
-```
-
-we prepend 1-byte `flags` and 4-bytes expire before the user's value:
-
--  `flags` is used to tell kvrocks the type of this key-value, one of `string`/`hash`/`list`/`zset`/`bitmap`
-- `expire` stores the absolute time at which the key should expire; zero means the key-value never expires
-- `payload` is the user's raw value
-
-## Hash
-
-Redis hashmap (dict) is like the hashmap in many languages; it implements an associative array abstract data type, a structure that can map keys to values. the direct way to implement the hash in rocksdb is to serialize the keys/values into one value and store it like a string, but the drawback is the performance impact when the keys/values grow bigger. so we split the hash sub keys/values into individual key-values in rocksdb and track them with metadata.
-
-#### hash metadata
-
-```shell
-        +----------+------------+-----------+-----------+
-key =>  |  flags   |  expire    |  version  |  size     |
-        | (1byte)  | (4byte)    |  (8byte)  | (8byte)   |
-        +----------+------------+-----------+-----------+
-```
-
-the value of this key is what we call metadata here; it stores the metadata of the hash key, which includes:
-
-- `flags` like the string, the field was used to tell which type of this key
-- `expire ` is same with string type, record the expire time
-- `version` is used to accomplish fast delete when the number of sub keys/values grows big
-- `size` records the number of sub keys/values in this hash key
-
-#### hash sub keys-values
-
-we use extra keys-values to store the hash keys-values,  the format like this:
-
-```shell
-                     +---------------+
-key|version|field => |     value     |
-                     +---------------+
-```
-
-we prepend the hash `key` and `version` before the hash field; the value of `version` comes from the metadata. for example, when the request `hget h1 f1` is received, kvrocks would fetch the metadata by hash key (here `h1`) first, concat the hash key, version, and field as the new key, then fetch the value with that new key.
-
-
-
-***Question1:  why store version in metadata***
-
-> we split the hash keys/values into individual key-values, so one hash key may hold millions of sub key-values. if the user deletes this key, kvrocks would have to iterate over and delete millions of sub key-values, which would cause a performance problem. with the version we can quickly delete the metadata and then recycle the other key-values in the background compaction threads. the cost is that those tombstone keys take some disk storage. you can regard the version as an atomically incremented number, but it's combined with a timestamp.
-
-
-
-***Question2:  what can we do if the user key was conflicted with composed key?***
-
-> we store the metadata key and composed key in different column families, so it wouldn't happen
-
-## Set
-
-Redis set can be regarded as a hash whose sub-key values are always null; the metadata is the same as the hash:
-
-```shell
-        +----------+------------+-----------+-----------+
-key =>  |  flags   |  expire    |  version  |  size     |
-        | (1byte)  | (4byte)    |  (8byte)  | (8byte)   |
-        +----------+------------+-----------+-----------+
-```
-
-and the sub keys-values in rocksdb would be:
-
-```shell
-                      +---------------+
-key|version|member => |     NULL      |
-                      +---------------+
-```
-
-## List
-
-#### list metadata
-
-Redis list also organized by metadata and sub keys-values, and sub key is index instead of user key.  metadata like below:
-
-```shell
-        +----------+------------+-----------+-----------+-----------+-----------+
-key =>  |  flags   |  expire    |  version  |  size     |  head     |  tail     |
-        | (1byte)  | (4byte)    |  (8byte)  | (8byte)   | (8byte)   | (8byte)   |
-        +----------+------------+-----------+-----------+-----------+-----------+         
-```
-
-- `head` was used to indicate the start position of list head
-- `tail` was used to indicate the stop position of list tail
-
-the meaning of other fields were same with other types, just add extra head/tail to record the boundary of list.
-
-#### list sub keys-values
-
-the sub key in list is composed of the list key, version, and index, and the index is calculated from the metadata's head or tail. for example, when the user requests `rpush list elem`, kvrocks would fetch the metadata with the list key first, generate the sub key with the list key, version, and tail, simply increase the tail, then write the metadata and sub key-value back to rocksdb.
-
-```shell
-                     +---------------+
-key|version|index => |     value     |
-                     +---------------+
-```
-
-## ZSet
-
-Redis zset was set with sorted property, so it's a little different with other types. it must be able to search with member, as well as retrieve members with score range.
-
-#### zset metadata
-
-the metadata of zset was still same with set, like below
-
-```shell
-        +----------+------------+-----------+-----------+
-key =>  |  flags   |  expire    |  version  |  size     |
-        | (1byte)  | (4byte)    |  (8byte)  | (8byte)   |
-        +----------+------------+-----------+-----------+
-```
-
-#### zset sub keys-values
-
-the sub keys-values are different: besides the value of the sub key not being null, we need a way to range over members by score. so the zset has two types of sub keys-values, one to map members to scores, and one for score ranges.
-
-```shell
-                            +---------------+
-key|version|member       => |     score     |   (1)
-                            +---------------+
-                            
-                            +---------------+
-key|version|score|member => |     NULL      |   (2)
-                            +---------------+                     
-
-```
-
-if the user wants to get the score of a member or check whether the member exists, it would use the first one.
-
-## Bitmap
-
-Redis bitmap is the most interesting part of the kvrocks design. unlike other types it has no natural sub keys, and the value would be very large if the user treats it as a sparse array. it's apparent that things would break down if we stored the bitmap as a single value, so we should break the bitmap value into multiple fragments. another behavior of bitmap is that write positions are always arbitrary, which is very similar to the access model of Linux virtual memory, so the idea of the bitmap design came from that.
-
-#### bitmap metadata
-
-```shell
-        +----------+------------+-----------+-----------+
-key =>  |  flags   |  expire    |  version  |  size     |
-        | (1byte)  | (4byte)    |  (8byte)  | (8byte)   |
-        +----------+------------+-----------+-----------+
-```
-
-#### bitmap sub keys-values
-
-we break the bitmap value into fragments (1KiB, 8096 bits per fragment), and the subkey is the index of the fragment. for example, a request to set bit 1024 would locate in the first fragment with index 0, while setting bit 80970 would locate in the 10th fragment with index 10.
-
-```shell
-                     +---------------+
-key|version|index => |     fragment  |
-                     +---------------+
-```
-
-when the user requests to get the bit at position P, kvrocks would first fetch the metadata with the bitmap's key and calculate the index of the fragment from the bit position, then fetch the bitmap fragment with the composed key and check whether the bit is set at the fragment offset. for example, for `getbit bitmap 8097`, the index is `1` (8097/8096) and the subkey is `bitmap|1|1` (assuming the version is 1); then fetch the subkey from rocksdb and check whether the bit at offset `1` (8097%8096) is set.
diff --git a/docs/multi-tenant.CN.md b/docs/multi-tenant.CN.md
deleted file mode 100644
index 252aa3f..0000000
--- a/docs/multi-tenant.CN.md
+++ /dev/null
@@ -1,31 +0,0 @@
-Redis 里面通过多个 DB 来实现数据的隔离,业务通过 `select` 指令来选择读写的 DB。 这种方式存在一些问题:
-
-* 多租户实现同一个认证密码,虽然数据是隔离的,但权限没法隔离
-* DB 数量固定且编号无法标识业务,固定数量的 DB 并不是太大的问题(16 个也足够),但 DB 编号无法标识业务在我们看来其实是不太方便的
-
-
-## kvrocks 的实现
-
-kvrocks 允许通过一个实例在线增加或者减少 token, 每个 token 之间的数据是隔离的。 实现上也比较简单,在分配 token 的时候关联一个业务标识(Namespace), 在业务写入数据的时候会自动在 key 前面增加这个标识,读取数据 key 的时候自动去掉标识。
-
-下面是在线管理 token 的命令,也可以在配置文件里面指定:
-
-```shell
-namespace add ${namespace} ${token} # 新增一个 token, token 和 namespace 不能是已经存在
-
-namespace set ${namespace} ${token} # 修改 namespace 对应的 token
-
-namespace del ${namespace} # 删除 namespace 对应的 token, 注意这里不会删除数据
-
-namespace get ${namespace} # 获取 namespace 对应的 token
-
-namespace get * # 获取所有 namespace 和 token
- 
-```
-
-> 注意: 如果修改了 namespace 之后需要执行 `confing rewrite` 命令将配置持久化到本地,同时如果有多个实例也需要同步修改
-
-
-假设我们配置了一个 namespace N1 对应 token T1, 当业务使用这个 token 执行 `SET a 1`, 那么在存储里面实际上会变成类似 `T1|a => 1` ,读取数据 key 的时候会自动去掉 T1。
-
-这种实现方式有一个问题就是每个 key 都增加一个 namespace 信息会额外占用一些空间。另外一种方式是通过 rocksdb 的 column family 来实现数据隔离,但从实现的复杂度和必要性层面我们还是选择了前者
diff --git a/docs/replication-design.md b/docs/replication-design.md
deleted file mode 100644
index cb150dc..0000000
--- a/docs/replication-design.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Replication of rocksdb data
-
-An instance is turned into a slave role when the `SLAVEOF` cmd is received. The slave will
-try to do a partial synchronization (AKA. incremental replication) if it is viable,
-Otherwise, slave will do a full-sync by copying all the rocksdb's latest backup files.
-After the full-sync is finished, the slave's DB will be erased and restored using
-the backup files downloaded from master, then partial-sync is triggered again.
-
-If everything goes OK, the partial-sync is an ongoing procedure that keeps receiving
-every batch the master gets.
-
-## Replication State Machine
-
-A state machine is used in the slave's replication thread to accommodate the complexity.
-
-On the slave side, replication is composed of the following steps:
-
-  1. Send Auth
-  2. Send db\_name to check if the master has the right DB
-  3. Try PSYNC, if succeeds, slave is in the loop of receiving batches; if not, go to `4`
-  4. Do FULLSYNC
-    4.1. send _fetch_meta to get the latest backup meta data
-    4.2. send _fetch_file to get all the backup files listed in the meta
-    4.3. restore slave's DB using the backup
-  5. goto `1`
-
-## Partial Synchronization (PSYNC)
-
-PSYNC takes advantage of the rocksdb's WAL iterator. If the PSYNC's requesting sequence
-number is in the range of the WAL files, PSYNC is considered viable.
-
-PSYNC is a command implemented on master role instance. Unlike other commands (eg. GET),
-PSYNC cmd is not a REQ-RESP command, but a REQ-RESP-RESP style. That's the response never
-ends once the req is accepted.
-
-so PSYNC has two main parts in the code:
-- A: libevent callback for sending the batches when the WAL iterator has new data.
-- B: timer callback, when A quit because of the exhaustion of the WAL data, the timer cb
-  will check if WAL has new data available from time to time, so to awake the A again.
-
-## Full Synchronization
-
-On the master side, to support full synchronization, master must create a rocksdb backup
-every time the `_fetch_meta` request is received.
-
-On the slave side, after retrieving the meta data, the slave can fetch every file listed in
-the meta data (skip if already existed), and restore the backup. to accelerate a bit, file
-fetching is executed in parallel.
-
diff --git a/docs/support-commands.md b/docs/support-commands.md
deleted file mode 100644
index e283441..0000000
--- a/docs/support-commands.md
+++ /dev/null
@@ -1,181 +0,0 @@
-# Support Commands
-
-## String Commands
-
-| Command     | Supported OR Not | Desc                 |
-| ----------- | ---------------- | -------------------- |
-| get         | √                |                      |
-| getrange    | √                |                      |
-| getset      | √                |                      |
-| incr        | √                |                      |
-| incrby      | √                |                      |
-| incrbyfloat | √                |                      |
-| mget        | √                |                      |
-| mset        | √                |                      |
-| msetnx      | √                |                      |
-| psetex      | √                | only supports second |
-| set         | √                |                      |
-| setex       | √                |                      |
-| setnx       | √                |                      |
-| setrange    | √                |                      |
-| strlen      | √                |                      |
-
-## Hash Commands
-
-| Command      | Supported OR Not | Desc |
-| ------------ | ---------------- | ---- |
-| hdel         | √                |      |
-| hexists      | √                |      |
-| hget         | √                |      |
-| hgetall      | √                |      |
-| hincrby      | √                |      |
-| hincrbyfloat | √                |      |
-| hkeys        | √                |      |
-| hlen         | √                |      |
-| hmget        | √                |      |
-| hmset        | √                |      |
-| hset         | √                |      |
-| hsetnx       | √                |      |
-| hstrlen      | √                |      |
-| hvals        | √                |      |
-| hscan        | √                |      |
-
-## List Commands
-
-| Command    | Supported OR Not | Desc                                                         |
-| ---------- | ---------------- | ------------------------------------------------------------ |
-| blpop      | √                |                                                              |
-| brpop      | √                |                                                              |
-| brpoplpush | X                |                                                              |
-| lindex     | √                | Caution: lindex is an O(N) operation, don't use it when the list is extremely long |
-| linsert    | √                |                                                              |
-| llen       | √                |                                                              |
-| lpop       | √                |                                                              |
-| lpush      | √                |                                                              |
-| lpushx     | √                |                                                              |
-| lrange     | √                |                                                              |
-| lrem       | √                | Caution: lrem is an O(N) operation, don't use it when the list is extremely long |
-| lset       | √                |                                                              |
-| ltrim      | √                | Caution: ltrim is an O(N) operation, don't use it when the list is extremely long |
-| rpop       | √                |                                                              |
-| rpoplpush  | √                |                                                              |
-| rpush      | √                |                                                              |
-| rpushx     | √                |                                                              |
-
-
-
-## Set Commands
-
-| Command     | Supported OR Not | Desc                                  |
-| ----------- | ---------------- | ------------------------------------- |
-| sadd        | √                |                                       |
-| scard       | √                |                                       |
-| sdiff       | √                |                                       |
-| sdiffstore  | √                |                                       |
-| sinter      | √                |                                       |
-| sinterstore | √                |                                       |
-| sismember   | √                |                                       |
-| smembers    | √                |                                       |
-| smove       | √                |                                       |
-| spop        | √                | pops members in key order             |
-| srandmember | √                | always the first N members if the set is unchanged |
-| srem        | √                |                                       |
-| sunion      | √                |                                       |
-| sunionstore | √                |                                       |
-| sscan       | √                |                                       |
-
-## ZSet Commands
-
-| Command          | Supported OR Not | Desc |
-| ---------------- | ---------------- | ---- |
-| bzpopmin         | X                |      |
-| bzpopmax         | X                |      |
-| zadd             | √                |      |
-| zcard            | √                |      |
-| zcount           | √                |      |
-| zincrby          | √                |      |
-| zinterstore      | √                |      |
-| zlexcount        | √                |      |
-| zpopmin          | √                |      |
-| zpopmax          | √                |      |
-| zrange           | √                |      |
-| zrangebylex      | √                |      |
-| zrangebyscore    | √                |      |
-| zrank            | √                |      |
-| zrem             | √                |      |
-| zremrangebylex   | √                |      |
-| zremrangebyrank  | √                |      |
-| zremrangebyscore | √                |      |
-| zrevrange        | √                |      |
-| zrevrangebylex   | X                |      |
-| zrevrangebyscore | √                |      |
-| zscan            | √                |      |
-| zscore           | √                |      |
-| zunionstore      | √                |      |
-
-## Key Commands
-
-| Command   | Supported OR Not | Desc                 |
-| --------- | ---------------- | -------------------- |
-| del       | √                |                      |
-| dump      | √                |                      |
-| exists    | √                |                      |
-| expire    | √                |                      |
-| expireat  | √                |                      |
-| keys      | √                |                      |
-| persist   | √                |                      |
-| pexpire   | √                | precision is seconds |
-| pexpireat | √                | precision is seconds |
-| pttl      | √                |                      |
-| ttl       | √                |                      |
-| type      | √                |                      |
-| scan      | √                |                      |
-| rename    | X                |                      |
-| randomkey | √                |                      |
-
-## Bit Commands
-
-| Command  | Supported OR Not | Desc |
-| -------- | ---------------- | ---- |
-| getbit   | √                |      |
-| setbit   | √                |      |
-| bitcount | √                |      |
-| bitpos   | √                |      |
-| bitfield | X                |      |
-| bitop    | X                |      |
-
-**NOTE : String and Bitmap are different types in kvrocks, so you can't do bit operations on a string, and vice versa.**
-
-
-
-## Pub/Sub Commands
-
-| Command      | Supported OR Not | Desc |
-| ------------ | ---------------- | ---- |
-| psubscribe   | √                |      |
-| publish      | √                |      |
-| pubsub       | √                |      |
-| punsubscribe | √                |      |
-| subscribe    | √                |      |
-| unsubscribe  | √                |      |
-
-## Administrator Commands
-
-| Command      | Supported OR Not | Desc |
-| ------------ | ---------------- | ---- |
-| monitor      | √                |      |
-| info         | √                |      |
-| config       | √                |      |
-| dbsize       | √                |      |
-| namespace    | √                |      |
-| flushdb      | √                |      |
-
-**NOTE : The db size is updated asynchronously after executing the `dbsize scan` command**
-
-## GEO Commands
-
-**Not Supported**
-
-## Hyperloglog Commands
-
-**Not Supported**
diff --git a/external/gflags b/external/gflags
deleted file mode 160000
index e171aa2..0000000
--- a/external/gflags
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit e171aa2d15ed9eb17054558e0b3a6a413bb01067
diff --git a/external/glog b/external/glog
deleted file mode 160000
index a6a166d..0000000
--- a/external/glog
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit a6a166db069520dbbd653c97c2e5b12e08a8bb26
diff --git a/external/googletest b/external/googletest
deleted file mode 160000
index 2fe3bd9..0000000
--- a/external/googletest
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 2fe3bd994b3189899d93f1d5a881e725e046fdc2
diff --git a/external/jemalloc b/external/jemalloc
deleted file mode 160000
index b0b3e49..0000000
--- a/external/jemalloc
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b0b3e49a54ec29e32636f4577d9d5a896d67fd20
diff --git a/external/libevent b/external/libevent
deleted file mode 160000
index 1d2ef90..0000000
--- a/external/libevent
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 1d2ef90032bc842bc2e295ee4adce3408b6d85da
diff --git a/external/rocksdb b/external/rocksdb
deleted file mode 160000
index 7e1f37e..0000000
--- a/external/rocksdb
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 7e1f37eb4fc711dbf3ecc9610178931f00754de8
diff --git a/external/snappy b/external/snappy
deleted file mode 160000
index b02bfa7..0000000
--- a/external/snappy
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b02bfa754ebf27921d8da3bd2517eab445b84ff9
diff --git a/kvrocks b/kvrocks
new file mode 160000
index 0000000..637291d
--- /dev/null
+++ b/kvrocks
@@ -0,0 +1 @@
+Subproject commit 637291df05cd729c6cab074e11699d189739507d
diff --git a/kvrocks.conf b/kvrocks.conf
deleted file mode 100644
index f535e9f..0000000
--- a/kvrocks.conf
+++ /dev/null
@@ -1,260 +0,0 @@
-################################ GENERAL #####################################
-
-# By default kvrocks listens for connections from all the network interfaces
-# available on the server. It is possible to listen to just one or multiple
-# interfaces using the "bind" configuration directive, followed by one or
-# more IP addresses.
-#
-# Examples:
-#
-# bind 192.168.1.100 10.0.0.1
-# bind 127.0.0.1
-bind 0.0.0.0
-
-# Accept connections on the specified port, default is 6666.
-port 6666
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# The number of worker threads; increasing or decreasing it affects performance.
-workers 8
-
-# The number of replication worker threads; increasing or decreasing it affects replication performance.
-# default is 1
-repl-workers 1
-
-# The value should be INFO, WARNING, ERROR, FATAL
-# default is INFO
-loglevel INFO
-
-# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
-# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
-daemonize no
-
-# Require clients to issue AUTH <PASSWORD> before processing any other
-# commands.  This might be useful in environments in which you do not trust
-# others with access to the host running kvrocks.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
-# Warning: since kvrocks is pretty fast, an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password, otherwise it will be very easy to break.
-#
-# requirepass foobared
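# For example, with "requirepass foobared" enabled, a client must authenticate before
# issuing other commands (illustrative redis-cli session; exact replies may differ):
#   AUTH foobared
#   OK
#   GET somekey
#   (nil)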
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-# masterauth foobared
-
-# Master-Slave replication checks whether the db name matches; if not, the slave
-# refuses to sync the db from the master. Don't use the default value; set db-name
-# to identify the cluster.
-db-name change.me.db
-
-# The working directory
-#
-# The DB will be written inside this directory
-# Note that you must specify a directory here, not a file name.
-dir .
-
-# The backup directory
-#
-# The DB will be written inside this directory
-# Note that you must specify a directory here, not a file name.
-# backup-dir /tmp/kvrocks/backup
-
-# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
-# default. You can specify a custom pid file location here.
-# pidfile /var/run/kvrocks.pid
-
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-slave-read-only yes
-
-# The slave priority is an integer number published by Kvrocks in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
-#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priority 10, 100, 25, Sentinel will
-# pick the one with priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the replica as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-slave-priority 100
-
-# TCP listen() backlog.
-#
-# In high requests-per-second environments you need a high backlog in order
-# to avoid slow client connection issues. Note that the Linux kernel
-# will silently truncate it to the value of /proc/sys/net/core/somaxconn, so
-# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
-# in order to get the desired effect.
-tcp-backlog 511
-
-#
-# repl-bind 192.168.1.100 10.0.0.1
-# repl-bind 127.0.0.1
-repl-bind 0.0.0.0
-
-# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
-# another kvrocks server. A few things to understand ASAP about kvrocks replication.
-#
-# 1) Kvrocks replication is asynchronous, but you can configure a master to
-#    stop accepting writes if it appears to be not connected with at least
-#    a given number of slaves.
-# 2) Kvrocks slaves are able to perform a partial resynchronization with the
-#    master if the replication link is lost for a relatively small amount of
-#    time. You may want to configure the replication backlog size (see the next
-#    sections of this file) with a sensible value depending on your needs.
-# 3) Replication is automatic and does not need user intervention. After a
-#    network partition slaves automatically try to reconnect to masters
-#    and resynchronize with them.
-#
-# slaveof <masterip> <masterport>
-# slaveof 127.0.0.1 6379
-
-# The maximum allowed rate (in MB/s) that should be used by Replication.
-# If the rate exceeds max-replication-mb, replication will slow down.
-# Default: 0 (i.e. no limit)
-max-replication-mb 0
-
-# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
-# If the rate exceeds max-io-mb, I/O will slow down.
-# 0 is no limit
-# Default: 500
-max-io-mb 500
-
-# The maximum allowed space (in GB) that should be used by RocksDB.
-# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
-# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization
-# Default: 0 (i.e. no limit)
-max-db-size 0
-
-# The maximum number of backups to keep. The server cron runs every minute to check the
-# number of current backups and purges old backups when it exceeds the maximum. If
-# max-backup-to-keep is 0, no backup will be kept.
-max-backup-to-keep 1
-
-# The maximum number of hours to keep a backup. If max-backup-keep-hours is 0, no backup will be purged.
-# default is 168 (one week)
-max-backup-keep-hours 168
-
-
-################################## SLOW LOG ###################################
-
-# The Kvrocks Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Kvrocks
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 100000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
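# For example, both parameters can also be changed at runtime through the CONFIG command
# (illustrative redis-cli session, assuming CONFIG SET is enabled for these keys):
#   CONFIG SET slowlog-log-slower-than 100000
#   CONFIG SET slowlog-max-len 128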
-
-################################## CRON ###################################
-
-# Compact Scheduler: automatically compact the db at the scheduled time.
-# The time expression format is the same as crontab (currently only * and integers are supported),
-# e.g. compact-cron 0 3 * * * 0 4 * * *
-# would compact the db at 3am and 4am every day.
-compact-cron 0 3 * * *
-
-# Backup Scheduler: automatically back up the db at the scheduled time.
-# The time expression format is the same as compact-cron,
-# e.g. bgsave-cron 0 3 * * * 0 4 * * *
-# would back up the db at 3am and 4am every day.
-# bgsave-cron 0 4 * * *
-
-################################ ROCKSDB #####################################
-
-# Specify the capacity of the metadata column family block cache. A larger block cache
-# may make requests faster since more keys can be cached. Max size is 200*1024.
-# unit is MiB, default 4096
-rocksdb.metadata_block_cache_size 4096
-
-# Specify the capacity of the subkey column family block cache. A larger block cache
-# may make requests faster since more keys can be cached. Max size is 200*1024.
-# unit is MiB, default 8192
-rocksdb.subkey_block_cache_size 8192
-
-# Number of open files that can be used by the DB.  You may need to
-# increase this if your database has a large working set. Value -1 means
-# files opened are always kept open. You can estimate number of files based
-# on target_file_size_base and target_file_size_multiplier for level-based
-# compaction. For universal-style compaction, you can usually set it to -1.
-rocksdb.max_open_files 8096
-
-# Amount of data to build up in memory (backed by an unsorted log
-# on disk) before converting to a sorted on-disk file.
-#
-# Larger values increase performance, especially during bulk loads.
-# Up to max_write_buffer_number write buffers may be held in memory
-# at the same time,
-# so you may wish to adjust this parameter to control memory usage.
-# Also, a larger write buffer will result in a longer recovery time
-# the next time the database is opened.
-#
-# Note that write_buffer_size is enforced per column family.
-# See db_write_buffer_size for sharing memory across column families.
-
-# default is 256MB
-rocksdb.write_buffer_size 256
-
-# The maximum number of write buffers that are built up in memory.
-# The default and the minimum number is 2, so that when 1 write buffer
-# is being flushed to storage, new writes can continue to the other
-# write buffer.
-# If max_write_buffer_number > 3, writing will be slowed down to
-# options.delayed_write_rate if we are writing to the last write buffer
-# allowed.
-rocksdb.max_write_buffer_number 2
-
-# Maximum number of concurrent background compaction jobs, submitted to
-# the default LOW priority thread pool.
-rocksdb.max_background_compactions 2
-
-# Maximum number of concurrent background memtable flush jobs, submitted by
-# default to the HIGH priority thread pool. If the HIGH priority thread pool
-# is configured to have zero threads, flush jobs will share the LOW priority
-# thread pool with compaction jobs.
-rocksdb.max_background_flushes 2
-
-# This value represents the maximum number of threads that will
-# concurrently perform a compaction job by breaking it into multiple,
-# smaller ones that are run simultaneously.
-# Default: 1 (i.e. no subcompactions)
-rocksdb.max_sub_compactions 1
-
-# Specify the compression to use.
-# Accepted values: "no", "snappy"
-# default snappy
-rocksdb.compression snappy
-
-################################ NAMESPACE #####################################
-# namespace.test change.me
diff --git a/kvrocks2redis.conf b/kvrocks2redis.conf
deleted file mode 100644
index a13e1fc..0000000
--- a/kvrocks2redis.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-################################ GENERAL #####################################
-
-# The value should be INFO, WARNING, ERROR, FATAL
-# default is INFO
-loglevel INFO
-
-# By default kvrocks2redis does not run as a daemon. Use 'yes' if you need it.
-# Note that kvrocks2redis will write a pid file in /var/run/kvrocks2redis.pid when daemonized.
-daemonize no
-
-# The working directory
-#
-# The kvrocks node db directory
-# Note that you must specify a directory here, not a file name.
-dir /tmp/kvrocks
-
-# Sync from the kvrocks node. Use the node's Psync command to get the newest raw write batches from the WAL.
-#
-# kvrocks <kvrocks_ip> <kvrocks_port> <kvrocks_auth>
-kvrocks 127.0.0.1 6666 foobared
-
-
-################################ NAMESPACE AND Sync Target Redis #####################################
-# namespace.{namespace} <redis_ip> <redis_port> <auth>
-namespace.test 127.0.0.1 6379 foobared
-namespace.__namespace 127.0.0.1 6379 foobared
-
diff --git a/package.sh b/package.sh
deleted file mode 100644
index 657f31f..0000000
--- a/package.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-NAME="kvrocks"
-VERSION=`_build/bin/kvrocks -v|awk '{printf $2;}'`
-STAGE=${STAGE:-release}
-fpm -f -s dir -t rpm --prefix '/www/kvrocks'  -n ${NAME} --epoch 7 \
-    --config-files /www/kvrocks/conf/kvrocks.conf \
-    -v ${VERSION} --iteration ${CI_PIPELINE_ID}.${STAGE} -C ./_build \
-    --verbose --category 'Meitu/Projects' --description 'kvrocks' \
-    --url 'http://www.meitu.com' --license 'Commercial' -m 'linty@meitu.com'
-
diff --git a/src/compact_filter.cc b/src/compact_filter.cc
deleted file mode 100644
index 71c03c0..0000000
--- a/src/compact_filter.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-#include "compact_filter.h"
-#include <glog/logging.h>
-#include <string>
-#include <utility>
-
-#include "redis_bitmap.h"
-
-namespace Engine {
-using rocksdb::Slice;
-
-bool MetadataFilter::Filter(int level,
-                                    const Slice &key,
-                                    const Slice &value,
-                                    std::string *new_value,
-                                    bool *modified) const {
-  std::string ns, user_key, bytes = value.ToString();
-  Metadata metadata(kRedisNone);
-  rocksdb::Status s = metadata.Decode(bytes);
-  ExtractNamespaceKey(key, &ns, &user_key);
-  if (!s.ok()) {
-    LOG(WARNING) << "[compact_filter/metadata] Failed to decode,"
-                 << ", namespace: " << ns
-                 << ", key: " << user_key
-                 << ", err: " << s.ToString();
-    return false;
-  }
-  DLOG(INFO) << "[compact_filter/metadata] "
-             << "namespace: " << ns
-             << ", key: " << user_key
-             << ", result: " << (metadata.Expired() ? "deleted" : "reserved");
-  return metadata.Expired();
-}
-
-bool SubKeyFilter::IsKeyExpired(const InternalKey &ikey, const Slice &value) const {
-  std::string metadata_key;
-
-  auto db = stor_->GetDB();
-  auto cf_handles = stor_->GetCFHandles();
-  // closing the storage would delete the column family handles and the DB
-  if (!db || cf_handles.size() < 2)  return false;
-
-  ComposeNamespaceKey(ikey.GetNamespace(), ikey.GetKey(), &metadata_key);
-  if (cached_key_.empty() || metadata_key != cached_key_) {
-    std::string bytes;
-    if (!stor_->IncrDBRefs().IsOK()) {  // the db is closing, don't use DB and cf_handles
-      return false;
-    }
-    rocksdb::Status s = db->Get(rocksdb::ReadOptions(), cf_handles[1], metadata_key, &bytes);
-    stor_->DecrDBRefs();
-    cached_key_ = std::move(metadata_key);
-    if (s.ok()) {
-      cached_metadata_ = std::move(bytes);
-    } else if (s.IsNotFound()) {
-      // the metadata was deleted (perhaps by compaction or manually),
-      // so clear the cached metadata
-      cached_metadata_.clear();
-      return true;
-    } else {
-      LOG(ERROR) << "[compact_filter/subkey] Failed to fetch metadata"
-                 << ", namespace: " << ikey.GetNamespace().ToString()
-                 << ", key: " << ikey.GetKey().ToString()
-                 << ", err: " << s.ToString();
-      cached_key_.clear();
-      cached_metadata_.clear();
-      return false;
-    }
-  }
-  // the metadata was not found
-  if (cached_metadata_.empty()) return true;
-  // the metadata is cached
-  Metadata metadata(kRedisNone);
-  rocksdb::Status s = metadata.Decode(cached_metadata_);
-  if (!s.ok()) {
-    cached_key_.clear();
-    LOG(ERROR) << "[compact_filter/subkey] Failed to decode metadata"
-               << ", namespace: " << ikey.GetNamespace().ToString()
-               << ", key: " << ikey.GetKey().ToString()
-               << ", err: " << s.ToString();
-    return false;
-  }
-  if (metadata.Type() == kRedisString  // metadata key was overwritten by a set command
-      || metadata.Expired()
-      || ikey.GetVersion() != metadata.version) {
-    return true;
-  }
-  return metadata.Type() == kRedisBitmap && Redis::Bitmap::IsEmptySegment(value);
-}
-
-bool SubKeyFilter::Filter(int level,
-                                  const Slice &key,
-                                  const Slice &value,
-                                  std::string *new_value,
-                                  bool *modified) const {
-  InternalKey ikey(key);
-  bool result = IsKeyExpired(ikey, value);
-  DLOG(INFO) << "[compact_filter/subkey] "
-             << "namespace: " << ikey.GetNamespace().ToString()
-             << ", metadata key: " << ikey.GetKey().ToString()
-             << ", subkey: " << ikey.GetSubKey().ToString()
-             << ", verison: " << ikey.GetVersion()
-             << ", result: " << (result ? "deleted" : "reserved");
-  return result;
-}
-}  // namespace Engine
diff --git a/src/compact_filter.h b/src/compact_filter.h
deleted file mode 100644
index e4a13cf..0000000
--- a/src/compact_filter.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#pragma once
-
-#include <rocksdb/db.h>
-#include <rocksdb/compaction_filter.h>
-#include <vector>
-#include <memory>
-#include <string>
-#include "redis_metadata.h"
-#include "storage.h"
-
-namespace Engine {
-class MetadataFilter : public rocksdb::CompactionFilter {
- public:
-  const char *Name() const override { return "MetadataFilter"; }
-  bool Filter(int level, const Slice &key, const Slice &value,
-              std::string *new_value, bool *modified) const override;
-};
-
-class MetadataFilterFactory : public rocksdb::CompactionFilterFactory {
- public:
-  MetadataFilterFactory() = default;
-  const char *Name() const override { return "MetadataFilterFactory"; }
-  std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
-      const rocksdb::CompactionFilter::Context &context) override {
-    return std::unique_ptr<rocksdb::CompactionFilter>(new MetadataFilter());
-  }
-};
-
-class SubKeyFilter : public rocksdb::CompactionFilter {
- public:
-  explicit SubKeyFilter(Storage *storage)
-      : cached_key_(""),
-        cached_metadata_(""),
-        stor_(storage) {}
-
-  const char *Name() const override { return "SubkeyFilter"; }
-  bool IsKeyExpired(const InternalKey &ikey, const Slice &value) const;
-  bool Filter(int level, const Slice &key, const Slice &value,
-              std::string *new_value, bool *modified) const override;
-
- protected:
-  mutable std::string cached_key_;
-  mutable std::string cached_metadata_;
-  Engine::Storage *stor_;
-};
-
-class SubKeyFilterFactory : public rocksdb::CompactionFilterFactory {
- public:
-  explicit SubKeyFilterFactory(Engine::Storage *storage) {
-    stor_ = storage;
-  }
-
-  const char *Name() const override { return "SubKeyFilterFactory"; }
-  std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
-      const rocksdb::CompactionFilter::Context &context) override {
-    return std::unique_ptr<rocksdb::CompactionFilter>(
-        new SubKeyFilter(stor_));
-  }
-
- private:
-  Engine::Storage *stor_ = nullptr;
-};
-
-class PubSubFilter : public rocksdb::CompactionFilter {
- public:
-  const char *Name() const override { return "PubSubFilter"; }
-  bool Filter(int level, const Slice &key, const Slice &value,
-              std::string *new_value, bool *modified) const override { return true; }
-};
-
-class PubSubFilterFactory : public rocksdb::CompactionFilterFactory {
- public:
-  PubSubFilterFactory() = default;
-  const char *Name() const override { return "PubSubFilterFactory"; }
-  std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
-      const rocksdb::CompactionFilter::Context &context) override {
-    return std::unique_ptr<rocksdb::CompactionFilter>(new PubSubFilter());
-  }
-};
-}  // namespace Engine
diff --git a/src/config.cc b/src/config.cc
deleted file mode 100644
index 2541f9f..0000000
--- a/src/config.cc
+++ /dev/null
@@ -1,720 +0,0 @@
-#include <fcntl.h>
-#include <string.h>
-#include <strings.h>
-#include <glog/logging.h>
-#include <rocksdb/env.h>
-
-#include <fstream>
-#include <iostream>
-#include <sstream>
-#include <vector>
-#include <utility>
-
-#include "config.h"
-#include "util.h"
-#include "status.h"
-#include "cron.h"
-#include "server.h"
-
-const char *kDefaultNamespace = "__namespace";
-static const char *kLogLevels[] = {"info", "warning", "error", "fatal"};
-static const size_t kNumLogLevel = sizeof(kLogLevels)/ sizeof(kLogLevels[0]);
-static const char *kCompressionType[] = {"no", "snappy"};
-static const size_t kNumCompressionType = sizeof(kCompressionType) / sizeof(kCompressionType[0]);
-
-void Config::incrOpenFilesLimit(rlim_t maxfiles) {
-  struct rlimit limit;
-
-  rlim_t old_limit, best_limit = maxfiles, decr_step = 16;
-  if (getrlimit(RLIMIT_NOFILE, &limit) < 0 || best_limit <= limit.rlim_cur) {
-    return;
-  }
-  old_limit = limit.rlim_cur;
-  while (best_limit > old_limit) {
-    limit.rlim_cur = best_limit;
-    limit.rlim_max = best_limit;
-    if (setrlimit(RLIMIT_NOFILE, &limit) != -1) break;
-    /* We failed to set file limit to 'bestlimit'. Try with a
-     * smaller limit decrementing by a few FDs per iteration. */
-    if (best_limit < decr_step) break;
-    best_limit -= decr_step;
-  }
-}
-
-void Config::array2String(const std::vector<std::string> &array,
-                          const std::string &delim, std::string *output) {
-  output->clear();
-  for (size_t i = 0; i < array.size(); i++) {
-    output->append(array[i]);
-    if (i != array.size()-1) output->append(delim);
-  }
-}
-
-int Config::yesnotoi(std::string input) {
-  if (strcasecmp(input.data(), "yes") == 0) {
-    return 1;
-  } else if (strcasecmp(input.data(), "no") == 0) {
-    return 0;
-  }
-  return -1;
-}
-
-Status Config::parseRocksdbOption(const std::string &key, std::string value) {
-  if (key == "compression") {
-    for (size_t i = 0; i < kNumCompressionType; i++) {
-      if (Util::ToLower(value) == kCompressionType[i]) {
-        rocksdb_options.compression = static_cast<rocksdb::CompressionType >(i);
-        break;
-      }
-    }
-  } else if (key == "enable_pipelined_write")  {
-    rocksdb_options.enable_pipelined_write = value == "yes";
-  } else {
-    return parseRocksdbIntOption(key, value);
-  }
-  return Status::OK();
-}
-
-Status Config::parseRocksdbIntOption(std::string key, std::string value) {
-  int64_t n;
-  auto s = Util::StringToNum(value, &n);
-  if (key == "max_open_files") {
-    rocksdb_options.max_open_files = static_cast<int>(n);
-  } else if (!strncasecmp(key.data(), "write_buffer_size" , strlen("write_buffer_size"))) {
-    rocksdb_options.write_buffer_size = static_cast<size_t>(n) * MiB;
-  }  else if (key == "max_write_buffer_number") {
-    rocksdb_options.max_write_buffer_number = static_cast<int>(n);
-  }  else if (key == "write_buffer_size") {
-    rocksdb_options.write_buffer_size = static_cast<uint64_t>(n);
-  }  else if (key == "target_file_size_base") {
-    rocksdb_options.target_file_size_base = static_cast<uint64_t>(n);
-  }  else if (key == "max_background_compactions") {
-    rocksdb_options.max_background_compactions = static_cast<int>(n);
-  }  else if (key == "max_background_flushes") {
-    rocksdb_options.max_background_flushes = static_cast<int>(n);
-  }  else if (key == "max_sub_compactions") {
-    rocksdb_options.max_sub_compactions = static_cast<uint32_t>(n);
-  } else if (key == "metadata_block_cache_size") {
-    rocksdb_options.metadata_block_cache_size = static_cast<size_t>(n) * MiB;
-  } else if (key == "subkey_block_cache_size") {
-    rocksdb_options.subkey_block_cache_size = static_cast<size_t>(n) * MiB;
-  } else if (key == "delayed_write_rate") {
-    rocksdb_options.delayed_write_rate = static_cast<uint64_t>(n);
-  } else if (key == "compaction_readahead_size") {
-    rocksdb_options.compaction_readahead_size = static_cast<size_t>(n);
-  } else if (key == "wal_ttl_seconds") {
-    rocksdb_options.WAL_ttl_seconds = static_cast<uint64_t>(n);
-  } else if (key == "wal_size_limit_mb") {
-    rocksdb_options.WAL_size_limit_MB = static_cast<uint64_t>(n);
-  } else if (key == "level0_slowdown_writes_trigger") {
-    rocksdb_options.level0_slowdown_writes_trigger = static_cast<int>(n);
-    rocksdb_options.level0_stop_writes_trigger = static_cast<int>(n*2);
-  } else {
-    return Status(Status::NotOK, "Bad directive or wrong number of arguments");
-  }
-  return Status::OK();
-}
-
-Status Config::parseConfigFromString(std::string input) {
-  std::vector<std::string> args;
-  Util::Split(input, " \t\r\n", &args);
-  // omit empty line and comment
-  if (args.empty() || args[0].front() == '#') return Status::OK();
-
-  args[0] = Util::ToLower(args[0]);
-  size_t size = args.size();
-  if (size == 2 && args[0] == "port") {
-    port = std::stoi(args[1]);
-    repl_port = port + 1;
-  } else if (size == 2 && args[0] == "timeout") {
-    timeout = std::stoi(args[1]);
-  } else if (size == 2 && args[0] == "workers") {
-    workers = std::stoi(args[1]);
-    if (workers < 1 || workers > 1024) {
-      return Status(Status::NotOK, "too many worker threads");
-    }
-  } else if (size == 2 && args[0] == "repl-workers") {
-    repl_workers = std::stoi(args[1]);
-    if (repl_workers < 1 || repl_workers > 1024) {
-      return Status(Status::NotOK, "too many replication worker threads");
-    }
-  } else if (size >= 2 && args[0] == "bind") {
-    binds.clear();
-    for (unsigned i = 1; i < args.size(); i++) {
-      binds.emplace_back(args[i]);
-    }
-  } else if (size >= 2 && args[0] == "repl-bind") {
-    repl_binds.clear();
-    for (unsigned i = 1; i < args.size(); i++) {
-      repl_binds.emplace_back(args[i]);
-    }
-  } else if (size == 2 && args[0] == "daemonize") {
-    int i;
-    if ((i = yesnotoi(args[1])) == -1) {
-      return Status(Status::NotOK, "argument must be 'yes' or 'no'");
-    }
-    daemonize = (i == 1);
-  } else if (size == 2 && args[0] == "slave-read-only") {
-    int i;
-    if ((i = yesnotoi(args[1])) == -1) {
-      return Status(Status::NotOK, "argument must be 'yes' or 'no'");
-    }
-    slave_readonly = (i == 1);
-  } else if (size == 2 && args[0] == "slave-priority") {
-    slave_priority = std::stoi(args[1]);
-  } else if (size == 2 && args[0] == "tcp-backlog") {
-    backlog = std::stoi(args[1]);
-  } else if (size == 2 && args[0] == "dir") {
-    dir = args[1];
-    db_dir = dir + "/db";
-    pidfile = dir + "/kvrocks.pid";
-  } else if (size == 2 && args[0] == "backup-dir") {
-    backup_dir = args[1];
-  } else if (size == 2 && args[0] == "maxclients") {
-    maxclients = std::stoi(args[1]);
-    if (maxclients > 0) incrOpenFilesLimit(static_cast<rlim_t >(maxclients));
-  } else if (size == 2 && args[0] == "db-name") {
-    db_name = args[1];
-  } else if (size == 2 && args[0] == "masterauth") {
-    masterauth = args[1];
-  } else if (size == 2 && args[0] == "max-backup-to-keep") {
-    max_backup_to_keep = static_cast<uint32_t>(std::stoi(args[1]));
-  } else if (size == 2 && args[0] == "max-backup-keep-hours") {
-    max_backup_keep_hours = static_cast<uint32_t>(std::stoi(args[1]));
-  } else if (size == 2 && args[0] == "requirepass") {
-    requirepass = args[1];
-  } else if (size == 2 && args[0] == "pidfile") {
-    pidfile = args[1];
-  } else if (size == 2 && args[0] == "loglevel") {
-    for (size_t i = 0; i < kNumLogLevel; i++) {
-      if (Util::ToLower(args[1]) == kLogLevels[i]) {
-        loglevel = static_cast<int>(i);
-        break;
-      }
-    }
-  } else if (size == 3 && args[0] == "slaveof") {
-    if (args[1] != "no" && args[2] != "one") {
-      master_host = args[1];
-      master_port = std::stoi(args[2]);
-      if (master_port <= 0 || master_port > 65535) {
-        return Status(Status::NotOK, "master port should be between 1 and 65535");
-      }
-    }
-  } else if (size == 2 && args[0] == "max-db-size") {
-    max_db_size = static_cast<uint32_t>(std::stoi(args[1]));
-  } else if (size == 2 && args[0] == "max-replication-mb") {
-    max_replication_mb = static_cast<uint64_t>(std::stoi(args[1]));
-  } else if (size == 2 && args[0] == "max-io-mb") {
-    max_io_mb = static_cast<uint64_t>(std::stoi(args[1]));
-  } else if (size >= 2 && args[0] == "compact-cron") {
-    args.erase(args.begin());
-    Status s = compact_cron.SetScheduleTime(args);
-    if (!s.IsOK()) {
-      return Status(Status::NotOK, "compact-cron time expression format error : "+s.Msg());
-    }
-  } else if (size >=2 && args[0] == "bgsave-cron") {
-    args.erase(args.begin());
-    Status s = bgsave_cron.SetScheduleTime(args);
-    if (!s.IsOK()) {
-      return Status(Status::NotOK, "bgsave-cron time expression format error : " + s.Msg());
-    }
-  } else if (size == 2 && args[0] == "profiling-sample-ratio") {
-    profiling_sample_ratio = std::stoi(args[1]);
-    if (profiling_sample_ratio < 0 || profiling_sample_ratio > 100) {
-      return Status(Status::NotOK, "profiling_sample_ratio value should between 0 and 100");
-    }
-  } else if (size == 2 && args[0] == "profiling-sample-record-max-len") {
-    profiling_sample_record_max_len = std::stoi(args[1]);
-  } else if (size == 2 && args[0] == "profiling-sample-record-threshold-ms") {
-    profiling_sample_record_threshold_ms = std::stoi(args[1]);
-  } else if (size == 2 && args[0] == "profiling-sample-commands") {
-    std::vector<std::string> cmds;
-    Util::Split(args[1], ",", &cmds);
-    for (auto const &cmd : cmds) {
-      if (cmd == "*") {
-        profiling_sample_all_commands = true;
-        profiling_sample_commands.clear();
-        break;
-      }
-      if (!Redis::IsCommandExists(cmd)) {
-        return Status(Status::NotOK, "invalid command: "+cmd+" in profiling-sample-commands");
-      }
-      profiling_sample_commands.insert(cmd);
-    }
-  } else if (size == 2 && !strncasecmp(args[0].data(), "rocksdb.", 8)) {
-    return parseRocksdbOption(args[0].substr(8, args[0].size() - 8), args[1]);
-  } else if (size == 2 && !strncasecmp(args[0].data(), "namespace.", 10)) {
-    std::string ns = args[0].substr(10, args[0].size()-10);
-    if (ns.size() > INT8_MAX) {
-      return Status(Status::NotOK, std::string("namespace size exceed limit ")+std::to_string(INT8_MAX));
-    }
-    tokens[args[1]] = ns;
-  } else if (size == 2 && !strcasecmp(args[0].data(), "slowlog-log-slower-than")) {
-    slowlog_log_slower_than = std::stoll(args[1]);
-  } else if (size == 2 && !strcasecmp(args[0].data(), "slowlog-max-len")) {
-    slowlog_max_len = std::stoi(args[1]);
-
-  } else {
-    return Status(Status::NotOK, "Bad directive or wrong number of arguments");
-  }
-  return Status::OK();
-}
-
-Status Config::Load(std::string path) {
-  path_ = std::move(path);
-  std::ifstream file(path_);
-  if (!file.is_open()) {
-    return Status(Status::NotOK, strerror(errno));
-  }
-
-  std::string line;
-  int line_num = 1;
-  while (!file.eof()) {
-    std::getline(file, line);
-    Status s = parseConfigFromString(line);
-    if (!s.IsOK()) {
-      file.close();
-      return Status(Status::NotOK, "at line: #L" + std::to_string(line_num) + ", err: " + s.Msg());
-    }
-    line_num++;
-  }
-  if (backup_dir.empty()) {  // backup-dir was not assigned in config file
-    backup_dir = dir+"/backup";
-  }
-  if (!tokens.empty() && requirepass.empty()) {
-    file.close();
-    return Status(Status::NotOK, "requirepass was required when namespace isn't empty");
-  }
-  auto s = rocksdb::Env::Default()->CreateDirIfMissing(dir);
-  if (!s.ok()) {
-    file.close();
-    return Status(Status::NotOK, s.ToString());
-  }
-  s = rocksdb::Env::Default()->CreateDirIfMissing(backup_dir);
-  if (!s.ok()) {
-    file.close();
-    return Status(Status::NotOK, s.ToString());
-  }
-  file.close();
-  return Status::OK();
-}
-
-void Config::Get(std::string key, std::vector<std::string> *values) {
-  key = Util::ToLower(key);
-  values->clear();
-  bool is_all = key == "*";
-
-#define PUSH_IF_MATCH(name, value) do { \
-  if ((is_all) || (key) == (name)) { \
-    values->emplace_back((name)); \
-    values->emplace_back((value)); \
-  } \
-} while (0);
-
-  std::string master_str;
-  if (!master_host.empty()) {
-    master_str = master_host+" "+ std::to_string(master_port);
-  }
-  std::string binds_str;
-  array2String(binds, ",", &binds_str);
-  std::string sample_commands_str;
-  if (profiling_sample_all_commands) {
-    sample_commands_str = "*";
-  } else {
-    for (const auto &cmd : profiling_sample_commands) {
-      sample_commands_str.append(cmd);
-      sample_commands_str.append(",");
-    }
-    if (!sample_commands_str.empty()) sample_commands_str.pop_back();
-  }
-  PUSH_IF_MATCH("dir", dir);
-  PUSH_IF_MATCH("db-dir", db_dir);
-  PUSH_IF_MATCH("backup-dir", backup_dir);
-  PUSH_IF_MATCH("port", std::to_string(port));
-  PUSH_IF_MATCH("workers", std::to_string(workers));
-  PUSH_IF_MATCH("timeout", std::to_string(timeout));
-  PUSH_IF_MATCH("tcp-backlog", std::to_string(backlog));
-  PUSH_IF_MATCH("daemonize", (daemonize ? "yes" : "no"));
-  PUSH_IF_MATCH("maxclients", std::to_string(maxclients));
-  PUSH_IF_MATCH("slave-read-only", (slave_readonly ? "yes" : "no"));
-  PUSH_IF_MATCH("slave-priority", std::to_string(slave_priority));
-  PUSH_IF_MATCH("max-backup-to-keep", std::to_string(max_backup_to_keep));
-  PUSH_IF_MATCH("max-backup-keep-hours", std::to_string(max_backup_keep_hours));
-  PUSH_IF_MATCH("compact-cron", compact_cron.ToString());
-  PUSH_IF_MATCH("bgsave-cron", bgsave_cron.ToString());
-  PUSH_IF_MATCH("loglevel", kLogLevels[loglevel]);
-  PUSH_IF_MATCH("requirepass", requirepass);
-  PUSH_IF_MATCH("masterauth", masterauth);
-  PUSH_IF_MATCH("slaveof", master_str);
-  PUSH_IF_MATCH("pidfile", pidfile);
-  PUSH_IF_MATCH("db-name", db_name);
-  PUSH_IF_MATCH("binds", binds_str);
-  PUSH_IF_MATCH("max-io-mb", std::to_string(max_io_mb));
-  PUSH_IF_MATCH("max-db-size", std::to_string(max_db_size));
-  PUSH_IF_MATCH("slowlog-max-len", std::to_string(slowlog_max_len));
-  PUSH_IF_MATCH("max-replication-mb", std::to_string(max_replication_mb));
-  PUSH_IF_MATCH("profiling-sample-commands", sample_commands_str);
-  PUSH_IF_MATCH("profiling-sample-ratio", std::to_string(profiling_sample_ratio));
-  PUSH_IF_MATCH("profiling-sample-record-max-len", std::to_string(profiling_sample_record_max_len));
-  PUSH_IF_MATCH("profiling-sample-record-threshold-ms", std::to_string(profiling_sample_record_threshold_ms));
-  PUSH_IF_MATCH("slowlog-log-slower-than", std::to_string(slowlog_log_slower_than));
-  PUSH_IF_MATCH("rocksdb.max_open_files", std::to_string(rocksdb_options.max_open_files));
-  PUSH_IF_MATCH("rocksdb.write_buffer_size", std::to_string(rocksdb_options.write_buffer_size/MiB));
-  PUSH_IF_MATCH("rocksdb.max_write_buffer_number", std::to_string(rocksdb_options.max_write_buffer_number));
-  PUSH_IF_MATCH("rocksdb.max_background_compactions", std::to_string(rocksdb_options.max_background_compactions));
-  PUSH_IF_MATCH("rocksdb.metadata_block_cache_size", std::to_string(rocksdb_options.metadata_block_cache_size/MiB));
-  PUSH_IF_MATCH("rocksdb.subkey_block_cache_size", std::to_string(rocksdb_options.subkey_block_cache_size/MiB));
-  PUSH_IF_MATCH("rocksdb.compaction_readahead_size", std::to_string(rocksdb_options.compaction_readahead_size));
-  PUSH_IF_MATCH("rocksdb.max_background_flushes", std::to_string(rocksdb_options.max_background_flushes));
-  PUSH_IF_MATCH("rocksdb.enable_pipelined_write", (rocksdb_options.enable_pipelined_write ? "yes": "no"))
-  PUSH_IF_MATCH("rocksdb.stats_dump_period_sec", std::to_string(rocksdb_options.stats_dump_period_sec));
-  PUSH_IF_MATCH("rocksdb.max_sub_compactions", std::to_string(rocksdb_options.max_sub_compactions));
-  PUSH_IF_MATCH("rocksdb.delayed_write_rate", std::to_string(rocksdb_options.delayed_write_rate));
-  PUSH_IF_MATCH("rocksdb.wal_ttl_seconds", std::to_string(rocksdb_options.WAL_ttl_seconds));
-  PUSH_IF_MATCH("rocksdb.wal_size_limit_mb", std::to_string(rocksdb_options.WAL_size_limit_MB));
-  PUSH_IF_MATCH("rocksdb.target_file_size_base", std::to_string(rocksdb_options.target_file_size_base));
-  PUSH_IF_MATCH("rocksdb.level0_slowdown_writes_trigger",
-                std::to_string(rocksdb_options.level0_slowdown_writes_trigger));
-  PUSH_IF_MATCH("rocksdb.level0_stop_writes_trigger", std::to_string(rocksdb_options.level0_stop_writes_trigger));
-  PUSH_IF_MATCH("rocksdb.compression", kCompressionType[rocksdb_options.compression]);
-}
-
-Status Config::setRocksdbOption(Engine::Storage *storage, const std::string &key, const std::string &value) {
-  int64_t i;
-  bool is_cf_mutal_option = false;
-  auto db = storage->GetDB();
-  auto s = Util::StringToNum(value, &i, 0);
-  if (!s.IsOK()) return s;
-  if (key == "stats_dump_period_sec") {
-    rocksdb_options.stats_dump_period_sec = static_cast<int>(i);
-  } else if (key == "max_open_files") {
-    rocksdb_options.max_open_files = static_cast<int>(i);
-  } else if (key == "delayed_write_rate") {
-    rocksdb_options.delayed_write_rate = static_cast<uint64_t>(i);
-  } else if (key == "max_background_compactions") {
-    rocksdb_options.max_background_compactions = static_cast<int>(i);
-  } else if (key == "max_background_flushes") {
-    rocksdb_options.max_background_flushes = static_cast<int>(i);
-  } else if (key == "compaction_readahead_size") {
-    rocksdb_options.compaction_readahead_size = static_cast<size_t>(i);
-  } else if (key == "target_file_size_base") {
-    is_cf_mutal_option = true;
-    rocksdb_options.target_file_size_base = static_cast<uint64_t>(i);
-  } else if (key == "write_buffer_size") {
-    is_cf_mutal_option = true;
-    rocksdb_options.write_buffer_size = static_cast<uint64_t>(i*MiB);
-  } else if (key == "max_write_buffer_number") {
-    is_cf_mutal_option = true;
-    rocksdb_options.max_write_buffer_number =  static_cast<int>(i);
-  } else if (key == "level0_slowdown_writes_trigger") {
-    is_cf_mutal_option = true;
-    rocksdb_options.level0_slowdown_writes_trigger = static_cast<int>(i);
-    rocksdb_options.level0_stop_writes_trigger = static_cast<int>(i * 2);
-  } else {
-    return Status(Status::NotOK, "option can't be set in-flight");
-  }
-  rocksdb::Status r_status;
-  if (!is_cf_mutal_option) {
-    r_status = db->SetDBOptions({{key, value}});
-  } else {
-    auto cf_handles = storage->GetCFHandles();
-    for (auto & cf_handle : cf_handles) {
-      r_status = db->SetOptions(cf_handle, {{key, value}});
-      if (!r_status.ok()) break;
-    }
-  }
-  if (r_status.ok()) return Status::OK();
-  return Status(Status::NotOK, r_status.ToString());
-}
-
-Status Config::Set(std::string key, const std::string &value, Server *svr) {
-  key = Util::ToLower(key);
-  if (key == "timeout") {
-    timeout = std::stoi(value);
-    return Status::OK();
-  }
-  if (key == "backup-dir") {
-    auto s = rocksdb::Env::Default()->CreateDirIfMissing(value);
-    if (!s.ok()) return Status(Status::NotOK, s.ToString());
-    backup_dir = value;
-    return Status::OK();
-  }
-  if (key == "maxclients") {
-    maxclients = std::stoi(value);
-    return Status::OK();
-  }
-  if (key == "max-backup-to-keep") {
-    max_backup_to_keep = static_cast<uint32_t>(std::stoi(value));
-    return Status::OK();
-  }
-  if (key == "max-backup-keep-hours") {
-    max_backup_keep_hours = static_cast<uint32_t>(std::stoi(value));
-    return Status::OK();
-  }
-  if (key == "masterauth") {
-    masterauth = value;
-    return Status::OK();
-  }
-  if (key == "requirepass") {
-    if (requirepass.empty() && !tokens.empty()) {
-      return Status(Status::NotOK, "don't clear the requirepass while the namespace wasn't empty");
-    }
-    requirepass = value;
-    return Status::OK();
-  }
-  if (key == "slave-read-only") {
-    int i;
-    if ((i = yesnotoi(value)) == -1) {
-      return Status(Status::NotOK, "argument must be 'yes' or 'no'");
-    }
-    slave_readonly = (i == 1);
-    return Status::OK();
-  }
-  if (key == "slave-priority") {
-    slave_priority = std::stoi(value);
-    return Status::OK();
-  }
-  if (key == "loglevel") {
-    for (size_t i = 0; i < kNumLogLevel; i++) {
-      if (Util::ToLower(value) == kLogLevels[i]) {
-        loglevel = static_cast<int>(i);
-        return Status::OK();
-      }
-    }
-    return Status(Status::NotOK, "loglevel should be info, warning, error or fatal");
-  }
-  if (key == "compact-cron") {
-    std::vector<std::string> args;
-    Util::Split(value, " ", &args);
-    return compact_cron.SetScheduleTime(args);
-  }
-  if (key == "bgsave-cron") {
-    std::vector<std::string> args;
-    Util::Split(value, " ", &args);
-    return bgsave_cron.SetScheduleTime(args);
-  }
-  if (key == "slowlog-log-slower-than") {
-    slowlog_log_slower_than = std::stoll(value);
-    return Status::OK();
-  }
-  if (key == "slowlog-max-len") {
-    slowlog_max_len = std::stoi(value);
-    return Status::OK();
-  }
-  if (key == "max-db-size") {
-    try {
-      int32_t i = std::stoi(value);
-      if (i < 0) {
-        return Status(Status::RedisParseErr, "value should be >= 0");
-      }
-      max_db_size = static_cast<uint32_t>(i);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, "value is not an integer or out of range");
-    }
-    svr->storage_->CheckDBSizeLimit();
-    return Status::OK();
-  }
-  if (key == "max-replication-mb") {
-    int64_t i;
-    auto s = Util::StringToNum(value, &i, 0);
-    if (!s.IsOK()) return s;
-    svr->SetReplicationRateLimit(static_cast<uint64_t>(i));
-    return Status::OK();
-  }
-  if (key == "max-io-mb") {
-    int64_t i;
-    auto s = Util::StringToNum(value, &i, 0);
-    if (!s.IsOK()) return s;
-    max_io_mb = i;
-    svr->storage_->SetIORateLimit(static_cast<uint64_t>(i));
-    return Status::OK();
-  }
-  if (key == "profiling-sample-ratio") {
-    int64_t i;
-    auto s = Util::StringToNum(value, &i, 0, 100);
-    if (!s.IsOK()) return s;
-    profiling_sample_ratio = static_cast<int>(i);
-    return Status::OK();
-  }
-  if (key == "profiling-sample-record-threshold-ms") {
-    int64_t i;
-    auto s = Util::StringToNum(value, &i, 0, INT_MAX);
-    if (!s.IsOK()) return s;
-    profiling_sample_record_threshold_ms = static_cast<int>(i);
-    return Status::OK();
-  }
-  if (key == "profiling-sample-record-max-len") {
-    int64_t i;
-    auto s = Util::StringToNum(value, &i, 0, INT_MAX);
-    if (!s.IsOK()) return s;
-    profiling_sample_record_max_len = static_cast<int>(i);
-    svr->GetPerfLog()->SetMaxEntries(profiling_sample_record_max_len);
-    return Status::OK();
-  }
-  if (key == "profiling-sample-commands") {
-    std::vector<std::string> cmds;
-    Util::Split(value, ",", &cmds);
-    for (auto const &cmd : cmds) {
-      if (!Redis::IsCommandExists(cmd) && cmd != "*") {
-        return Status(Status::NotOK, "invalid command: "+cmd+" in profiling-sample-commands");
-      }
-    }
-    profiling_sample_all_commands = false;
-    profiling_sample_commands.clear();
-    for (auto const &cmd : cmds) {
-      if (cmd == "*") {
-        profiling_sample_all_commands = true;
-        profiling_sample_commands.clear();
-        break;
-      }
-      profiling_sample_commands.insert(cmd);
-    }
-    return Status::OK();
-  }
-  if (!strncasecmp(key.c_str(), "rocksdb.", 8)) {
-    return setRocksdbOption(svr->storage_, key.substr(8, key.size()-8), value);
-  }
-  return Status(Status::NotOK, "Unsupported CONFIG parameter");
-}
-
-Status Config::Rewrite() {
-  std::string tmp_path = path_+".tmp";
-  remove(tmp_path.data());
-  std::ofstream output_file(tmp_path, std::ios::out);
-
-  std::ostringstream string_stream;
-#define WRITE_TO_FILE(key, value) do { \
-  string_stream << (key) << " " << (value) <<  "\n"; \
-} while (0)
-
-  std::string binds_str, repl_binds_str, sample_commands_str;
-  array2String(binds, ",", &binds_str);
-  array2String(repl_binds, ",", &repl_binds_str);
-  if (profiling_sample_all_commands) {
-    sample_commands_str = "*";
-  } else {
-    for (const auto &cmd : profiling_sample_commands) {
-      sample_commands_str.append(cmd);
-      sample_commands_str.append(",");
-    }
-    if (!sample_commands_str.empty()) sample_commands_str.pop_back();
-  }
-  string_stream << "################################ GERNERAL #####################################\n";
-  WRITE_TO_FILE("bind", binds_str);
-  WRITE_TO_FILE("port", port);
-  WRITE_TO_FILE("repl-bind", repl_binds_str);
-  WRITE_TO_FILE("timeout", timeout);
-  WRITE_TO_FILE("workers", workers);
-  WRITE_TO_FILE("maxclients", maxclients);
-  WRITE_TO_FILE("repl-workers", repl_workers);
-  WRITE_TO_FILE("loglevel", kLogLevels[loglevel]);
-  WRITE_TO_FILE("daemonize", (daemonize?"yes":"no"));
-  WRITE_TO_FILE("db-name", db_name);
-  WRITE_TO_FILE("dir", dir);
-  WRITE_TO_FILE("backup-dir", backup_dir);
-  WRITE_TO_FILE("tcp-backlog", backlog);
-  WRITE_TO_FILE("slave-read-only", (slave_readonly? "yes":"no"));
-  WRITE_TO_FILE("slave-priority", slave_priority);
-  WRITE_TO_FILE("slowlog-max-len", slowlog_max_len);
-  WRITE_TO_FILE("slowlog-log-slower-than", slowlog_log_slower_than);
-  WRITE_TO_FILE("max-backup-to-keep", max_backup_to_keep);
-  WRITE_TO_FILE("max-backup-keep-hours", max_backup_keep_hours);
-  WRITE_TO_FILE("max-db-size", max_db_size);
-  WRITE_TO_FILE("max-replication-mb", max_replication_mb);
-  WRITE_TO_FILE("max-io-mb", max_io_mb);
-  if (!requirepass.empty()) WRITE_TO_FILE("requirepass", requirepass);
-  if (!masterauth.empty()) WRITE_TO_FILE("masterauth", masterauth);
-  if (!master_host.empty())  WRITE_TO_FILE("slaveof", master_host+" "+std::to_string(master_port));
-  if (compact_cron.IsEnabled()) WRITE_TO_FILE("compact-cron", compact_cron.ToString());
-  if (bgsave_cron.IsEnabled()) WRITE_TO_FILE("bgave-cron", bgsave_cron.ToString());
-  WRITE_TO_FILE("profiling-sample-ratio", profiling_sample_ratio);
-  if (!sample_commands_str.empty()) WRITE_TO_FILE("profiling-sample-commands", sample_commands_str);
-  WRITE_TO_FILE("profiling-sample-record-max-len", profiling_sample_record_max_len);
-  WRITE_TO_FILE("profiling-sample-record-threshold-ms", profiling_sample_record_threshold_ms);
-
-  string_stream << "\n################################ ROCKSDB #####################################\n";
-  WRITE_TO_FILE("rocksdb.max_open_files", rocksdb_options.max_open_files);
-  WRITE_TO_FILE("rocksdb.write_buffer_size", rocksdb_options.write_buffer_size/MiB);
-  WRITE_TO_FILE("rocksdb.max_write_buffer_number", rocksdb_options.max_write_buffer_number);
-  WRITE_TO_FILE("rocksdb.max_background_compactions", rocksdb_options.max_background_compactions);
-  WRITE_TO_FILE("rocksdb.metadata_block_cache_size", rocksdb_options.metadata_block_cache_size/MiB);
-  WRITE_TO_FILE("rocksdb.subkey_block_cache_size", rocksdb_options.subkey_block_cache_size/MiB);
-  WRITE_TO_FILE("rocksdb.max_background_flushes", rocksdb_options.max_background_flushes);
-  WRITE_TO_FILE("rocksdb.max_sub_compactions", rocksdb_options.max_sub_compactions);
-  WRITE_TO_FILE("rocksdb.compression", kCompressionType[rocksdb_options.compression]);
-  WRITE_TO_FILE("rocksdb.enable_pipelined_write", (rocksdb_options.enable_pipelined_write ? "yes" : "no"));
-  WRITE_TO_FILE("rocksdb.delayed_write_rate", rocksdb_options.delayed_write_rate);
-  WRITE_TO_FILE("rocksdb.compaction_readahead_size", rocksdb_options.compaction_readahead_size);
-  WRITE_TO_FILE("rocksdb.target_file_size_base", rocksdb_options.target_file_size_base);
-  WRITE_TO_FILE("rocksdb.level0_slowdown_writes_trigger", rocksdb_options.level0_slowdown_writes_trigger);
-  WRITE_TO_FILE("rocksdb.wal_ttl_seconds", rocksdb_options.WAL_ttl_seconds);
-  WRITE_TO_FILE("rocksdb.wal_size_limit_mb", rocksdb_options.WAL_size_limit_MB);
-
-  string_stream << "\n################################ Namespace #####################################\n";
-  for (const auto &iter : tokens) {
-    WRITE_TO_FILE("namespace."+iter.second, iter.first);
-  }
-  output_file.write(string_stream.str().c_str(), string_stream.str().size());
-  output_file.close();
-  if (rename(tmp_path.data(), path_.data()) < 0) {
-    return Status(Status::NotOK, std::string("unable to rename config file, err: ")+strerror(errno));
-  }
-  return Status::OK();
-}
-
-void Config::GetNamespace(const std::string &ns, std::string *token) {
-  for (const auto &iter : tokens) {
-    if (iter.second == ns) {
-      *token = iter.first;
-    }
-  }
-}
-
-Status Config::SetNamespace(const std::string &ns, const std::string &token) {
-  if (ns == kDefaultNamespace) {
-    return Status(Status::NotOK, "can't set the default namespace");
-  }
-  if (tokens.find(token) != tokens.end()) {
-    return Status(Status::NotOK, "the token has already exists");
-  }
-  for (const auto &iter : tokens) {
-    if (iter.second == ns) {
-      tokens.erase(iter.first);
-      tokens[token] = ns;
-      return Status::OK();
-    }
-  }
-  return Status(Status::NotOK, "the namespace was not found");
-}
-
-Status Config::AddNamespace(const std::string &ns, const std::string &token) {
-  if (requirepass.empty()) {
-    return Status(Status::NotOK, "forbid to add new namespace while the requirepass is empty");
-  }
-  if (ns.size() > INT8_MAX) {
-    return Status(Status::NotOK, "the namespace size exceeds the limit " + std::to_string(INT8_MAX));
-  }
-  if (tokens.find(token) != tokens.end()) {
-    return Status(Status::NotOK, "the token has already exists");
-  }
-  for (const auto &iter : tokens) {
-    if (iter.second == ns) {
-      return Status(Status::NotOK, "the namespace has already exists");
-    }
-  }
-  tokens[token] = ns;
-  return Status::OK();
-}
-
-Status Config::DelNamespace(const std::string &ns) {
-  if (ns == kDefaultNamespace) {
-    return Status(Status::NotOK, "can't del the default namespace");
-  }
-  for (const auto &iter : tokens) {
-    if (iter.second == ns) {
-      tokens.erase(iter.first);
-      return Status::OK();
-    }
-  }
-  return Status(Status::NotOK, "the namespace was not found");
-}
diff --git a/src/config.h b/src/config.h
deleted file mode 100644
index 610c6d1..0000000
--- a/src/config.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#pragma once
-
-#include <sys/resource.h>
-#include <rocksdb/options.h>
-
-#include <string>
-#include <map>
-#include <vector>
-#include <set>
-
-#include "status.h"
-#include "cron.h"
-
-// forward declaration
-class Server;
-namespace Engine {
-class Storage;
-}
-
-extern const char *kDefaultNamespace;
-
-const size_t KiB = 1024L;
-const size_t MiB = 1024L * KiB;
-const size_t GiB = 1024L * MiB;
-
-struct Config{
- public:
-  int port = 6666;
-  int repl_port = port + 1;
-  int workers = 4;
-  int repl_workers = 1;
-  int timeout = 0;
-  int loglevel = 0;
-  int backlog = 1024;
-  int maxclients = 10240;
-  uint32_t max_backup_to_keep = 1;
-  uint32_t max_backup_keep_hours = 0;
-  int64_t slowlog_log_slower_than = 200000;  // 200ms
-  unsigned int slowlog_max_len = 0;
-  bool daemonize = false;
-  bool slave_readonly = true;
-  uint32_t slave_priority = 100;
-  uint32_t max_db_size = 0;  // unit is GB
-  uint64_t max_replication_mb = 0;  // unit is MB
-  uint64_t max_io_mb = 500;  // unit is MB
-
-  std::vector<std::string> binds{"127.0.0.1"};
-  std::vector<std::string> repl_binds{"127.0.0.1"};
-  std::string dir = "/tmp/ev";
-  std::string db_dir = dir+"/db";
-  std::string backup_dir;
-  std::string pidfile = dir+"/kvrocks.pid";
-  std::string db_name = "changeme.name";
-  std::string masterauth;
-  std::string requirepass;
-  std::string master_host;
-  int master_port = 0;
-  Cron compact_cron;
-  Cron bgsave_cron;
-  std::map<std::string, std::string> tokens;
-
-  // profiling
-  int profiling_sample_ratio = 0;
-  int profiling_sample_record_threshold_ms = 0;
-  int profiling_sample_record_max_len = 256;
-  std::set<std::string> profiling_sample_commands;
-  bool profiling_sample_all_commands = false;
-
-  struct {
-    size_t metadata_block_cache_size = 4 * GiB;
-    size_t subkey_block_cache_size = 8 * GiB;
-    int max_open_files = 4096;
-    uint64_t write_buffer_size = 256 * MiB;
-    int max_write_buffer_number = 2;
-    int max_background_compactions = 2;
-    int max_background_flushes = 2;
-    uint32_t max_sub_compactions = 1;
-    rocksdb::CompressionType compression = rocksdb::kSnappyCompression;  // default: snappy
-    int stats_dump_period_sec = 0;
-    bool enable_pipelined_write = true;
-    uint64_t delayed_write_rate = 0;
-    size_t compaction_readahead_size = 2 * MiB;
-    uint64_t target_file_size_base = 256 * MiB;
-    uint64_t WAL_ttl_seconds = 7 * 24 * 3600;
-    uint64_t WAL_size_limit_MB = 5 * 1024;
-    int level0_slowdown_writes_trigger = 20;
-    int level0_stop_writes_trigger = 36;
-  } rocksdb_options;
-
- public:
-  Status Rewrite();
-  Status Load(std::string path);
-  void Get(std::string key, std::vector<std::string> *values);
-  Status Set(std::string key, const std::string &value, Server *svr);
-  Status setRocksdbOption(Engine::Storage *storage, const std::string &key, const std::string &value);
-  void GetNamespace(const std::string &ns, std::string *token);
-  Status AddNamespace(const std::string &ns, const std::string &token);
-  Status SetNamespace(const std::string &ns, const std::string &token);
-  Status DelNamespace(const std::string &ns);
-  Config() = default;
-  ~Config() = default;
-
- private:
-  std::string path_;
-  int yesnotoi(std::string input);
-  void incrOpenFilesLimit(rlim_t maxfiles);
-  Status parseConfigFromString(std::string input);
-  Status parseRocksdbOption(const std::string &key, std::string value);
-  Status parseRocksdbIntOption(std::string key, std::string value);
-  void array2String(const std::vector<std::string> &array, const std::string &delim, std::string *output);
-};
diff --git a/src/cron.cc b/src/cron.cc
deleted file mode 100644
index 320af80..0000000
--- a/src/cron.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-#include "cron.h"
-#include <stdexcept>
-#include <utility>
-
-std::string Scheduler::ToString() const {
-  auto param2String = [](int n)->std::string {
-    return n == -1 ? "*" : std::to_string(n);
-  };
-  return param2String(minute) + " " +
-      param2String(hour) + " " +
-      param2String(mday) + " " +
-      param2String(month) + " " +
-      param2String(wday);
-}
-
-Status Cron::SetScheduleTime(const std::vector<std::string> &args) {
-  if (args.empty()) {
-    schedulers_.clear();
-    return Status::OK();
-  }
-  if (args.size() % 5 != 0) {
-    return Status(Status::NotOK, "time expression format error,should only contain 5x fields");
-  }
-
-  std::vector<Scheduler> new_schedulers;
-  Scheduler st;
-  for (size_t i = 0; i < args.size(); i += 5) {
-    Status s = convertToScheduleTime(args[i], args[i+1], args[i+2], args[i+3], args[i+4], &st);
-    if (!s.IsOK()) {
-      return Status(Status::NotOK, "time expression format error : " + s.Msg());
-    }
-    new_schedulers.push_back(st);
-  }
-  schedulers_ = std::move(new_schedulers);
-  return Status::OK();
-}
-
-bool Cron::IsTimeMatch(struct tm *tm) {
-  if (tm->tm_min == last_tm_.tm_min &&
-      tm->tm_hour == last_tm_.tm_hour &&
-      tm->tm_mday == last_tm_.tm_mday &&
-      tm->tm_mon == last_tm_.tm_mon &&
-      tm->tm_wday == last_tm_.tm_wday) {
-    return false;
-  }
-  for (const auto &st : schedulers_) {
-    if ((st.minute == -1 || tm->tm_min == st.minute) &&
-        (st.hour == -1 || tm->tm_hour == st.hour) &&
-        (st.mday == -1 || tm->tm_mday == st.mday) &&
-        (st.month == -1 || (tm->tm_mon + 1) == st.month) &&
-        (st.wday == -1 || tm->tm_wday == st.wday)) {
-      last_tm_ = *tm;
-      return true;
-    }
-  }
-  return false;
-}
-
-bool Cron::IsEnabled() {
-  return !schedulers_.empty();
-}
-
-std::string Cron::ToString() {
-  std::string ret;
-  for (size_t i = 0; i < schedulers_.size(); i++) {
-    ret += schedulers_[i].ToString();
-    if (i != schedulers_.size()-1) ret += " ";
-  }
-  return ret;
-}
-
-Status Cron::convertToScheduleTime(const std::string &minute,
-                                   const std::string &hour,
-                                   const std::string &mday,
-                                   const std::string &month,
-                                   const std::string &wday,
-                                   Scheduler *st) {
-  Status s;
-  s = convertParam(minute, 0, 59, &st->minute);
-  if (!s.IsOK()) return s;
-  s = convertParam(hour, 0, 23, &st->hour);
-  if (!s.IsOK()) return s;
-  s = convertParam(mday, 1, 31, &st->mday);
-  if (!s.IsOK()) return s;
-  s = convertParam(month, 1, 12, &st->month);
-  if (!s.IsOK()) return s;
-  s = convertParam(wday, 0, 6, &st->wday);
-  return s;
-}
-
-Status Cron::convertParam(const std::string &param, int lower_bound, int upper_bound, int *value) {
-  if (param == "*") {
-    *value = -1;
-    return Status::OK();
-  }
-
-  try {
-    *value = std::stoi(param);
-  } catch (const std::invalid_argument &e) {
-    return Status(Status::NotOK, "malformed token(`" + param + "`) not an integer or *");
-  } catch (const std::out_of_range &e) {
-    return Status(Status::NotOK, "malformed token(`" + param + "`) not convertable to int");
-  }
-  if (*value < lower_bound || *value > upper_bound) {
-    return Status(Status::NotOK, "malformed token(`" + param + "`) out of bound");
-  }
-  return Status::OK();
-}
-
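
For reference, a minimal usage sketch of the Cron scheduler deleted above. This is a hypothetical snippet, not part of the original sources; it assumes cron.h and status.h from this tree are available, and that the field order is minute hour mday month wday with "*" as a wildcard.

#include <ctime>
#include "cron.h"

int main() {
  Cron cron;
  // "30 3 * * *" fires once per day at 03:30.
  Status s = cron.SetScheduleTime({"30", "3", "*", "*", "*"});
  if (!s.IsOK()) return 1;

  time_t now = time(nullptr);
  struct tm tm_now;
  localtime_r(&now, &tm_now);
  // IsTimeMatch() returns true at most once per matching minute:
  // last_tm_ remembers the minute that already fired.
  return cron.IsTimeMatch(&tm_now) ? 0 : 2;
}
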
diff --git a/src/cron.h b/src/cron.h
deleted file mode 100644
index e30b328..0000000
--- a/src/cron.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#pragma once
-
-#include <ctime>
-#include <iostream>
-#include <vector>
-#include <string>
-#include "status.h"
-
-struct Scheduler {
-  int minute;
-  int hour;
-  int mday;
-  int month;
-  int wday;
-
-  std::string ToString() const;
-};
-
-class Cron {
- public:
-  Cron() = default;
-  ~Cron() = default;
-
-  Status SetScheduleTime(const std::vector<std::string> &args);
-  bool IsTimeMatch(struct tm *tm);
-  std::string ToString();
-  bool IsEnabled();
-
- private:
-  std::vector<Scheduler> schedulers_;
-  struct tm last_tm_ = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, nullptr};
-
-  Status convertToScheduleTime(
-      const std::string &minute,
-      const std::string &hour,
-      const std::string &mday,
-      const std::string &month,
-      const std::string &wday,
-      Scheduler *st);
-  Status convertParam(const std::string &param, int lower_bound, int upper_bound, int *value);
-};
diff --git a/src/encoding.cc b/src/encoding.cc
deleted file mode 100644
index 64a1a39..0000000
--- a/src/encoding.cc
+++ /dev/null
@@ -1,217 +0,0 @@
-#include "encoding.h"
-
-#include <limits.h>
-#include <float.h>
-#include <unistd.h>
-#include <stdint.h>
-#include <string.h>
-/* Byte ordering detection */
-#include <sys/types.h> /* This will likely define BYTE_ORDER */
-
-#include <string>
-#include <utility>
-
-#ifndef BYTE_ORDER
-#if (BSD >= 199103)
-# include <machine/endian.h>
-#else
-#if defined(linux) || defined(__linux__)
-# include <endian.h>
-#else
-#define LITTLE_ENDIAN   1234    /* least-significant byte first (vax, pc) */
-#define BIG_ENDIAN  4321    /* most-significant byte first (IBM, net) */
-#define PDP_ENDIAN  3412    /* LSB first in word, MSW first in long (pdp)*/
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__amd64__) || \
-  defined(vax) || defined(ns32000) || defined(sun386) || \
-  defined(MIPSEL) || defined(_MIPSEL) || defined(BIT_ZERO_ON_RIGHT) || \
-  defined(__alpha__) || defined(__alpha)
-#define BYTE_ORDER    LITTLE_ENDIAN
-#endif
-
-
-#if defined(sel) || defined(pyr) || defined(mc68000) || defined(sparc) || \
-  defined(is68k) || defined(tahoe) || defined(ibm032) || defined(ibm370) || \
-  defined(MIPSEB) || defined(_MIPSEB) || defined(_IBMR2) || defined(DGUX) ||\
-  defined(apollo) || defined(__convex__) || defined(_CRAY) || \
-  defined(__hppa) || defined(__hp9000) || \
-  defined(__hp9000s300) || defined(__hp9000s700) || \
-  defined(BIT_ZERO_ON_LEFT) || defined(m68k) || defined(__sparc)
-#define BYTE_ORDER  BIG_ENDIAN
-#endif
-#endif /* linux */
-#endif /* BSD */
-#endif /* BYTE_ORDER */
-
-/* Sometimes after including an OS-specific header that defines the
- * endianness we end up with __BYTE_ORDER but not with BYTE_ORDER, which is
- * what the Redis code uses. In this case let's define everything without
- * the underscores. */
-#ifndef BYTE_ORDER
-#ifdef __BYTE_ORDER
-#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)
-#ifndef LITTLE_ENDIAN
-#define LITTLE_ENDIAN __LITTLE_ENDIAN
-#endif
-#ifndef BIG_ENDIAN
-#define BIG_ENDIAN __BIG_ENDIAN
-#endif
-#if (__BYTE_ORDER == __LITTLE_ENDIAN)
-#define BYTE_ORDER LITTLE_ENDIAN
-#else
-#define BYTE_ORDER BIG_ENDIAN
-#endif
-#endif
-#endif
-#endif
-
-#if !defined(BYTE_ORDER) || \
-    (BYTE_ORDER != BIG_ENDIAN && BYTE_ORDER != LITTLE_ENDIAN)
-/* You must determine the correct byte order for your compiler - the next
- * line is an intentional error that will force the build to fail until the
- * macros above are fixed.
- */
-#error "Undefined or invalid BYTE_ORDER"
-#endif
-
-void EncodeFixed8(char *buf, uint8_t value) {
-  buf[0] = static_cast<uint8_t>(value & 0xff);
-}
-
-void EncodeFixed32(char *buf, uint32_t value) {
-  if (BYTE_ORDER == BIG_ENDIAN) {
-    memcpy(buf, &value, sizeof(value));
-  } else {
-    buf[0] = static_cast<uint8_t>((value >> 24) & 0xff);
-    buf[1] = static_cast<uint8_t>((value >> 16) & 0xff);
-    buf[2] = static_cast<uint8_t>((value >> 8) & 0xff);
-    buf[3] = static_cast<uint8_t>(value & 0xff);
-  }
-}
-
-void EncodeFixed64(char *buf, uint64_t value) {
-  if (BYTE_ORDER == BIG_ENDIAN) {
-    memcpy(buf, &value, sizeof(value));
-  } else {
-    buf[0] = static_cast<uint8_t>((value >> 56) & 0xff);
-    buf[1] = static_cast<uint8_t>((value >> 48) & 0xff);
-    buf[2] = static_cast<uint8_t>((value >> 40) & 0xff);
-    buf[3] = static_cast<uint8_t>((value >> 32) & 0xff);
-    buf[4] = static_cast<uint8_t>((value >> 24) & 0xff);
-    buf[5] = static_cast<uint8_t>((value >> 16) & 0xff);
-    buf[6] = static_cast<uint8_t>((value >> 8) & 0xff);
-    buf[7] = static_cast<uint8_t>(value & 0xff);
-  }
-}
-
-void PutFixed8(std::string *dst, uint8_t value) {
-  char buf[1];
-  buf[0] = static_cast<uint8_t>(value & 0xff);
-  dst->append(buf, 1);
-}
-
-void PutFixed32(std::string *dst, uint32_t value) {
-  char buf[sizeof(value)];
-  EncodeFixed32(buf, value);
-  dst->append(buf, sizeof(buf));
-}
-
-void PutFixed64(std::string *dst, uint64_t value) {
-  char buf[sizeof(value)];
-  EncodeFixed64(buf, value);
-  dst->append(buf, sizeof(buf));
-}
-
-void PutDouble(std::string *dst, double value) {
-  uint64_t u64;
-  memcpy(&u64, &value, sizeof(value));
-  auto ptr = &u64;
-  if ((*ptr >> 63) == 1) {
-    // negative: flip all bits so the sign bit becomes zero
-    *ptr ^= 0xffffffffffffffff;
-  } else {
-    // non-negative: set the sign bit to one
-    *ptr |= 0x8000000000000000;
-  }
-  PutFixed64(dst, *ptr);
-}
-
-bool GetFixed8(rocksdb::Slice *input, uint8_t *value) {
-  const char *data;
-  if (input->size() < sizeof(uint8_t)) {
-    return false;
-  }
-  data = input->data();
-  *value = static_cast<uint8_t>(data[0] & 0xff);
-  input->remove_prefix(sizeof(uint8_t));
-  return true;
-}
-
-bool GetFixed64(rocksdb::Slice *input, uint64_t *value) {
-  if (input->size() < sizeof(uint64_t)) {
-    return false;
-  }
-  *value = DecodeFixed64(input->data());
-  input->remove_prefix(sizeof(uint64_t));
-  return true;
-}
-
-bool GetFixed32(rocksdb::Slice *input, uint32_t *value) {
-  if (input->size() < sizeof(uint32_t)) {
-    return false;
-  }
-  *value = DecodeFixed32(input->data());
-  input->remove_prefix(sizeof(uint32_t));
-  return true;
-}
-
-bool GetDouble(rocksdb::Slice *input, double *value) {
-  if (input->size() < sizeof(double)) return false;
-  *value = DecodeDouble(input->data());
-  input->remove_prefix(sizeof(double));
-  return true;
-}
-
-uint32_t DecodeFixed32(const char *ptr) {
-  if (BYTE_ORDER == BIG_ENDIAN) {
-    uint32_t value;
-    memcpy(&value, ptr, sizeof(value));
-    return value;
-  } else {
-    return ((static_cast<uint32_t>(static_cast<uint8_t>(ptr[3])))
-        | (static_cast<uint32_t>(static_cast<uint8_t>(ptr[2])) << 8)
-        | (static_cast<uint32_t>(static_cast<uint8_t>(ptr[1])) << 16)
-        | (static_cast<uint32_t>(static_cast<uint8_t>(ptr[0])) << 24));
-  }
-}
-
-uint64_t DecodeFixed64(const char *ptr) {
-  if (BYTE_ORDER == BIG_ENDIAN) {
-    uint64_t value;
-    memcpy(&value, ptr, sizeof(value));
-    return value;
-  } else {
-    uint64_t hi = DecodeFixed32(ptr);
-    uint64_t lo = DecodeFixed32(ptr+4);
-    return (hi << 32) | lo;
-  }
-}
-
-double DecodeDouble(const char *ptr) {
-  uint64_t decoded = DecodeFixed64(ptr);
-  if ((decoded>>63) == 0) {
-    decoded ^= 0xffffffffffffffff;
-  } else {
-    decoded &= 0x7fffffffffffffff;
-  }
-  double value;
-  memcpy(&value, &decoded, sizeof(value));
-  return value;
-}
diff --git a/src/encoding.h b/src/encoding.h
deleted file mode 100644
index 8b39135..0000000
--- a/src/encoding.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include <unistd.h>
-#include <rocksdb/slice.h>
-#include <string>
-
-bool GetFixed8(rocksdb::Slice *input, uint8_t *value);
-bool GetFixed32(rocksdb::Slice *input, uint32_t *value);
-bool GetFixed64(rocksdb::Slice *input, uint64_t *value);
-bool GetDouble(rocksdb::Slice *input, double *value);
-void PutFixed8(std::string *dst, uint8_t value);
-void PutFixed32(std::string *dst, uint32_t value);
-void PutFixed64(std::string *dst, uint64_t value);
-void PutDouble(std::string *dst, double value);
-
-void EncodeFixed8(char *buf, uint8_t value);
-void EncodeFixed32(char *buf, uint32_t value);
-void EncodeFixed64(char *buf, uint64_t value);
-uint32_t DecodeFixed32(const char *ptr);
-uint64_t DecodeFixed64(const char *ptr);
-double DecodeDouble(const char *ptr);
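
As a sketch of how these helpers are meant to be used (assuming encoding.h above and the RocksDB headers are on the include path; the snippet itself is not part of the original sources): fixed-width values round-trip through a std::string buffer, and PutDouble manipulates the sign bit so that encoded doubles sort bytewise in numeric order.

#include <cassert>
#include <string>
#include "encoding.h"

int main() {
  // Fixed-width round trip through a string buffer.
  std::string buf;
  PutFixed32(&buf, 1234u);
  PutDouble(&buf, -3.5);

  rocksdb::Slice input(buf);
  uint32_t v = 0;
  double d = 0;
  bool ok = GetFixed32(&input, &v) && GetDouble(&input, &d);
  assert(ok && v == 1234u && d == -3.5);
  (void)ok;

  // PutDouble flips or sets the sign bit so the encoded bytes sort in the
  // same order as the doubles, allowing bytewise comparison inside keys.
  std::string a, b;
  PutDouble(&a, -1.0);
  PutDouble(&b, 2.0);
  assert(a < b);
  return 0;
}
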
diff --git a/src/event_listener.cc b/src/event_listener.cc
deleted file mode 100644
index ab1ad85..0000000
--- a/src/event_listener.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-#include "event_listener.h"
-#include <string>
-
-void EventListener::OnCompactionCompleted(rocksdb::DB *db, const rocksdb::CompactionJobInfo &ci) {
-  LOG(INFO) << "[event_listener/compaction_completed] column family: " << ci.cf_name
-            << ", reason: " << static_cast<int>(ci.compaction_reason)
-            << ", compression: " << static_cast<char>(ci.compression)
-            << ", base input level(files): " << ci.base_input_level << "(" << ci.input_files.size() << ")"
-            << ", output level(files): " << ci.output_level << "(" << ci.output_files.size() << ")"
-            << ", input bytes: " << ci.stats.total_input_bytes
-            << ", output bytes:" << ci.stats.total_output_bytes
-            << ", is_maunal:" << ci.stats.is_manual_compaction
-            << ", elapsed(micro): " << ci.stats.elapsed_micros;
-  storage_->IncrCompactionCount(1);
-  storage_->CheckDBSizeLimit();
-}
-
-void EventListener::OnFlushBegin(rocksdb::DB *db, const rocksdb::FlushJobInfo &fi) {
-  LOG(INFO) << "[event_listener/flush_begin] column family: " << fi.cf_name
-            << ", thread_id: " << fi.thread_id << ", job_id: " << fi.job_id
-            << ", reason: " << static_cast<int>(fi.flush_reason);
-}
-
-void EventListener::OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &fi) {
-  storage_->IncrFlushCount(1);
-  storage_->CheckDBSizeLimit();
-  LOG(INFO) << "[event_listener/flush_completed] column family: " << fi.cf_name
-            << ", thread_id: " << fi.thread_id << ", job_id: " << fi.job_id
-            << ", file: " << fi.file_path
-            << ", reason: " << static_cast<int>(fi.flush_reason)
-            << ", is_write_slowdown: " << (fi.triggered_writes_slowdown ? "yes" : "no")
-            << ", is_write_stall: " << (fi.triggered_writes_stop? "yes" : "no")
-            << ", largest seqno: " << fi.largest_seqno
-            << ", smallest seqno: " << fi.smallest_seqno;
-}
-
-void EventListener::OnBackgroundError(rocksdb::BackgroundErrorReason reason, rocksdb::Status *status) {
-  std::string reason_str;
-  switch (reason) {
-    case rocksdb::BackgroundErrorReason::kCompaction:
-      reason_str = "compact";
-      break;
-    case rocksdb::BackgroundErrorReason::kFlush:
-      reason_str = "flush";
-      break;
-    case rocksdb::BackgroundErrorReason::kMemTable:
-      reason_str = "memtable";
-      break;
-    case rocksdb::BackgroundErrorReason::kWriteCallback:
-      reason_str = "writecallback";
-      break;
-    default:
-      // Should not arrive here
-      break;
-  }
-  LOG(ERROR) << "[event_listener/background_error] reason: " << reason_str
-             << ", status: " << status->ToString();
-}
-
-void EventListener::OnTableFileDeleted(const rocksdb::TableFileDeletionInfo &info) {
-  LOG(INFO) << "[event_listener/table_file_deleted] db: " << info.db_name
-            << ", sst file: " << info.file_path
-            << ", status: " << info.status.ToString();
-}
-
-void EventListener::OnStallConditionsChanged(const rocksdb::WriteStallInfo &info) {
-  const char *stall_condition_strings[] = {"normal", "delay", "stop"};
-  LOG(WARNING) << "[event_listener/stall_cond_changed] column family: " << info.cf_name
-               << " write stall condition was changed, from "
-               << stall_condition_strings[static_cast<int>(info.condition.prev)]
-               << " to " << stall_condition_strings[static_cast<int>(info.condition.cur)];
-}
diff --git a/src/event_listener.h b/src/event_listener.h
deleted file mode 100644
index 1c8442b..0000000
--- a/src/event_listener.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-
-#include <glog/logging.h>
-#include <rocksdb/listener.h>
-
-#include "storage.h"
-
-class EventListener : public rocksdb::EventListener {
- public:
-  explicit EventListener(Engine::Storage *storage) : storage_(storage) {}
-  ~EventListener() override = default;
-  void OnFlushBegin(rocksdb::DB* db, const rocksdb::FlushJobInfo& fi) override;
-  void OnFlushCompleted(rocksdb::DB *db, const rocksdb::FlushJobInfo &fi) override;
-  void OnCompactionCompleted(rocksdb::DB *db, const rocksdb::CompactionJobInfo &ci) override;
-  void OnBackgroundError(rocksdb::BackgroundErrorReason reason, rocksdb::Status *status) override;
-  void OnTableFileDeleted(const rocksdb::TableFileDeletionInfo& info) override;
-  void OnStallConditionsChanged(const rocksdb::WriteStallInfo& info) override;
- private:
-  Engine::Storage *storage_ = nullptr;
-};
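
For context, a listener like the one above is typically registered through rocksdb::Options::listeners before the DB is opened. A minimal sketch follows; AddKvrocksListener is a hypothetical helper, and the storage pointer is assumed to be created by the caller.

#include <memory>
#include <rocksdb/options.h>
#include "event_listener.h"

// RocksDB invokes every registered listener on flush, compaction,
// background-error and write-stall events.
void AddKvrocksListener(rocksdb::Options *options, Engine::Storage *storage) {
  options->listeners.emplace_back(std::make_shared<EventListener>(storage));
}
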
diff --git a/src/lock_manager.cc b/src/lock_manager.cc
deleted file mode 100644
index 5801226..0000000
--- a/src/lock_manager.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "lock_manager.h"
-
-#include <thread>
-#include <string>
-
-LockManager::LockManager(int hash_power): hash_power_(hash_power) {
-  hash_mask_ = (1U << hash_power) - 1;
-  for (unsigned i = 0; i < Size(); i++) {
-    mutex_pool_.emplace_back(new std::mutex());
-  }
-}
-
-LockManager::~LockManager() {
-  for (const auto &mu : mutex_pool_) {
-    delete mu;
-  }
-}
-
-unsigned LockManager::hash(const rocksdb::Slice &key) {
-  return static_cast<unsigned>(std::hash<std::string>{}(key.ToString()) & hash_mask_);
-}
-
-unsigned LockManager::Size() {
-  return (1U << hash_power_);
-}
-
-void LockManager::Lock(const rocksdb::Slice &key) {
-  mutex_pool_[hash(key)]->lock();
-}
-
-void LockManager::UnLock(const rocksdb::Slice &key) {
-  mutex_pool_[hash(key)]->unlock();
-}
diff --git a/src/lock_manager.h b/src/lock_manager.h
deleted file mode 100644
index 097b468..0000000
--- a/src/lock_manager.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#pragma once
-
-#include <rocksdb/db.h>
-
-#include <mutex>
-#include <vector>
-
-class LockManager {
- public:
-  explicit LockManager(int hash_power);
-  ~LockManager();
-
-  unsigned Size();
-  void Lock(const rocksdb::Slice &key);
-  void UnLock(const rocksdb::Slice &key);
-
- private:
-  int hash_power_;
-  int hash_mask_;
-  std::vector<std::mutex*> mutex_pool_;
-  unsigned hash(const rocksdb::Slice &key);
-};
-
-class LockGuard {
- public:
-  explicit LockGuard(LockManager *lock_mgr, rocksdb::Slice key):
-      lock_mgr_(lock_mgr),
-      key_(key) {
-    lock_mgr->Lock(key_);
-  }
-  ~LockGuard() {
-    lock_mgr_->UnLock(key_);
-  }
- private:
-  LockManager *lock_mgr_ = nullptr;
-  rocksdb::Slice key_;
-};
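
A short sketch of how the per-key locking above is used on a write path, mirroring what redis_bitmap.cc does via storage_->GetLockManager(). WriteWithKeyLock is a hypothetical function added only for illustration.

#include <rocksdb/slice.h>
#include "lock_manager.h"

// Writers whose keys hash to the same slot are serialized.
void WriteWithKeyLock(LockManager *lock_mgr, const rocksdb::Slice &ns_key) {
  LockGuard guard(lock_mgr, ns_key);  // locks one of the 2^hash_power mutexes
  // ... read-modify-write the value for ns_key while the mutex is held ...
}  // mutex released when guard goes out of scope
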
diff --git a/src/main.cc b/src/main.cc
deleted file mode 100644
index ff6c257..0000000
--- a/src/main.cc
+++ /dev/null
@@ -1,245 +0,0 @@
-#include <getopt.h>
-#include <stdlib.h>
-#include <event2/thread.h>
-#include <glog/logging.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <dlfcn.h>
-#ifdef __linux__
-#define _XOPEN_SOURCE 700
-#else
-#define _XOPEN_SOURCE
-#endif
-#include <signal.h>
-#include <execinfo.h>
-#include <ucontext.h>
-
-
-#include "worker.h"
-#include "storage.h"
-#include "version.h"
-#include "config.h"
-#include "server.h"
-#include "util.h"
-
-#if defined(__APPLE__) || defined(__linux__)
-#define HAVE_BACKTRACE 1
-#endif
-
-const char *kDefaultConfPath = "../kvrocks.conf";
-
-std::function<void()> hup_handler;
-
-struct Options {
-  std::string conf_file = kDefaultConfPath;
-  bool show_usage = false;
-};
-
-extern "C" void signal_handler(int sig) {
-  if (hup_handler) hup_handler();
-}
-
-#ifdef HAVE_BACKTRACE
-void *getMcontextEip(ucontext_t *uc) {
-#ifdef __x86_64__
-#define REG_EIP REG_RIP
-#endif
-#if defined(__FreeBSD__)
-        return reinterpret_cast<void*>(uc->uc_mcontext.mc_eip);
-#elif defined(__dietlibc__)
-        return reinterpret_cast<void*>(uc->uc_mcontext.eip);
-#elif defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6)
-#if __x86_64__
-        return reinterpret_cast<void*>(uc->uc_mcontext->__ss.__rip);
-#else
-        return reinterpret_cast<void*>(uc->uc_mcontext->__ss.__eip);
-#endif
-#elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)
-#if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__)
-        return reinterpret_cast<void*>(uc->uc_mcontext->__ss.__rip);
-#else
-        return reinterpret_cast<void*>(uc->uc_mcontext->__ss.__eip);
-#endif
-#elif defined(__i386__) || defined(__X86_64__) || defined(__x86_64__)
-        return reinterpret_cast<void*>(uc->uc_mcontext.gregs[REG_EIP]); /* Linux 32/64 bit */
-#elif defined(__ia64__) /* Linux IA64 */
-        return reinterpret_cast<void*>(uc->uc_mcontext.sc_ip);
-#endif
-  return nullptr;
-}
-
-extern "C" void segvHandler(int sig, siginfo_t *info, void *secret) {
-  void *trace[100];
-  char **messages = nullptr;
-  struct sigaction act;
-  auto uc = reinterpret_cast<ucontext_t*>(secret);
-
-  LOG(WARNING) << "======= Ooops! kvrocks "<< VERSION << " got signal: "  << sig << " =======";
-  int trace_size = backtrace(trace, 100);
-  /* overwrite trace[1] with the caller's address taken from the signal context */
-  if (getMcontextEip(uc) != nullptr) {
-    trace[1] = getMcontextEip(uc);
-  }
-  messages = backtrace_symbols(trace, trace_size);
-  for (int i = 1; i < trace_size; ++i) {
-    LOG(WARNING) << messages[i];
-  }
-  /* Make sure we exit with the right signal at the end. So for instance
-   * the core will be dumped if enabled.
-   */
-  sigemptyset(&act.sa_mask);
-  /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction
-   * is used. Otherwise, sa_handler is used
-   */
-  act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND;
-  act.sa_handler = SIG_DFL;
-  sigaction(sig, &act, nullptr);
-  kill(getpid(), sig);
-}
-
-void setupSigSegvAction() {
-  struct sigaction act;
-
-  sigemptyset(&act.sa_mask);
-  /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction
-   * is used. Otherwise, sa_handler is used */
-  act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND | SA_SIGINFO;
-  act.sa_sigaction = segvHandler;
-  sigaction(SIGSEGV, &act, nullptr);
-  sigaction(SIGBUS, &act, nullptr);
-  sigaction(SIGFPE, &act, nullptr);
-  sigaction(SIGILL, &act, nullptr);
-
-  act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND;
-  act.sa_handler = signal_handler;
-  sigaction(SIGTERM, &act, nullptr);
-  sigaction(SIGINT, &act, nullptr);
-}
-
-#else /* HAVE_BACKTRACE */
-void setupSigSegvAction() {
-}
-#endif /* HAVE_BACKTRACE */
-
-static void usage(const char* program) {
-  std::cout << program << " implements the Redis protocol based on rocksdb\n"
-            << "\t-c config file, default is " << kDefaultConfPath << "\n"
-            << "\t-h help\n";
-  exit(0);
-}
-
-static Options parseCommandLineOptions(int argc, char **argv) {
-  int ch;
-  Options opts;
-  while ((ch = ::getopt(argc, argv, "c:hv")) != -1) {
-    switch (ch) {
-      case 'c': opts.conf_file = optarg; break;
-      case 'h': opts.show_usage = true; break;
-      case 'v': exit(0);
-      default: usage(argv[0]);
-    }
-  }
-  return opts;
-}
-
-static void initGoogleLog(const Config *config) {
-  FLAGS_minloglevel = config->loglevel;
-  FLAGS_max_log_size = 100;
-  FLAGS_logbufsecs = 0;
-  FLAGS_log_dir = config->dir;
-}
-
-static Status createPidFile(const std::string &path) {
-  int fd = open(path.data(), O_RDWR|O_CREAT|O_EXCL, 0660);
-  if (fd < 0) {
-    return Status(Status::NotOK, strerror(errno));
-  }
-  std::string pid_str = std::to_string(getpid());
-  write(fd, pid_str.data(), pid_str.size());
-  close(fd);
-  return Status::OK();
-}
-
-static void removePidFile(const std::string &path) {
-  std::remove(path.data());
-}
-
-static void daemonize() {
-  pid_t pid;
-
-  pid = fork();
-  if (pid < 0) {
-    LOG(ERROR) << "Failed to fork the process, err: " << strerror(errno);
-    exit(1);
-  }
-  if (pid > 0) exit(EXIT_SUCCESS);  // parent process
-  // change the file mode
-  umask(0);
-  if (setsid() < 0) {
-    LOG(ERROR) << "Failed to setsid, err: %s" << strerror(errno);
-    exit(1);
-  }
-  close(STDIN_FILENO);
-  close(STDOUT_FILENO);
-  close(STDERR_FILENO);
-}
-
-int main(int argc, char* argv[]) {
-  google::InitGoogleLogging("kvrocks");
-  evthread_use_pthreads();
-
-  signal(SIGPIPE, SIG_IGN);
-  signal(SIGINT, signal_handler);
-  signal(SIGTERM, signal_handler);
-  setupSigSegvAction();
-
-  std::cout << "Version: " << VERSION << " @" << GIT_COMMIT << std::endl;
-  auto opts = parseCommandLineOptions(argc, argv);
-  if (opts.show_usage) usage(argv[0]);
-
-  Config config;
-  Status s = config.Load(opts.conf_file);
-  if (!s.IsOK()) {
-    std::cout << "Failed to load config, err: " << s.Msg() << std::endl;
-    exit(1);
-  }
-  initGoogleLog(&config);
-  // Tricky: we don't expect different instances to run on the same port, but
-  // the server uses REUSE_PORT to support multiple listeners, so binding alone
-  // won't detect a conflict. Instead we connect to the listen port to check
-  // whether it is already in use.
-  if (Util::IsPortInUse(config.port)) {
-    std::cout << "Failed to start the server, the specified port["
-              << config.port << "] is already in use" << std::endl;
-    exit(1);
-  }
-  if (config.daemonize) daemonize();
-  s = createPidFile(config.pidfile);
-  if (!s.IsOK()) {
-    LOG(ERROR) << "Failed to create pidfile: " << s.Msg();
-    exit(1);
-  }
-
-  Engine::Storage storage(&config);
-  s = storage.Open();
-  if (!s.IsOK()) {
-    LOG(ERROR) << "Failed to open: " << s.Msg();
-    removePidFile(config.pidfile);
-    exit(1);
-  }
-  Server svr(&storage, &config);
-  hup_handler = [&svr] {
-    if (!svr.IsStopped()) {
-      LOG(INFO) << "Bye Bye";
-      svr.Stop();
-    }
-  };
-  svr.Start();
-  svr.Join();
-
-  removePidFile(config.pidfile);
-  google::ShutdownGoogleLogging();
-  google::ShutDownCommandLineFlags();
-  libevent_global_shutdown();
-  return 0;
-}
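
Since the server writes its PID to config.pidfile at startup, a graceful stop can be requested from another process by reading that file and sending SIGTERM, which the handler above turns into svr.Stop(). The helper below is a hypothetical sketch, not part of the original sources.

#include <signal.h>
#include <sys/types.h>
#include <fstream>
#include <string>

// Read the pidfile written by createPidFile() and ask the running server
// to shut down gracefully.
bool StopByPidFile(const std::string &path) {
  std::ifstream in(path);
  pid_t pid = 0;
  if (!(in >> pid) || pid <= 0) return false;
  return kill(pid, SIGTERM) == 0;  // SIGTERM triggers hup_handler -> svr.Stop()
}
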
diff --git a/src/redis_bitmap.cc b/src/redis_bitmap.cc
deleted file mode 100644
index 31e3132..0000000
--- a/src/redis_bitmap.cc
+++ /dev/null
@@ -1,219 +0,0 @@
-#include "redis_bitmap.h"
-#include <vector>
-
-namespace Redis {
-
-const uint32_t kBitmapSegmentBits = 1024 * 8;
-const uint32_t kBitmapSegmentBytes = 1024;
-
-uint32_t kNum2Bits[256] = {
-    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
-};
-
-rocksdb::Status Bitmap::GetMetadata(const Slice &ns_key, BitmapMetadata *metadata) {
-  return Database::GetMetadata(kRedisBitmap, ns_key, metadata);
-}
-
-rocksdb::Status Bitmap::GetBit(const Slice &user_key, uint32_t offset, bool *bit) {
-  *bit = false;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  BitmapMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  uint32_t index = (offset / kBitmapSegmentBits) * kBitmapSegmentBytes;
-  std::string sub_key, value;
-  InternalKey(ns_key, std::to_string(index), metadata.version).Encode(&sub_key);
-  s = db_->Get(read_options, sub_key, &value);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-  uint32_t byte_index = (offset / 8) % kBitmapSegmentBytes;
-  if ((byte_index < value.size() && (value[byte_index] & (1 << (offset % 8))))) {
-    *bit = true;
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Bitmap::SetBit(const Slice &user_key, uint32_t offset, bool new_bit, bool *old_bit) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  BitmapMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-
-  std::string sub_key, value;
-  uint32_t index = (offset / kBitmapSegmentBits) * kBitmapSegmentBytes;
-  InternalKey(ns_key, std::to_string(index), metadata.version).Encode(&sub_key);
-  if (s.ok()) {
-    s = db_->Get(rocksdb::ReadOptions(), sub_key, &value);
-    if (!s.ok() && !s.IsNotFound()) return s;
-  }
-  uint32_t byte_index = (offset / 8) % kBitmapSegmentBytes;
-  uint32_t bitmap_size = metadata.size;
-  if (byte_index >= value.size()) {  // expand the bitmap
-    size_t expand_size;
-    if (byte_index >= value.size() * 2) {
-      expand_size = byte_index - value.size() + 1;
-    } else {
-      expand_size = value.size();
-    }
-    value.append(expand_size, 0);
-    if (value.size() + index > bitmap_size) {
-      bitmap_size = static_cast<uint32_t>(value.size()) + index;
-    }
-  }
-  uint32_t bit_offset = offset % 8;
-  *old_bit = (value[byte_index] & (1 << bit_offset)) != 0;
-  if (new_bit) {
-    value[byte_index] |= 1 << bit_offset;
-  } else {
-    value[byte_index] &= ~(1 << bit_offset);
-  }
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisBitmap, {std::to_string(offset)});
-  batch.PutLogData(log_data.Encode());
-  batch.Put(sub_key, value);
-  if (metadata.size != bitmap_size) {
-    metadata.size = bitmap_size;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Bitmap::BitCount(const Slice &user_key, int start, int stop, uint32_t *cnt) {
-  *cnt = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  BitmapMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  if (start < 0) start += metadata.size + 1;
-  if (stop < 0) stop += metadata.size + 1;
-  if (stop > static_cast<int>(metadata.size)) stop = metadata.size;
-  if (start < 0 || stop <= 0 || start >= stop) return rocksdb::Status::OK();
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  int start_index = start / kBitmapSegmentBytes;
-  int stop_index = stop / kBitmapSegmentBytes;
-  // Don't use MultiGet here: a large range query could pin too much memory at once
-  std::string sub_key, value;
-  for (int i = start_index; i <= stop_index; i++) {
-    InternalKey(ns_key, std::to_string(i * kBitmapSegmentBytes), metadata.version).Encode(&sub_key);
-    s = db_->Get(read_options, sub_key, &value);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    if (s.IsNotFound()) continue;
-    size_t j = 0;
-    if (i == start_index) j = start % kBitmapSegmentBytes;
-    for (; j < value.size(); j++) {
-      if (i == stop_index && j > (stop % kBitmapSegmentBytes)) break;
-      *cnt += kNum2Bits[static_cast<uint8_t>(value[j])];
-    }
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Bitmap::BitPos(const Slice &user_key, bool bit, int start, int stop, int *pos) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  BitmapMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-  if (s.IsNotFound()) {
-    *pos = bit ? -1 : 0;
-    return rocksdb::Status::OK();
-  }
-  if (start < 0) start += metadata.size + 1;
-  if (stop < 0) stop += metadata.size + 1;
-  if (start < 0 || stop < 0 || start > stop) {
-    *pos = -1;
-    return rocksdb::Status::OK();
-  }
-
-  auto bitPosInByte = [](char byte, bool bit) -> int {
-    for (int i = 0; i < 8; i++) {
-      if (bit && (byte & (1 << i)) != 0) return i;
-      if (!bit && (byte & (1 << i)) == 0) return i;
-    }
-    return -1;
-  };
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  int start_index = start / kBitmapSegmentBytes;
-  int stop_index = stop / kBitmapSegmentBytes;
-  // Don't use MultiGet here: a large range query could pin too much memory at once
-  std::string sub_key, value;
-  for (int i = start_index; i <= stop_index; i++) {
-    InternalKey(ns_key, std::to_string(i * kBitmapSegmentBytes), metadata.version).Encode(&sub_key);
-    s = db_->Get(read_options, sub_key, &value);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    if (s.IsNotFound()) {
-      if (!bit) {
-        *pos = i * kBitmapSegmentBits;
-        return rocksdb::Status::OK();
-      }
-      continue;
-    }
-    size_t j = 0;
-    if (i == start_index) j = start % kBitmapSegmentBytes;
-    for (; j < value.size(); j++) {
-      if (i == stop_index && j > (stop % kBitmapSegmentBytes)) break;
-      if (bitPosInByte(value[j], bit) != -1) {
-        *pos = static_cast<int>(i * kBitmapSegmentBits + j * 8 + bitPosInByte(value[j], bit));
-        return rocksdb::Status::OK();
-      }
-    }
-    if (!bit && value.size() < kBitmapSegmentBytes) {
-      *pos = static_cast<int>(i * kBitmapSegmentBits + value.size() * 8);
-      return rocksdb::Status::OK();
-    }
-  }
-  // bit was not found
-  *pos = bit ? -1 : static_cast<int>(metadata.size * 8);
-  return rocksdb::Status::OK();
-}
-
-bool Bitmap::GetBitFromValueAndOffset(const std::string &value, uint32_t offset) {
-  bool bit = false;
-  uint32_t byte_index = (offset / 8) % kBitmapSegmentBytes;
-  if ((byte_index < value.size() && (value[byte_index] & (1 << (offset % 8))))) {
-    bit = true;
-  }
-  return bit;
-}
-
-bool Bitmap::IsEmptySegment(const Slice &segment) {
-  static const char zero_byte_segment[kBitmapSegmentBytes] = {0};
-  std::string value = segment.ToString();
-  return !memcmp(zero_byte_segment, value.c_str(), value.size());
-}
-}  // namespace Redis
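
A worked example of the segment arithmetic used by GetBit/SetBit above, with the same constants (1024-byte segments, 8192 bits per segment). This snippet is illustrative only; the numbers in the comments are for offset 70000.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kBitmapSegmentBits  = 1024 * 8;
  const uint32_t kBitmapSegmentBytes = 1024;
  uint32_t offset = 70000;  // arbitrary bit offset

  // Segment sub key: (70000 / 8192) * 1024 = 8 * 1024 = 8192
  uint32_t index = (offset / kBitmapSegmentBits) * kBitmapSegmentBytes;
  // Byte inside the segment: (70000 / 8) % 1024 = 8750 % 1024 = 558
  uint32_t byte_index = (offset / 8) % kBitmapSegmentBytes;
  // Bit inside the byte: 70000 % 8 = 0 -> mask 0x01
  uint32_t bit_mask = 1u << (offset % 8);

  printf("sub key=%u byte=%u mask=0x%02x\n", index, byte_index, bit_mask);
  return 0;
}
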
diff --git a/src/redis_bitmap.h b/src/redis_bitmap.h
deleted file mode 100644
index b907be6..0000000
--- a/src/redis_bitmap.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#pragma once
-
-#include "redis_db.h"
-#include "redis_metadata.h"
-
-#include <string>
-
-namespace Redis {
-
-class Bitmap : public Database {
- public:
-  Bitmap(Engine::Storage *storage, const std::string &ns): Database(storage, ns) {}
-  rocksdb::Status GetBit(const Slice &user_key, uint32_t offset, bool *bit);
-  rocksdb::Status SetBit(const Slice &user_key, uint32_t offset, bool new_bit, bool *old_bit);
-  rocksdb::Status BitCount(const Slice &user_key, int start, int stop, uint32_t *cnt);
-  rocksdb::Status BitPos(const Slice &user_key, bool bit, int start, int stop, int *pos);
-  static bool GetBitFromValueAndOffset(const std::string &value, const uint32_t offset);
-  static bool IsEmptySegment(const Slice &segment);
- private:
-  rocksdb::Status GetMetadata(const Slice &ns_key, BitmapMetadata *metadata);
-};
-
-}  // namespace Redis
diff --git a/src/redis_cmd.cc b/src/redis_cmd.cc
deleted file mode 100644
index 29a7219..0000000
--- a/src/redis_cmd.cc
+++ /dev/null
@@ -1,3820 +0,0 @@
-#include <arpa/inet.h>
-#include <fcntl.h>
-#include <glog/logging.h>
-#include <sys/socket.h>
-#include <algorithm>
-#include <cctype>
-#include <chrono>
-#include <thread>
-#include <utility>
-#include <memory>
-
-#include "redis_db.h"
-#include "redis_cmd.h"
-#include "redis_hash.h"
-#include "redis_bitmap.h"
-#include "redis_list.h"
-#include "redis_request.h"
-#include "redis_connection.h"
-#include "redis_set.h"
-#include "redis_string.h"
-#include "redis_zset.h"
-#include "redis_pubsub.h"
-#include "replication.h"
-#include "util.h"
-#include "storage.h"
-#include "worker.h"
-#include "server.h"
-
-namespace Redis {
-
-const char *kValueNotInterger = "value is not an integer or out of range";
-
-class CommandAuth : public Commander {
- public:
-  CommandAuth() : Commander("auth", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Config *config = svr->GetConfig();
-    auto iter = config->tokens.find(args_[1]);
-    if (iter != config->tokens.end()) {
-      conn->SetNamespace(iter->second);
-      conn->BecomeUser();
-      *output = Redis::SimpleString("OK");
-      return Status::OK();
-    }
-    const std::string requirepass = config->requirepass;
-    if (!requirepass.empty() && args_[1] != requirepass) {
-      *output = Redis::Error("ERR invaild password");
-      return Status::OK();
-    }
-    conn->SetNamespace(kDefaultNamespace);
-    conn->BecomeAdmin();
-    if (requirepass.empty()) {
-      *output = Redis::Error("ERR Client sent AUTH, but no password is set");
-    } else {
-      *output = Redis::SimpleString("OK");
-    }
-    return Status::OK();
-  }
-};
-
-class CommandNamespace : public Commander {
- public:
-  CommandNamespace() : Commander("namespace", -3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (!conn->IsAdmin()) {
-      *output = Redis::Error("only administrator can use namespace command");
-      return Status::OK();
-    }
-    Config *config = svr->GetConfig();
-    if (args_.size() == 3 && args_[1] == "get") {
-      if (args_[2] == "*") {
-        std::vector<std::string> namespaces;
-        auto tokens = config->tokens;
-        for (auto iter = tokens.begin(); iter != tokens.end(); iter++) {
-          namespaces.emplace_back(iter->second);  // namespace
-          namespaces.emplace_back(iter->first);   // token
-        }
-        *output = Redis::MultiBulkString(namespaces);
-      } else {
-        std::string token;
-        config->GetNamespace(args_[2], &token);
-        *output = Redis::BulkString(token);
-      }
-    } else if (args_.size() == 4 && args_[1] == "set") {
-      Status s = config->SetNamespace(args_[2], args_[3]);
-      *output = s.IsOK() ? Redis::SimpleString("OK") : Redis::Error(s.Msg());
-      LOG(WARNING) << "Updated namespace: " << args_[2] << " with token: " << args_[3]
-      << ", addr: " << conn->GetAddr() << ", result: " << s.Msg();
-    } else if (args_.size() == 4 && args_[1] == "add") {
-      Status s = config->AddNamespace(args_[2], args_[3]);
-      *output = s.IsOK() ? Redis::SimpleString("OK") : Redis::Error(s.Msg());
-      LOG(WARNING) << "New namespace: " << args_[2] << " with token: " << args_[3]
-                   << ", addr: " << conn->GetAddr() << ", result: " << s.Msg();
-    } else if (args_.size() == 3 && args_[1] == "del") {
-      Status s = config->DelNamespace(args_[2]);
-      *output = s.IsOK() ? Redis::SimpleString("OK") : Redis::Error(s.Msg());
-      LOG(WARNING) << "Deleted namespace: " << args_[2]
-                   << ", addr: " << conn->GetAddr() << ", result: " << s.Msg();
-    } else {
-      *output = Redis::Error(
-          "NAMESPACE subcommand must be one of GET, SET, DEL, ADD");
-    }
-    return Status::OK();
-  }
-};
-
-class CommandKeys : public Commander {
- public:
-  CommandKeys() : Commander("keys", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string prefix = args_[1];
-    std::vector<std::string> keys;
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    if (prefix == "*") {
-      redis.Keys(std::string(), &keys);
-    } else {
-      if (prefix[prefix.size() - 1] != '*') {
-        *output = Redis::Error("ERR only keys prefix match was supported");
-        return Status::OK();
-      }
-      redis.Keys(prefix.substr(0, prefix.size() - 1), &keys);
-    }
-    *output = Redis::MultiBulkString(keys);
-    return Status::OK();
-  }
-};
-
-class CommandFlushDB : public Commander {
- public:
-  CommandFlushDB() : Commander("flushdb", 1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = redis.FlushDB();
-    LOG(WARNING) << "DB keys in namespce: " << conn->GetNamespace()
-              << " was flused, addr: " << conn->GetAddr();
-    if (s.ok()) {
-      *output = Redis::SimpleString("OK");
-      return Status::OK();
-    }
-    return Status(Status::RedisExecErr, s.ToString());
-  }
-};
-
-class CommandPing : public Commander {
- public:
-  CommandPing() : Commander("ping", 1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    *output = Redis::SimpleString("PONG");
-    return Status::OK();
-  }
-};
-
-class CommandSelect: public Commander {
- public:
-  CommandSelect() : Commander("select", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-};
-
-class CommandConfig : public Commander {
- public:
-  CommandConfig() : Commander("config", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (!conn->IsAdmin()) {
-      *output = Redis::Error("only administrator can use config command");
-      return Status::OK();
-    }
-
-    Config *config = svr->GetConfig();
-    if (args_.size() == 2 && Util::ToLower(args_[1]) == "rewrite") {
-      Status s = config->Rewrite();
-      if (!s.IsOK()) return Status(Status::RedisExecErr, s.Msg());
-      *output = Redis::SimpleString("OK");
-      LOG(INFO) << "# CONFIG REWRITE executed with success";
-    } else if (args_.size() == 3 && Util::ToLower(args_[1]) == "get") {
-      std::vector<std::string> values;
-      config->Get(args_[2], &values);
-      *output = Redis::MultiBulkString(values);
-    } else if (args_.size() == 4 && Util::ToLower(args_[1]) == "set") {
-      Status s = config->Set(args_[2], args_[3], svr);
-      if (!s.IsOK()) {
-        return Status(Status::NotOK, s.Msg() + ", key: " + args_[2]);
-      }
-      *output = Redis::SimpleString("OK");
-    } else {
-      *output = Redis::Error("CONFIG subcommand must be one of GET, SET, REWRITE");
-    }
-    return Status::OK();
-  }
-};
-
-class CommandGet : public Commander {
- public:
-  CommandGet() : Commander("get", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string value;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.Get(args_[1], &value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = s.IsNotFound() ? Redis::NilString() : Redis::BulkString(value);
-    return Status::OK();
-  }
-};
-
-class CommandStrlen: public Commander {
- public:
-  CommandStrlen() : Commander("strlen", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string value;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.Get(args_[1], &value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (s.IsNotFound()) {
-      *output = Redis::Integer(0);
-    } else {
-      *output = Redis::Integer(value.size());
-    }
-    return Status::OK();
-  }
-};
-
-class CommandGetSet : public Commander {
- public:
-  CommandGetSet() : Commander("getset", 3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    std::string old_value;
-    rocksdb::Status s = string_db.GetSet(args_[1], args_[2], &old_value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::BulkString(old_value);
-    return Status::OK();
-  }
-};
-
-class CommandGetRange: public Commander {
- public:
-  CommandGetRange() : Commander("getrange", 4, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      start_ = std::stoi(args[2]);
-      stop_ = std::stoi(args[3]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string value;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.Get(args_[1], &value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (s.IsNotFound()) {
-      *output = Redis::NilString();
-      return Status::OK();
-    }
-    if (start_ < 0) start_ = static_cast<int>(value.size()) + start_;
-    if (stop_ < 0) stop_ = static_cast<int>(value.size()) + stop_;
-    if (start_ < 0) start_ = 0;
-    if (stop_ > static_cast<int>(value.size())) stop_ = static_cast<int>(value.size());
-    if (start_ > stop_) {
-      *output = Redis::NilString();
-    } else {
-      *output = Redis::BulkString(value.substr(start_, stop_ - start_ + 1));
-    }
-    return Status::OK();
-  }
-
- private:
-  int start_ = 0, stop_ = 0;
-};
-
-class CommandSetRange: public Commander {
- public:
-  CommandSetRange() : Commander("setrange", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      offset_ = std::stoi(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.SetRange(args_[1], offset_, args_[3], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  int offset_ = 0;
-};
-
-class CommandMGet : public Commander {
- public:
-  CommandMGet() : Commander("mget", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    std::vector<Slice> keys;
-    for (size_t i = 1; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    std::vector<std::string> values;
-    // always return OK
-    string_db.MGet(keys, &values);
-    *output = Redis::MultiBulkString(values);
-    return Status::OK();
-  }
-};
-
-class CommandAppend: public Commander {
- public:
-  CommandAppend() : Commander("append", 3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.Append(args_[1], args_[2], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSet : public Commander {
- public:
-  CommandSet() : Commander("set", -3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    for (size_t i = 3; i < args.size(); i++) {
-      std::string opt = Util::ToLower(args[i]);
-      if (opt == "nx" || opt == "xx") {
-        opt == "nx" ? nx_ = true : xx_ = true;
-        continue;
-      }
-      if ((opt == "ex" || opt == "px") && i+1 < args.size()) {
-        if (opt == "ex") {
-          ttl_ = atoi(args[i+1].c_str());
-        } else {
-          ttl_ = atol(args[i+1].c_str())/1000;
-        }
-        i++;
-        continue;
-      }
-      return Status(Status::NotOK, "syntax error");
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s;
-    if (nx_) {
-      s = string_db.SetNX(args_[1], args_[2], ttl_, &ret);
-    } else if (xx_) {
-      s = string_db.SetXX(args_[1], args_[2], ttl_, &ret);
-    } else {
-      s = string_db.SetEX(args_[1], args_[2], ttl_);
-    }
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if ((nx_ || xx_) && !ret) {
-      *output = Redis::NilString();
-    } else {
-      *output = Redis::SimpleString("OK");
-    }
-    return Status::OK();
-  }
-
- private:
-  bool xx_ = false;
-  bool nx_ = false;
-  int ttl_ = 0;
-};
-
-class CommandSetEX : public Commander {
- public:
-  CommandSetEX() : Commander("setex", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      ttl_ = std::stoi(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.SetEX(args_[1], args_[3], ttl_);
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-
- private:
-  int ttl_ = 0;
-};
-
-class CommandMSet : public Commander {
- public:
-  CommandMSet() : Commander("mset", -3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() % 2 != 1) {
-      return Status(Status::RedisParseErr, "wrong number of arguments");
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    std::vector<StringPair> kvs;
-    for (size_t i = 1; i < args_.size(); i+=2) {
-      kvs.emplace_back(StringPair{args_[i], args_[i+1]});
-    }
-    rocksdb::Status s = string_db.MSet(kvs);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-};
-
-class CommandSetNX : public Commander {
- public:
-  CommandSetNX() : Commander("setnx", 3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.SetNX(args_[1], args_[2], 0, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandIncr : public Commander {
- public:
-  CommandIncr() : Commander("incr", 2, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int64_t ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.IncrBy(args_[1], 1, &ret);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandDecr : public Commander {
- public:
-  CommandDecr() : Commander("decr", 2, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int64_t ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.IncrBy(args_[1], -1, &ret);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandIncrBy : public Commander {
- public:
-  CommandIncrBy() : Commander("incrby", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      increment_ = std::stoll(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int64_t ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.IncrBy(args_[1], increment_, &ret);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  int64_t increment_ = 0;
-};
-
-class CommandIncrByFloat : public Commander {
- public:
-  CommandIncrByFloat() : Commander("incrbyfloat", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      increment_ = std::stof(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    float ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.IncrByFloat(args_[1], increment_, &ret);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::BulkString(std::to_string(ret));
-    return Status::OK();
-  }
-
- private:
-  float increment_ = 0;
-};
-
-class CommandDecrBy : public Commander {
- public:
-  CommandDecrBy() : Commander("decrby", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      increment_ = std::stoll(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int64_t ret;
-    Redis::String string_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = string_db.IncrBy(args_[1], -1 * increment_, &ret);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  int64_t increment_ = 0;
-};
-
-class CommandDel : public Commander {
- public:
-  CommandDel() : Commander("del", -2, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int cnt = 0;
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    for (unsigned int i = 1; i < args_.size(); i++) {
-      rocksdb::Status s = redis.Del(args_[i]);
-      if (s.ok()) cnt++;
-    }
-    *output = Redis::Integer(cnt);
-    return Status::OK();
-  }
-};
-
-class CommandGetBit : public Commander {
- public:
-  CommandGetBit() : Commander("getbit", 3, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      offset_ = std::stoul(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    bool bit;
-    Redis::Bitmap bitmap_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = bitmap_db.GetBit(args_[1], offset_, &bit);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(bit? 1 : 0);
-    return Status::OK();
-  }
- private:
-  uint32_t offset_ = 0;
-};
-
-class CommandSetBit : public Commander {
- public:
-  CommandSetBit() : Commander("setbit", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      offset_ = std::stoul(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    if (args[3] == "0") {
-      bit_ = false;
-    } else if (args[3] == "1") {
-      bit_ = true;
-    } else {
-      return Status(Status::RedisParseErr, "bit should be 0 or 1");
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    bool old_bit;
-    Redis::Bitmap bitmap_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = bitmap_db.SetBit(args_[1], offset_, bit_, &old_bit);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(old_bit? 1 : 0);
-    return Status::OK();
-  }
-
- private:
-  uint32_t offset_ = 0;
-  bool bit_ = false;
-};
-
-class CommandBitCount : public Commander {
- public:
-  CommandBitCount() : Commander("bitcount", -2, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      if (args.size() >= 3) start_ = std::stoi(args[2]);
-      if (args.size() >= 4) stop_ = std::stoi(args[3]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    uint32_t cnt;
-    Redis::Bitmap bitmap_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = bitmap_db.BitCount(args_[1], start_, stop_, &cnt);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(cnt);
-    return Status::OK();
-  }
- private:
-  int start_ = 0, stop_ = -1;
-};
-
-class CommandBitPos: public Commander {
- public:
-  CommandBitPos() : Commander("bitcount", -3, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      if (args.size() >= 4) start_ = std::stoi(args[3]);
-      if (args.size() >= 5) stop_ = std::stoi(args[4]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    if (args[2] == "0") {
-      bit_ = false;
-    } else if (args[2] == "1") {
-      bit_ = true;
-    } else {
-      return Status(Status::RedisParseErr, "bit should be 0 or 1");
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int pos;
-    Redis::Bitmap bitmap_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = bitmap_db.BitPos(args_[1], bit_, start_, stop_, &pos);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(pos);
-    return Status::OK();
-  }
-
- private:
-  int start_ = 0, stop_ = -1;
-  bool bit_ = false;
-};
-
-class CommandType : public Commander {
- public:
-  CommandType() : Commander("type", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    RedisType type;
-    rocksdb::Status s = redis.Type(args_[1], &type);
-    if (s.ok()) {
-      *output = Redis::BulkString(RedisTypeNames[type]);
-      return Status::OK();
-    }
-    return Status(Status::RedisExecErr, s.ToString());
-  }
-};
-
-class CommandObject : public Commander {
- public:
-  CommandObject() : Commander("object", 3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (Util::ToLower(args_[1]) == "dump") {
-      Redis::Database redis(svr->storage_, conn->GetNamespace());
-      std::vector<std::string> infos;
-      rocksdb::Status s = redis.Dump(args_[2], &infos);
-      if (!s.ok()) {
-        return Status(Status::RedisExecErr, s.ToString());
-      }
-      output->append(Redis::MultiLen(infos.size()));
-      for (const auto &info : infos) {
-        output->append(Redis::BulkString(info));
-      }
-    } else {
-      *output = Redis::Error("object subcommand must be dump");
-    }
-    return Status::OK();
-  }
-};
-
-class CommandTTL : public Commander {
- public:
-  CommandTTL() : Commander("ttl", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    int ttl;
-    rocksdb::Status s = redis.TTL(args_[1], &ttl);
-    if (s.ok()) {
-      *output = Redis::Integer(ttl);
-      return Status::OK();
-    } else {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-  }
-};
-
-class CommandPTTL : public Commander {
- public:
-  CommandPTTL() : Commander("pttl", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    int ttl;
-    rocksdb::Status s = redis.TTL(args_[1], &ttl);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
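-    // TTL() reports seconds; positive values are converted to milliseconds,
-    // while the sentinels (-1 no expiration, -2 missing key) pass through unchanged.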
-    if (ttl > 0) {
-      *output = Redis::Integer(ttl*1000);
-    } else {
-      *output = Redis::Integer(ttl);
-    }
-    return Status::OK();
-  }
-};
-
-class CommandExists : public Commander {
- public:
-  CommandExists() : Commander("exists", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int cnt = 0;
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    std::vector<rocksdb::Slice> keys;
-    for (unsigned i = 1; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    redis.Exists(keys, &cnt);
-    *output = Redis::Integer(cnt);
-    return Status::OK();
-  }
-};
-
-class CommandExpire : public Commander {
- public:
-  CommandExpire() : Commander("expire", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    int64_t now;
-    rocksdb::Env::Default()->GetCurrentTime(&now);
-    try {
-      seconds_ = std::stoi(args[2]);
-      if (seconds_ >= INT32_MAX - now) {
-        return Status(Status::RedisParseErr, "the expire time was overflow");
-      }
-      seconds_ += now;
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = redis.Expire(args_[1], seconds_);
-    if (s.ok()) {
-      *output = Redis::Integer(1);
-    } else {
-      *output = Redis::Integer(0);
-    }
-    return Status::OK();
-  }
-
- private:
-  int seconds_ = 0;
-};
-
-class CommandPExpire : public Commander {
- public:
-  CommandPExpire() : Commander("pexpire", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    int64_t now;
-    rocksdb::Env::Default()->GetCurrentTime(&now);
-    try {
-      seconds_ = std::stol(args[2])/1000;
-      if (seconds_ >= INT32_MAX - now) {
-        return Status(Status::RedisParseErr, "the expire time was overflow");
-      }
-      seconds_ += now;
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = redis.Expire(args_[1], seconds_);
-    if (s.ok()) {
-      *output = Redis::Integer(1);
-    } else {
-      *output = Redis::Integer(0);
-    }
-    return Status::OK();
-  }
-
- private:
-  int seconds_ = 0;
-};
-
-class CommandExpireAt : public Commander {
- public:
-  CommandExpireAt() : Commander("expireat", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      timestamp_ = std::stoi(args[2]);
-      if (timestamp_ >= INT32_MAX) {
-        return Status(Status::RedisParseErr, "the expire time was overflow");
-      }
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = redis.Expire(args_[1], timestamp_);
-    if (s.ok()) {
-      *output = Redis::Integer(1);
-    } else {
-      *output = Redis::Integer(0);
-    }
-    return Status::OK();
-  }
-
- private:
-  int timestamp_ = 0;
-};
-
-class CommandPExpireAt : public Commander {
- public:
-  CommandPExpireAt() : Commander("pexpireat", 3, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      timestamp_ = static_cast<int>(std::stol(args[2])/1000);
-      if (timestamp_ >= INT32_MAX) {
-        return Status(Status::RedisParseErr, "the expire time was overflow");
-      }
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = redis.Expire(args_[1], timestamp_);
-    if (s.ok()) {
-      *output = Redis::Integer(1);
-    } else {
-      *output = Redis::Integer(0);
-    }
-    return Status::OK();
-  }
-
- private:
-  int timestamp_ = 0;
-};
-
-class CommandPersist : public Commander {
- public:
-  CommandPersist() : Commander("persist", 2, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ttl;
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = redis.TTL(args_[1], &ttl);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
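-    // ttl == -1 means the key has no expiration and ttl == -2 means the key
-    // does not exist, so in both cases there is nothing to persist.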
-    if (ttl == -1 || ttl == -2) {
-      *output = Redis::Integer(0);
-      return Status::OK();
-    }
-    s = redis.Expire(args_[1], 0);
-    if (!s.ok()) return Status(Status::RedisExecErr, s.ToString());
-    *output = Redis::Integer(1);
-    return Status::OK();
-  }
-};
-
-class CommandHGet : public Commander {
- public:
-  CommandHGet() : Commander("hget", 3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::string value;
-    rocksdb::Status s = hash_db.Get(args_[1], args_[2], &value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = s.IsNotFound() ? Redis::NilString() : Redis::BulkString(value);
-    return Status::OK();
-  }
-};
-
-class CommandHSet : public Commander {
- public:
-  CommandHSet() : Commander("hset", 4, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = hash_db.Set(args_[1], args_[2], args_[3], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandHSetNX : public Commander {
- public:
-  CommandHSetNX() : Commander("hsetnx", 4, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = hash_db.SetNX(args_[1], args_[2], args_[3], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandHStrlen : public Commander {
- public:
-  CommandHStrlen() : Commander("hstrlen", 3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::string value;
-    rocksdb::Status s = hash_db.Get(args_[1], args_[2], &value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(static_cast<int>(value.size()));
-    return Status::OK();
-  }
-};
-
-class CommandHDel : public Commander {
- public:
-  CommandHDel() : Commander("hdel", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<Slice> fields;
-    for (unsigned int i = 2; i < args_.size(); i++) {
-      fields.emplace_back(Slice(args_[i]));
-    }
-    rocksdb::Status s = hash_db.Delete(args_[1], fields, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandHExists : public Commander {
- public:
-  CommandHExists() : Commander("hexists", 3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::string value;
-    rocksdb::Status s = hash_db.Get(args_[1], args_[2], &value);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = s.IsNotFound() ? Redis::Integer(0) : Redis::Integer(1);
-    return Status::OK();
-  }
-};
-
-class CommandHLen : public Commander {
- public:
-  CommandHLen() : Commander("hlen", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    uint32_t count;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = hash_db.Size(args_[1], &count);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = s.IsNotFound() ? Redis::Integer(0) : Redis::Integer(count);
-    return Status::OK();
-  }
-};
-
-class CommandHIncrBy : public Commander {
- public:
-  CommandHIncrBy() : Commander("hincrby", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      increment_ = std::stoll(args[3]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int64_t ret;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = hash_db.IncrBy(args_[1], args_[2], increment_, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  int64_t increment_ = 0;
-};
-
-class CommandHIncrByFloat : public Commander {
- public:
-  CommandHIncrByFloat() : Commander("hincrbyfloat", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      increment_ = std::stof(args[3]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    float ret;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = hash_db.IncrByFloat(args_[1], args_[2], increment_, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::BulkString(std::to_string(ret));
-    return Status::OK();
-  }
-
- private:
-  float increment_ = 0;
-};
-
-class CommandHMGet : public Commander {
- public:
-  CommandHMGet() : Commander("hmget", -3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<Slice> fields;
-    for (unsigned int i = 2; i < args_.size(); i++) {
-      fields.emplace_back(Slice(args_[i]));
-    }
-    std::vector<std::string> values;
-    rocksdb::Status s = hash_db.MGet(args_[1], fields, &values);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (s.IsNotFound()) {
-      values.resize(fields.size(), "");
-    }
-    *output = Redis::MultiBulkString(values);
-    return Status::OK();
-  }
-};
-
-class CommandHMSet : public Commander {
- public:
-  CommandHMSet() : Commander("hmset", -4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() % 2 != 0) {
-      return Status(Status::RedisParseErr, "wrong number of arguments");
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<FieldValue> field_values;
-    for (unsigned int i = 2; i < args_.size(); i += 2) {
-      field_values.push_back(FieldValue{args_[i], args_[i + 1]});
-    }
-    rocksdb::Status s = hash_db.MSet(args_[1], field_values, false, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandHKeys : public Commander {
- public:
-  CommandHKeys() : Commander("hkeys", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<FieldValue> field_values;
-    rocksdb::Status s = hash_db.GetAll(args_[1], &field_values, HashFetchType::kOnlyKey);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    std::vector<std::string> keys;
-    for (const auto fv : field_values) {
-      keys.emplace_back(fv.field);
-    }
-    *output = Redis::MultiBulkString(keys);
-    return Status::OK();
-  }
-};
-
-class CommandHVals : public Commander {
- public:
-  CommandHVals() : Commander("hvals", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<FieldValue> field_values;
-    rocksdb::Status s = hash_db.GetAll(args_[1], &field_values, HashFetchType::kOnlyValue);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    std::vector<std::string> values;
-    for (const auto fv : field_values) {
-      values.emplace_back(fv.value);
-    }
-    *output = Redis::MultiBulkString(values);
-    return Status::OK();
-  }
-};
-
-class CommandHGetAll : public Commander {
- public:
-  CommandHGetAll() : Commander("hgetall", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<FieldValue> field_values;
-    rocksdb::Status s = hash_db.GetAll(args_[1], &field_values);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = "*" + std::to_string(field_values.size() * 2) + CRLF;
-    for (const auto fv : field_values) {
-      *output += Redis::BulkString(fv.field);
-      *output += Redis::BulkString(fv.value);
-    }
-    return Status::OK();
-  }
-};
-
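-// Shared implementation of LPUSH/RPUSH/LPUSHX/RPUSHX: left_ selects which end
-// to push to, create_if_missing_ distinguishes PUSH from PUSHX, and a
-// successful push wakes any clients blocked on the key by BLPOP/BRPOP.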
-class CommandPush : public Commander {
- public:
-  CommandPush(bool create_if_missing, bool left)
-      : Commander("push", -3, true) {
-    left_ = left;
-    create_if_missing_ = create_if_missing;
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    std::vector<Slice> elems;
-    for (unsigned int i = 2; i < args_.size(); i++) {
-      elems.emplace_back(args_[i]);
-    }
-    int ret;
-    rocksdb::Status s;
-    if (create_if_missing_) {
-      s = list_db.Push(args_[1], elems, left_, &ret);
-    } else {
-      s = list_db.PushX(args_[1], elems, left_, &ret);
-    }
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-
-    svr->WakeupBlockingConns(args_[1], elems.size());
-
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  bool left_;
-  bool create_if_missing_;
-};
-
-class CommandLPush : public CommandPush {
- public:
-  CommandLPush() : CommandPush(true, true) { name_ = "lpush"; }
-};
-
-class CommandRPush : public CommandPush {
- public:
-  CommandRPush() : CommandPush(true, false) { name_ = "rpush"; }
-};
-
-class CommandLPushX : public CommandPush {
- public:
-  CommandLPushX() : CommandPush(false, true) { name_ = "lpushx"; }
-};
-
-class CommandRPushX : public CommandPush {
- public:
-  CommandRPushX() : CommandPush(false, false) { name_ = "rpushx"; }
-};
-
-class CommandPop : public Commander {
- public:
-  explicit CommandPop(bool left) : Commander("pop", 2, true) { left_ = left; }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    std::string elem;
-    rocksdb::Status s = list_db.Pop(args_[1], &elem, left_);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (s.IsNotFound()) {
-      *output = Redis::NilString();
-    } else {
-      *output = Redis::BulkString(elem);
-    }
-    return Status::OK();
-  }
-
- private:
-  bool left_;
-};
-
-class CommandLPop : public CommandPop {
- public:
-  CommandLPop() : CommandPop(true) { name_ = "lpop"; }
-};
-
-class CommandRPop : public CommandPop {
- public:
-  CommandRPop() : CommandPop(false) { name_ = "rpop"; }
-};
-
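-// Blocking pop (BLPOP/BRPOP). Execute() first tries a non-blocking pop on each
-// key; if all of them are empty, it registers the connection as blocked on
-// those keys, swaps in callbacks so a later push can retry the pop, and
-// optionally arms a timer that replies with nil once the timeout expires.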
-class CommandBPop : public Commander {
- public:
-  explicit CommandBPop(bool left) : Commander("bpop", -3, true) { left_ = left; }
-  ~CommandBPop() {
-    if (timer_ != nullptr) {
-      event_free(timer_);
-      timer_ = nullptr;
-    }
-  }
-
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      timeout_ = std::stoi(args[args.size() - 1]);
-      if (timeout_ < 0) {
-        return Status(Status::RedisParseErr, "timeout should not be negative");
-      }
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, "timeout is not an integer or out of range");
-    }
-    keys_ = std::vector<std::string>(args.begin() + 1, args.end() - 1);
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    svr_ = svr;
-    conn_ = conn;
-
-    auto bev = conn->GetBufferEvent();
-    auto s = TryPopFromList();
-    if (s.ok() || !s.IsNotFound()) {
-      return Status::OK();  // the error reply was already sent in TryPopFromList
-    }
-    for (const auto &key : keys_) {
-      svr_->AddBlockingKey(key, conn_);
-    }
-    bufferevent_setcb(bev, nullptr, WriteCB, EventCB, this);
-    if (timeout_) {
-      timer_ = evtimer_new(bufferevent_get_base(bev), TimerCB, this);
-      timeval tm = {timeout_, 0};
-      evtimer_add(timer_, &tm);
-    }
-    return Status::OK();
-  }
-
-  rocksdb::Status TryPopFromList() {
-    Redis::List list_db(svr_->storage_, conn_->GetNamespace());
-    std::string elem;
-    rocksdb::Status s;
-    for (const auto &key : keys_) {
-      s = list_db.Pop(key, &elem, left_);
-      if (s.ok() || !s.IsNotFound()) {
-        break;
-      }
-    }
-    if (s.ok()) {
-      conn_->Reply(Redis::BulkString(elem));
-    } else if (!s.IsNotFound()) {
-      conn_->Reply(Redis::Error("ERR " + s.ToString()));
-      LOG(ERROR) << "Failed to execute redis command: " << conn_->current_cmd_->Name()
-                 << ", err: " << s.ToString();
-    }
-    return s;
-  }
-
-  static void WriteCB(bufferevent *bev, void *ctx) {
-    auto self = reinterpret_cast<CommandBPop *>(ctx);
-    auto s = self->TryPopFromList();
-    // if the pop fails, we currently give up and finish this bpop request with nil
-    if (s.IsNotFound()) {
-      self->conn_->Reply(Redis::NilString());
-      LOG(ERROR) << "[BPOP] Failed to execute redis command: " << self->conn_->current_cmd_->Name()
-                 << ", err: another concurrent pop request must have stole the data before this bpop request"
-                 << " or bpop is in a pipeline cmd list(cmd before bpop replyed trigger this writecb)";
-    }
-    if (self->timer_ != nullptr) {
-      event_free(self->timer_);
-      self->timer_ = nullptr;
-    }
-    self->unBlockingAll();
-    bufferevent_setcb(bev, Redis::Connection::OnRead, Redis::Connection::OnWrite,
-                      Redis::Connection::OnEvent, self->conn_);
-    bufferevent_enable(bev, EV_READ);
-  }
-
-  static void EventCB(bufferevent *bev, int16_t events, void *ctx) {
-    auto self = static_cast<CommandBPop *>(ctx);
-    if (events & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) {
-      if (self->timer_ != nullptr) {
-        event_free(self->timer_);
-        self->timer_ = nullptr;
-      }
-      self->unBlockingAll();
-    }
-    Redis::Connection::OnEvent(bev, events, self->conn_);
-  }
-
-  static void TimerCB(int, int16_t events, void *ctx) {
-    auto self = reinterpret_cast<CommandBPop *>(ctx);
-    self->conn_->Reply(Redis::NilString());
-    event_free(self->timer_);
-    self->timer_ = nullptr;
-    self->unBlockingAll();
-    auto bev = self->conn_->GetBufferEvent();
-    bufferevent_setcb(bev, Redis::Connection::OnRead, Redis::Connection::OnWrite,
-                      Redis::Connection::OnEvent, self->conn_);
-    bufferevent_enable(bev, EV_READ);
-  }
-
- private:
-  bool left_ = false;
-  int timeout_ = 0;  // seconds
-  std::vector<std::string> keys_;
-  Server *svr_ = nullptr;
-  Connection *conn_ = nullptr;
-  event *timer_ = nullptr;
-
-  void unBlockingAll() {
-    for (const auto &key : keys_) {
-      svr_->UnBlockingKey(key, conn_);
-    }
-  }
-};
-
-class CommandBLPop : public CommandBPop {
- public:
-  CommandBLPop() : CommandBPop(true) { name_ = "blpop"; }
-};
-
-class CommandBRPop : public CommandBPop {
- public:
-  CommandBRPop() : CommandBPop(false) { name_ = "brpop"; }
-};
-
-class CommandLRem : public Commander {
- public:
-  CommandLRem() : Commander("lrem", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      count_ = std::stoi(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = list_db.Rem(args_[1], count_, args_[3], &ret);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  int count_ = 0;
-};
-
-class CommandLInsert : public Commander {
- public:
-  CommandLInsert() : Commander("linsert", 5, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    if ((Util::ToLower(args[2]) == "before")) {
-      before_ = true;
-    } else if ((Util::ToLower(args[2]) == "after")) {
-      before_ = false;
-    } else {
-      return Status(Status::RedisParseErr, "syntax error");
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = list_db.Insert(args_[1], args_[3], args_[4], before_, &ret);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  bool before_ = false;
-};
-
-class CommandLRange : public Commander {
- public:
-  CommandLRange() : Commander("lrange", 4, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      start_ = std::stoi(args[2]);
-      stop_ = std::stoi(args[3]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> elems;
-    rocksdb::Status s = list_db.Range(args_[1], start_, stop_, &elems);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(elems);
-    return Status::OK();
-  }
-
- private:
-  int start_ = 0, stop_ = 0;
-};
-
-class CommandLLen : public Commander {
- public:
-  CommandLLen() : Commander("llen", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    uint32_t count;
-    rocksdb::Status s = list_db.Size(args_[1], &count);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(count);
-    return Status::OK();
-  }
-};
-
-class CommandLIndex : public Commander {
- public:
-  CommandLIndex() : Commander("lindex", 3, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      index_ = std::stoi(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    std::string elem;
-    rocksdb::Status s = list_db.Index(args_[1], index_, &elem);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::BulkString(elem);
-    return Status::OK();
-  }
-
- private:
-  int index_ = 0;
-};
-
-class CommandLSet : public Commander {
- public:
-  CommandLSet() : Commander("lset", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      index_ = std::stoi(args[2]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = list_db.Set(args_[1], index_, args_[3]);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-
- private:
-  int index_ = 0;
-};
-
-class CommandLTrim : public Commander {
- public:
-  CommandLTrim() : Commander("ltrim", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      start_ = std::stoi(args[2]);
-      stop_ = std::stoi(args[3]);
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = list_db.Trim(args_[1], start_, stop_);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-
- private:
-  int start_ = 0, stop_ = 0;
-};
-
-class CommandRPopLPUSH : public Commander {
- public:
-  CommandRPopLPUSH() : Commander("rpoplpush", 3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::List list_db(svr->storage_, conn->GetNamespace());
-    std::string elem;
-    rocksdb::Status s = list_db.RPopLPush(args_[1], args_[2], &elem);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = s.IsNotFound() ? Redis::NilString() : Redis::BulkString(elem);
-    return Status::OK();
-  }
-};
-
-class CommandSAdd : public Commander {
- public:
-  CommandSAdd() : Commander("sadd", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    std::vector<Slice> members;
-    for (unsigned int i = 2; i < args_.size(); i++) {
-      members.emplace_back(args_[i]);
-    }
-    int ret;
-    rocksdb::Status s = set_db.Add(args_[1], members, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSRem : public Commander {
- public:
-  CommandSRem() : Commander("srem", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    std::vector<Slice> members;
-    for (unsigned int i = 2; i < args_.size(); i++) {
-      members.emplace_back(args_[i]);
-    }
-    int ret;
-    rocksdb::Status s = set_db.Remove(args_[1], members, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSCard : public Commander {
- public:
-  CommandSCard() : Commander("scard", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    int ret;
-    rocksdb::Status s = set_db.Card(args_[1], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSMembers : public Commander {
- public:
-  CommandSMembers() : Commander("smembers", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> members;
-    rocksdb::Status s = set_db.Members(args_[1], &members);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-};
-
-class CommandSIsMember : public Commander {
- public:
-  CommandSIsMember() : Commander("sismember", 3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    int ret;
-    rocksdb::Status s = set_db.IsMember(args_[1], args_[2], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSPop : public Commander {
- public:
-  CommandSPop() : Commander("spop", -2, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      if (args.size() == 3) {
-        count_ = std::stoi(args[2]);
-      }
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> members;
-    rocksdb::Status s = set_db.Take(args_[1], &members, count_, true);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-
- private:
-  int count_ = 1;
-};
-
-class CommandSRandMember : public Commander {
- public:
-  CommandSRandMember() : Commander("srandmember", -2, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      if (args.size() == 3) {
-        count_ = std::stoi(args[2]);
-      }
-    } catch (std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> members;
-    rocksdb::Status s = set_db.Take(args_[1], &members, count_, false);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-
- private:
-  int count_ = 1;
-};
-
-class CommandSMove : public Commander {
- public:
-  CommandSMove() : Commander("smove", 4, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    int ret;
-    rocksdb::Status s = set_db.Move(args_[1], args_[2], args_[3], &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSDiff : public Commander {
- public:
-  CommandSDiff() : Commander("sdiff", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::vector<Slice> keys;
-    for (size_t i = 1; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    std::vector<std::string> members;
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    auto s = set_db.Diff(keys, &members);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-};
-
-class CommandSUnion : public Commander {
- public:
-  CommandSUnion() : Commander("sunion", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::vector<Slice> keys;
-    for (size_t i = 1; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    std::vector<std::string> members;
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    auto s = set_db.Union(keys, &members);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-};
-
-class CommandSInter : public Commander {
- public:
-  CommandSInter() : Commander("sinter", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::vector<Slice> keys;
-    for (size_t i = 1; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    std::vector<std::string> members;
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    auto s = set_db.Inter(keys, &members);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-};
-
-class CommandSDiffStore: public Commander {
- public:
-  CommandSDiffStore() : Commander("sdiffstore", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret = 0;
-    std::vector<Slice> keys;
-    for (size_t i = 2; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    auto s = set_db.DiffStore(args_[1], keys, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSUnionStore: public Commander {
- public:
-  CommandSUnionStore() : Commander("sunionstore", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret = 0;
-    std::vector<Slice> keys;
-    for (size_t i = 2; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    auto s = set_db.UnionStore(args_[1], keys, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandSInterStore: public Commander {
- public:
-  CommandSInterStore() : Commander("sinterstore", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret = 0;
-    std::vector<Slice> keys;
-    for (size_t i = 2; i < args_.size(); i++) {
-      keys.emplace_back(args_[i]);
-    }
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    auto s = set_db.InterStore(args_[1], keys, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandZAdd : public Commander {
- public:
-  CommandZAdd() : Commander("zadd", -4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() % 2 != 0) {
-      return Status(Status::RedisParseErr, "syntax error");
-    }
-
-    try {
-      for (unsigned i = 2; i < args.size(); i += 2) {
-        double score = std::stod(args[i]);
-        member_scores_.emplace_back(MemberScore{args[i + 1], score});
-      }
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, "ERR value is not a valid float");
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.Add(args_[1], 0, &member_scores_, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  std::vector<MemberScore> member_scores_;
-};
-
-class CommandZCount : public Commander {
- public:
-  CommandZCount() : Commander("zcount", 4, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    Status s = Redis::ZSet::ParseRangeSpec(args[2], args[3], &spec_);
-    if (!s.IsOK()) {
-      return Status(Status::RedisParseErr, s.Msg());
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.Count(args_[1], spec_, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  ZRangeSpec spec_;
-};
-
-class CommandZCard : public Commander {
- public:
-  CommandZCard() : Commander("zcard", 2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.Card(args_[1], &ret);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-};
-
-class CommandZIncrBy : public Commander {
- public:
-  CommandZIncrBy() : Commander("zincrby", 4, true) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      incr_ = std::stod(args[2]);
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, "value is not an double or out of range");
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    double score;
-
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.IncrBy(args_[1], args_[3], incr_, &score);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::BulkString(std::to_string(score));
-    return Status::OK();
-  }
-
- private:
-  double incr_ = 0.0;
-};
-
-class CommandZLexCount : public Commander {
- public:
-  CommandZLexCount() : Commander("zlexcount", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    Status s = Redis::ZSet::ParseRangeLexSpec(args[2], args[3], &spec_);
-    if (!s.IsOK()) {
-      return Status(Status::RedisParseErr, s.Msg());
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.RangeByLex(args_[1], spec_, nullptr, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(size);
-    return Status::OK();
-  }
-
- private:
-  ZRangeLexSpec spec_;
-};
-
-class CommandZPop : public Commander {
- public:
-  explicit CommandZPop(bool min) : Commander("zpop", -2, true), min_(min) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() > 2) {
-      try {
-        count_ = std::stoi(args[2]);
-      } catch (const std::exception &e) {
-        return Status(Status::RedisParseErr, kValueNotInterger);
-      }
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    std::vector<MemberScore> memeber_scores;
-    rocksdb::Status s = zset_db.Pop(args_[1], count_, min_, &memeber_scores);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    output->append(Redis::MultiLen(memeber_scores.size() * 2));
-    for (const auto ms : memeber_scores) {
-      output->append(Redis::BulkString(ms.member));
-      output->append(Redis::BulkString(std::to_string(ms.score)));
-    }
-    return Status::OK();
-  }
-
- private:
-  bool min_;
-  int count_ = 1;
-};
-
-class CommandZPopMin : public CommandZPop {
- public:
-  CommandZPopMin() : CommandZPop(true) { name_ = "zpopmin"; }
-};
-
-class CommandZPopMax : public CommandZPop {
- public:
-  CommandZPopMax() : CommandZPop(false) { name_ = "zpopmax"; }
-};
-
-class CommandZRange : public Commander {
- public:
-  explicit CommandZRange(bool reversed = false)
-      : Commander("zrange", -4, false), reversed_(reversed) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      start_ = std::stoi(args[2]);
-      stop_ = std::stoi(args[3]);
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    if (args.size() > 4 && (Util::ToLower(args[4]) == "withscores")) {
-      with_scores_ = true;
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    std::vector<MemberScore> memeber_scores;
-    uint8_t flags = !reversed_ ? 0 : ZSET_REVERSED;
-    rocksdb::Status s =
-        zset_db.Range(args_[1], start_, stop_, flags, &memeber_scores);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (!with_scores_) {
-      output->append(Redis::MultiLen(memeber_scores.size()));
-    } else {
-      output->append(Redis::MultiLen(memeber_scores.size() * 2));
-    }
-    for (const auto ms : memeber_scores) {
-      output->append(Redis::BulkString(ms.member));
-      if (with_scores_)
-        output->append(Redis::BulkString(std::to_string(ms.score)));
-    }
-    return Status::OK();
-  }
-
- private:
-  int start_ = 0;
-  int stop_ = 0;
-  bool reversed_;
-  bool with_scores_ = false;
-};
-
-class CommandZRevRange : public CommandZRange {
- public:
-  CommandZRevRange() : CommandZRange(true) { name_ = "zrevrange"; }
-};
-
-class CommandZRangeByLex : public Commander {
- public:
-  CommandZRangeByLex() : Commander("zrangebylex", -4, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    Status s = Redis::ZSet::ParseRangeLexSpec(args[2], args[3], &spec_);
-    if (!s.IsOK()) {
-      return Status(Status::RedisParseErr, s.Msg());
-    }
-    try {
-      if (args.size() == 7 && Util::ToLower(args[4]) == "limit") {
-        spec_.offset = std::stoi(args[5]);
-        spec_.count = std::stoi(args[6]);
-      }
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> members;
-    rocksdb::Status s = zset_db.RangeByLex(args_[1], spec_, &members, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::MultiBulkString(members);
-    return Status::OK();
-  }
-
- private:
-  ZRangeLexSpec spec_;
-};
-
-class CommandZRangeByScore : public Commander {
- public:
-  explicit CommandZRangeByScore(bool reversed = false) : Commander("zrangebyscore", -4, false) {
-    spec_.reversed = reversed;
-  }
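-  // ZREVRANGEBYSCORE passes max before min, so the bounds are swapped when
-  // parsing in reversed mode.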
-  Status Parse(const std::vector<std::string> &args) override {
-    Status s;
-    if (spec_.reversed) {
-      s = Redis::ZSet::ParseRangeSpec(args[3], args[2], &spec_);
-    } else {
-      s = Redis::ZSet::ParseRangeSpec(args[2], args[3], &spec_);
-    }
-    if (!s.IsOK()) {
-      return Status(Status::RedisParseErr, s.Msg());
-    }
-    try {
-      size_t i = 4;
-      while (i < args.size()) {
-        if (Util::ToLower(args[i]) == "withscores") {
-          with_scores_ = true;
-          i++;
-        } else if (Util::ToLower(args[i]) == "limit" && i + 2 < args.size()) {
-          spec_.offset = std::stoi(args[i + 1]);
-          spec_.count = std::stoi(args[i + 2]);
-          i += 3;
-        } else {
-          return Status(Status::RedisParseErr, "syntax error");
-        }
-      }
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    std::vector<MemberScore> memeber_scores;
-    rocksdb::Status s =
-        zset_db.RangeByScore(args_[1], spec_, &memeber_scores, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (!with_scores_) {
-      output->append(Redis::MultiLen(memeber_scores.size()));
-    } else {
-      output->append(Redis::MultiLen(memeber_scores.size() * 2));
-    }
-    for (const auto ms : memeber_scores) {
-      output->append(Redis::BulkString(ms.member));
-      if (with_scores_)
-        output->append(Redis::BulkString(std::to_string(ms.score)));
-    }
-    return Status::OK();
-  }
-
- private:
-  ZRangeSpec spec_;
-  bool with_scores_ = false;
-};
-
-class CommandZRank : public Commander {
- public:
-  explicit CommandZRank(bool reversed = false)
-      : Commander("zrank", 3, false), reversed_(reversed) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int rank;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.Rank(args_[1], args_[2], reversed_, &rank);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (rank == -1) {
-      *output = Redis::NilString();
-    } else {
-      *output = Redis::Integer(rank);
-    }
-    return Status::OK();
-  }
-
- private:
-  bool reversed_;
-};
-
-class CommandZRevRank : public CommandZRank {
- public:
-  CommandZRevRank() : CommandZRank(true) { name_ = "zrevrank"; }
-};
-
-class CommandZRevRangeByScore : public CommandZRangeByScore {
- public:
-  CommandZRevRangeByScore() : CommandZRangeByScore(true) { name_ = "zrevrangebyscore"; }
-};
-
-class CommandZRem : public Commander {
- public:
-  CommandZRem() : Commander("zrem", -3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    std::vector<rocksdb::Slice> members;
-    for (unsigned i = 2; i < args_.size(); i++) {
-      members.emplace_back(args_[i]);
-    }
-    rocksdb::Status s = zset_db.Remove(args_[1], members, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(size);
-    return Status::OK();
-  }
-};
-
-class CommandZRemRangeByRank : public Commander {
- public:
-  CommandZRemRangeByRank() : Commander("zremrangebyrank", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      start_ = std::stoi(args[2]);
-      stop_ = std::stoi(args[3]);
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int ret;
-
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s =
-        zset_db.RemoveRangeByRank(args_[1], start_, stop_, &ret);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(ret);
-    return Status::OK();
-  }
-
- private:
-  int start_ = 0;
-  int stop_ = 0;
-};
-
-class CommandZRemRangeByScore : public Commander {
- public:
-  CommandZRemRangeByScore() : Commander("zremrangebyscore", -4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    Status s = Redis::ZSet::ParseRangeSpec(args[2], args[3], &spec_);
-    if (!s.IsOK()) {
-      return Status(Status::RedisParseErr, s.Msg());
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.RemoveRangeByScore(args_[1], spec_, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(size);
-    return Status::OK();
-  }
-
- private:
-  ZRangeSpec spec_;
-};
-
-class CommandZRemRangeByLex : public Commander {
- public:
-  CommandZRemRangeByLex() : Commander("zremrangebylex", 4, true) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    Status s = Redis::ZSet::ParseRangeLexSpec(args[2], args[3], &spec_);
-    if (!s.IsOK()) {
-      return Status(Status::RedisParseErr, s.Msg());
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.RemoveRangeByLex(args_[1], spec_, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(size);
-    return Status::OK();
-  }
-
- private:
-  ZRangeLexSpec spec_;
-};
-
-class CommandZScore : public Commander {
- public:
-  CommandZScore() : Commander("zscore", 3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    double score;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.Score(args_[1], args_[2], &score);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    if (s.IsNotFound()) {
-      *output = Redis::NilString();
-    } else {
-      *output = Redis::BulkString(std::to_string(score));
-    }
-    return Status::OK();
-  }
-};
-
-class CommandZUnionStore : public Commander {
- public:
-  CommandZUnionStore() : Commander("zunionstore", -4, true) {}
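-  // ZUNIONSTORE destination numkeys key [key ...]
-  //             [WEIGHTS weight [weight ...]] [AGGREGATE SUM|MIN|MAX]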
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      numkeys_ = std::stoi(args[2]);
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, kValueNotInterger);
-    }
-    if (numkeys_ > args.size() - 3) {
-      return Status(Status::RedisParseErr, "syntax error");
-    }
-    size_t j = 0;
-    while (j < numkeys_) {
-      keys_weights_.emplace_back(KeyWeight{args[j + 3], 1});
-      j++;
-    }
-    size_t i = 3 + numkeys_;
-    while (i < args.size()) {
-      if (Util::ToLower(args[i]) == "aggregate" && i + 1 < args.size()) {
-        if (Util::ToLower(args[i + 1]) == "sum") {
-          aggregate_method_ = kAggregateSum;
-        } else if (Util::ToLower(args[i + 1]) == "min") {
-          aggregate_method_ = kAggregateMin;
-        } else if (Util::ToLower(args[i + 1]) == "max") {
-          aggregate_method_ = kAggregateMax;
-        } else {
-          return Status(Status::RedisParseErr, "aggregate para error");
-        }
-        i += 2;
-      } else if (Util::ToLower(args[i]) == "weights" && i + numkeys_ < args.size()) {
-        size_t j = 0;
-        while (j < numkeys_) {
-          try {
-            keys_weights_[j].weight = std::stod(args[i + j + 1]);
-          } catch (const std::exception &e) {
-            return Status(Status::RedisParseErr, "value is not an double or out of range");
-          }
-          j++;
-        }
-        i += numkeys_ + 1;
-      } else {
-        return Status(Status::RedisParseErr, "syntax error");
-      }
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.UnionStore(args_[1], keys_weights_, aggregate_method_, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(size);
-    return Status::OK();
-  }
-
- protected:
-  size_t numkeys_ = 0;
-  std::vector<KeyWeight> keys_weights_;
-  AggregateMethod aggregate_method_ = kAggregateSum;
-};
-
-class CommandZInterStore : public CommandZUnionStore {
- public:
-  CommandZInterStore() : CommandZUnionStore() { name_ = "zinterstore"; }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    int size;
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    rocksdb::Status s = zset_db.InterStore(args_[1], keys_weights_, aggregate_method_, &size);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = Redis::Integer(size);
-    return Status::OK();
-  }
-};
-
-class CommandInfo : public Commander {
- public:
-  CommandInfo() : Commander("info", -1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string section = "all";
-    if (args_.size() == 2) {
-      section = Util::ToLower(args_[1]);
-    }
-    std::string info;
-    svr->GetInfo(conn->GetNamespace(), section, &info);
-    *output = Redis::BulkString(info);
-    return Status::OK();
-  }
-};
-
-class CommandCompact : public Commander {
- public:
-  CommandCompact() : Commander("compact", 1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (!conn->IsAdmin()) {
-      *output = Redis::Error("only administrator can compact the db");
-      return Status::OK();
-    }
-    Status s = svr->AsyncCompactDB();
-    if (!s.IsOK()) return s;
-    *output = Redis::SimpleString("OK");
-    LOG(INFO) << "Commpact was triggered by manual with executed success";
-    return Status::OK();
-  }
-};
-
-class CommandBGSave: public Commander {
- public:
-  CommandBGSave() : Commander("bgsave", 1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (!conn->IsAdmin()) {
-      *output = Redis::Error("only administrator can do bgsave command");
-      return Status::OK();
-    }
-    Status s = svr->AsyncBgsaveDB();
-    if (!s.IsOK()) return s;
-    *output = Redis::SimpleString("OK");
-    LOG(INFO) << "BGSave was triggered by manual with executed success";
-    return Status::OK();
-  }
-};
-
-class CommandDBSize : public Commander {
- public:
-  CommandDBSize() : Commander("dbsize", -1, false) {}
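-  // DBSIZE returns the most recently scanned key count of the namespace,
-  // while "DBSIZE scan" triggers an asynchronous rescan instead.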
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string ns = conn->GetNamespace();
-    if (args_.size() == 1) {
-      KeyNumStats stats;
-      svr->GetLastestKeyNumStats(ns, &stats);
-      *output = Redis::Integer(stats.n_key);
-    } else if (args_.size() == 2 && args_[1] == "scan") {
-      Status s = svr->AsyncScanDBSize(ns);
-      if (s.IsOK()) {
-        *output = Redis::SimpleString("OK");
-      } else {
-        *output = Redis::Error(s.Msg());
-      }
-    } else {
-      *output = Redis::Error("DBSIZE subcommand only supports scan");
-    }
-    return Status::OK();
-  }
-};
-
-class CommandPublish : public Commander {
- public:
-  CommandPublish() : Commander("publish", 3, true) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::PubSub pubsub_db(svr->storage_);
-    auto s = pubsub_db.Publish(args_[1], args_[2]);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-
-    int receivers = svr->PublishMessage(args_[1], args_[2]);
-    *output = Redis::Integer(receivers);
-    return Status::OK();
-  }
-};
-
-class CommandSubscribe : public Commander {
- public:
-  CommandSubscribe() : Commander("subcribe", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    for (unsigned i = 1; i < args_.size(); i++) {
-      conn->SubscribeChannel(args_[i]);
-      output->append(Redis::MultiLen(3));
-      output->append(Redis::BulkString("subscribe"));
-      output->append(Redis::BulkString(args_[i]));
-      output->append(Redis::Integer(conn->SubscriptionsCount()));
-    }
-    return Status::OK();
-  }
-};
-
-class CommandUnSubscribe : public Commander {
- public:
-  CommandUnSubscribe() : Commander("unsubscribe", -1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (args_.size() > 1) {
-      conn->UnSubscribeChannel(args_[1]);
-    } else {
-      conn->UnSubscribeAll();
-    }
-    return Status::OK();
-  }
-};
-
-class CommandPSubscribe : public Commander {
- public:
-  CommandPSubscribe() : Commander("psubscribe", -2, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    for (unsigned i = 1; i < args_.size(); i++) {
-      conn->PSubscribeChannel(args_[i]);
-      output->append(Redis::MultiLen(3));
-      output->append(Redis::BulkString("psubscribe"));
-      output->append(Redis::BulkString(args_[i]));
-      output->append(Redis::Integer(conn->PSubscriptionsCount()));
-    }
-    return Status::OK();
-  }
-};
-
-class CommandPUnSubscribe : public Commander {
- public:
-  CommandPUnSubscribe() : Commander("punsubscribe", -1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (args_.size() > 1) {
-      conn->PUnSubscribeChannel(args_[1]);
-    } else {
-      conn->PUnSubscribeAll();
-    }
-    return Status::OK();
-  }
-};
-
-class CommandPubSub : public Commander {
- public:
-  CommandPubSub() : Commander("pubsub", -2, false) {}
-
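-  // Supported forms: PUBSUB NUMPAT | PUBSUB NUMSUB [channel ...] | PUBSUB CHANNELS [pattern]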
-  Status Parse(const std::vector<std::string> &args) override {
-    subcommand_ = Util::ToLower(args[1]);
-    if (subcommand_ == "numpat" && args.size() == 2) {
-      return Status::OK();
-    }
-    if ((subcommand_ == "numsub") && args.size() >= 2) {
-      if (args.size() > 2) {
-        channels_ = std::vector<std::string>(args.begin() + 2, args.end());
-      }
-      return Status::OK();
-    }
-    if ((subcommand_ == "channels") && args.size() <= 3) {
-      if (args.size() == 3) {
-        pattern_ = args[2];
-      }
-      return Status::OK();
-    }
-    return Status(Status::RedisInvalidCmd,
-                  "ERR Unknown subcommand or wrong number of arguments");
-  }
-
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    if (subcommand_ == "numpat") {
-      *output = Redis::Integer(srv->GetPubSubPatternSize());
-      return Status::OK();
-    } else if (subcommand_ == "numsub") {
-      std::vector<ChannelSubscribeNum> channel_subscribe_nums;
-      srv->ListChannelSubscribeNum(channels_, &channel_subscribe_nums);
-      output->append(Redis::MultiLen(channel_subscribe_nums.size() * 2));
-      for (const auto &chan_subscribe_num : channel_subscribe_nums) {
-        output->append(Redis::BulkString(chan_subscribe_num.channel));
-        output->append(Redis::Integer(chan_subscribe_num.subscribe_num));
-      }
-      return Status::OK();
-    } else if (subcommand_ == "channels") {
-      std::vector<std::string> channels;
-      srv->GetChannelsByPattern(pattern_, &channels);
-      *output = Redis::MultiBulkString(channels);
-      return Status::OK();
-    }
-
-    return Status(Status::RedisInvalidCmd,
-                  "ERR Unknown subcommand or wrong number of arguments");
-  }
-
- private:
-  std::string pattern_;
-  std::vector<std::string> channels_;
-  std::string subcommand_;
-};
-
-class CommandSlaveOf : public Commander {
- public:
-  CommandSlaveOf() : Commander("slaveof", 3, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    host_ = args[1];
-    auto port = args[2];
-    if (Util::ToLower(host_) == "no" && Util::ToLower(port) == "one") {
-      host_.clear();
-      return Status::OK();
-    }
-    try {
-      auto p = std::stoul(port);
-      if (p > UINT32_MAX) {
-        throw std::overflow_error("port out of range");
-      }
-      port_ = static_cast<uint32_t>(p);
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, "port should be number");
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (!conn->IsAdmin()) {
-      *output = Redis::Error("only administrator can use slaveof command");
-      return Status::OK();
-    }
-    Status s;
-    if (host_.empty()) {
-      s = svr->RemoveMaster();
-      if (s.IsOK()) {
-        *output = Redis::SimpleString("OK");
-        LOG(WARNING) << "MASTER MODE enabled (user request from '" << conn->GetAddr() << "')";
-      }
-    } else {
-      s = svr->AddMaster(host_, port_);
-      if (s.IsOK()) {
-        *output = Redis::SimpleString("OK");
-        LOG(WARNING) << "SLAVE OF " << host_ << ":" << port_
-                     << " enabled (user request from '" << conn->GetAddr() << "')";
-      } else {
-        LOG(ERROR) << "SLAVE OF " << host_ << ":" << port_
-                   << " (user request from '" << conn->GetAddr() << "') encounter error: " << s.Msg();
-      }
-    }
-    return s;
-  }
-
- private:
-  std::string host_;
-  uint32_t port_ = 0;
-};
-
-class CommandStats: public Commander {
- public:
-  CommandStats() : Commander("stats", 1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string stats_json = svr->GetRocksDBStatsJson();
-    *output = Redis::BulkString(stats_json);
-    return Status::OK();
-  }
-};
-
-class CommandPSync : public Commander {
- public:
-  CommandPSync() : Commander("psync", 2, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    try {
-      auto s = std::stoull(args[1]);
-      next_repl_seq = static_cast<rocksdb::SequenceNumber>(s);
-    } catch (const std::exception &e) {
-      return Status(Status::RedisParseErr, "value is not an unsigned long long or out of range");
-    }
-    return Commander::Parse(args);
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    LOG(INFO) << "Slave " << conn->GetAddr() << " asks for synchronization"
-              << " with next sequence: " << next_repl_seq
-              << ", and local sequence: " << svr->storage_->LatestSeq();
-    if (!checkWALBoundary(svr->storage_, next_repl_seq).IsOK()) {
-      svr->stats_.IncrPSyncErrCounter();
-      *output = "sequence out of range, please use fullsync";
-      return Status(Status::RedisExecErr, *output);
-    }
-    svr->stats_.IncrPSyncOKCounter();
-    Status s = svr->AddSlave(conn, next_repl_seq);
-    if (!s.IsOK()) return s;
-    LOG(INFO) << "New slave: "  << conn->GetAddr() << " was added, start increment syncing";
-    conn->EnableFlag(Redis::Connection::kSlave);
-    // The server spawns a new thread to stream the replication batches and takes over
-    // the connection, so it must never trigger any further events in the worker thread.
-    conn->Detach();
-    write(conn->GetFD(), "+OK\r\n", 5);
-    return Status::OK();
-  }
-
- private:
-  rocksdb::SequenceNumber next_repl_seq = 0;
-
-  // Return OK if the seq is in the range of the current WAL
-  Status checkWALBoundary(Engine::Storage *storage,
-                          rocksdb::SequenceNumber seq) {
-    if (seq == storage->LatestSeq() + 1) {
-      return Status::OK();
-    }
-    // Upper bound
-    if (seq > storage->LatestSeq() + 1) {
-      return Status(Status::NotOK);
-    }
-    // Lower bound
-    std::unique_ptr<rocksdb::TransactionLogIterator> iter;
-    auto s = storage->GetWALIter(seq, &iter);
-    if (s.IsOK() && iter->Valid()) {
-      auto batch = iter->GetBatch();
-      if (seq < batch.sequence) {
-        return Status(Status::NotOK);
-      }
-      return Status::OK();
-    }
-    return Status(Status::NotOK);
-  }
-};
-
-class CommandPerfLog : public Commander {
- public:
-  CommandPerfLog() : Commander("perflog", -2, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    subcommand_ = Util::ToLower(args[1]);
-    if (subcommand_ != "reset" && subcommand_ != "get" && subcommand_ != "len") {
-      return Status(Status::NotOK, "PERFLOG subcommand must be one of RESET, LEN, GET");
-    }
-    if (subcommand_ == "get" && args.size() >= 3) {
-      cnt = std::stoi(args[3]);
-    }
-    return Status::OK();
-  }
-
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    auto perf_log = srv->GetPerfLog();
-    if (subcommand_ == "len") {
-      *output = Redis::Integer(perf_log->Len());
-    } else if (subcommand_ == "reset") {
-      perf_log->Reset();
-      *output = Redis::SimpleString("OK");
-    } else if (subcommand_ == "get") {
-      *output = perf_log->ToString(cnt);
-    }
-    return Status::OK();
-  }
-
- private:
-  std::string subcommand_;
-  int cnt = 0;
-};
-
-class CommandSlowlog : public Commander {
- public:
-  CommandSlowlog() : Commander("slowlog", -2, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    subcommand_ = Util::ToLower(args[1]);
-    if ((subcommand_ == "reset" || subcommand_ == "len" ||
-         subcommand_ == "get") &&
-        args.size() == 2) {
-      return Status::OK();
-    }
-    if (subcommand_ == "get" && args.size() == 3) {
-      try {
-        auto c = std::stoul(args[2]);
-        count_ = static_cast<uint32_t>(c);
-      } catch (const std::exception &e) {
-        return Status(Status::RedisParseErr, "value is not an unsigned long or out of range");
-      }
-      return Status::OK();
-    }
-    return Status(
-        Status::RedisInvalidCmd,
-        "Unknown SLOWLOG subcommand or wrong # of args. Try GET, RESET, LEN.");
-  }
-
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    if (subcommand_ == "reset") {
-      srv->SlowlogReset();
-      *output = Redis::SimpleString("OK");
-      return Status::OK();
-    } else if (subcommand_ == "len") {
-      *output = Redis::Integer(srv->SlowlogLen());
-      return Status::OK();
-    } else if (subcommand_ == "get") {
-      srv->CreateSlowlogReply(output, count_);
-      return Status::OK();
-    }
-    return Status(
-        Status::RedisInvalidCmd,
-        "Unknown SLOWLOG subcommand or wrong # of args. Try GET, RESET, LEN.");
-  }
-
- private:
-  std::string subcommand_;
-  uint32_t count_ = 10;
-};
-
-class CommandClient : public Commander {
- public:
-  CommandClient() : Commander("client", -2, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    subcommand_ = Util::ToLower(args[1]);
-    // subcommand: getname id kill list setname
-    if ((subcommand_ == "id" || subcommand_ == "getname" ||  subcommand_ == "list") && args.size() == 2) {
-      return Status::OK();
-    }
-    if ((subcommand_ == "setname") && args.size() == 3) {
-      conn_name_ = args[2];
-      return Status::OK();
-    }
-    if ((subcommand_ == "kill")) {
-      if (args.size() == 2) {
-        return Status(Status::RedisParseErr, "syntax error");
-      } else if (args.size() == 3) {
-        addr_ = args[2];
-        new_format_ = false;
-        return Status::OK();
-      }
-
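-      // New-style form: CLIENT KILL <filter> <value> ..., where the filter is addr, id or skipme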
-      size_t i = 2;
-      new_format_ = true;
-      while (i < args.size()) {
-        bool moreargs = i + 1 < args.size();
-        if (args[i] == "addr" && moreargs) {
-          addr_ = args[i+1];
-        } else if (args[i] == "id" && moreargs) {
-          try {
-            id_ = std::stoll(args[i+1]);
-          } catch (std::exception &e) {
-            return Status(Status::RedisParseErr, kValueNotInterger);
-          }
-        } else if (args[i] == "skipme" && moreargs) {
-          if (args[i+1] == "yes") {
-            skipme_ = true;
-          } else if (args[i+1] == "no") {
-            skipme_ = false;
-          } else {
-            return Status(Status::RedisParseErr, "syntax error");
-          }
-        } else {
-          return Status(Status::RedisParseErr, "syntax error");
-        }
-        i += 2;
-      }
-      return Status::OK();
-    }
-    return Status(Status::RedisInvalidCmd,
-                  "Syntax error, try CLIENT LIST|KILL ip:port|GETNAME|SETNAME");
-  }
-
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    if (subcommand_ == "list") {
-      *output = Redis::BulkString(srv->GetClientsStr());
-      return Status::OK();
-    } else if (subcommand_ == "setname") {
-      conn->SetName(conn_name_);
-      *output = Redis::SimpleString("OK");
-      return Status::OK();
-    } else if (subcommand_ == "getname") {
-      std::string name = conn->GetName();
-      *output = name.empty() ? Redis::NilString() : Redis::BulkString(name);
-      return Status::OK();
-    } else if (subcommand_ == "id") {
-      *output = Redis::Integer(conn->GetID());
-      return Status::OK();
-    } else if (subcommand_ == "kill") {
-      int64_t killed = 0;
-      srv->KillClient(&killed, addr_, id_, skipme_, conn);
-      if (new_format_) {
-        *output = Redis::Integer(killed);
-      } else {
-        if (killed == 0)
-          *output = Redis::Error("No such client");
-        else
-          *output = Redis::SimpleString("OK");
-      }
-      return Status::OK();
-    }
-
-    return Status(Status::RedisInvalidCmd,
-                  "Syntax error, try CLIENT LIST|KILL ip:port|GETNAME|SETNAME");
-  }
-
- private:
-  std::string addr_;
-  std::string conn_name_;
-  std::string subcommand_;
-  bool skipme_ = false;
-  uint64_t id_ = 0;
-  bool new_format_ = true;
-};
-
-class CommandMonitor : public Commander {
- public:
-  CommandMonitor() : Commander("monitor", 1, false) {}
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    conn->Owner()->BecomeMonitorConn(conn);
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-};
-
-class CommandShutdown : public Commander {
- public:
-  CommandShutdown() : Commander("shutdown", -1, false) {}
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    if (!conn->IsAdmin()) {
-      *output = Redis::Error("only administrator can use shutdown command");
-      return Status::OK();
-    }
-    if (!srv->IsStopped()) {
-      LOG(INFO) << "bye bye";
-      srv->Stop();
-    }
-    return Status::OK();
-  }
-};
-
-class CommandQuit : public Commander {
- public:
-  CommandQuit() : Commander("quit", -1, false) {}
-  Status Execute(Server *srv, Connection *conn, std::string *output) override {
-    conn->EnableFlag(Redis::Connection::kCloseAfterReply);
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-};
-
-class CommandScanBase : public Commander {
- public:
-  explicit CommandScanBase(const std::string &name, int arity, bool is_write = false)
-      : Commander(name, arity, is_write) {}
-  Status ParseMatchAndCountParam(const std::string &type, const std::string &value) {
-    if (type == "match") {
-      prefix = std::move(value);
-      if (!prefix.empty() && prefix[prefix.size() - 1] == '*') {
-        prefix = prefix.substr(0, prefix.size() - 1);
-        return Status::OK();
-      }
-      return Status(Status::RedisParseErr, "only keys prefix match was supported");
-    } else if (type == "count") {
-      try {
-        limit = std::stoi(value);
-      } catch (const std::exception &e) {
-        return Status(Status::RedisParseErr, "ERR count param should be type int");
-      }
-    }
-    return Status::OK();
-  }
-
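-  // An empty internal cursor means scanning from the beginning; clients pass "0" for that.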
-  void ParseCursor(const std::string &param) {
-    cursor = param;
-    if (cursor == "0") {
-      cursor = std::string();
-    }
-  }
-
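-  // Reply is a two-element array: the next cursor (the last returned key, or "0" when done) and the key list.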
-  std::string GenerateOutput(const std::vector<std::string> &keys) {
-    std::vector<std::string> list;
-    if (!keys.empty()) {
-      list.emplace_back(Redis::BulkString(keys.back()));
-    } else {
-      list.emplace_back(Redis::BulkString("0"));
-    }
-
-    list.emplace_back(Redis::MultiBulkString(keys));
-
-    return Redis::Array(list);
-  }
-
- protected:
-  std::string cursor;
-  std::string prefix;
-  int limit = 20;
-};
-
-class CommandSubkeyScanBase : public CommandScanBase {
- public:
-  explicit CommandSubkeyScanBase(const std::string &name, int arity, bool is_write = false)
-      : CommandScanBase(name, arity, is_write) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() % 2 == 0) {
-      return Status(Status::RedisParseErr, "wrong number of arguments");
-    }
-    key = args[1];
-    ParseCursor(args[2]);
-    if (args.size() >= 5) {
-      Status s = ParseMatchAndCountParam(Util::ToLower(args[3]), args[4]);
-      if (!s.IsOK()) {
-        return s;
-      }
-    }
-    if (args.size() >= 7) {
-      Status s = ParseMatchAndCountParam(Util::ToLower(args[5]), args[6]);
-      if (!s.IsOK()) {
-        return s;
-      }
-    }
-    return Commander::Parse(args);
-  }
-
- protected:
-  std::string key;
-};
-
-class CommandScan : public CommandScanBase {
- public:
-  CommandScan() : CommandScanBase("scan", -2, false) {}
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() % 2 != 0) {
-      return Status(Status::RedisParseErr, "wrong number of arguments");
-    }
-
-    ParseCursor(args[1]);
-    if (args.size() >= 4) {
-      Status s = ParseMatchAndCountParam(Util::ToLower(args[2]), args[3]);
-      if (!s.IsOK()) {
-        return s;
-      }
-    }
-    if (args.size() >= 6) {
-      Status s = ParseMatchAndCountParam(Util::ToLower(args[4]), args[5]);
-      if (!s.IsOK()) {
-        return s;
-      }
-    }
-    return Commander::Parse(args);
-  }
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Database redis_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> keys;
-    auto s = redis_db.Scan(cursor, limit, prefix, &keys);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-
-    *output = GenerateOutput(keys);
-    return Status::OK();
-  }
-};
-
-class CommandHScan : public CommandSubkeyScanBase {
- public:
-  CommandHScan() : CommandSubkeyScanBase("hscan", -3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Hash hash_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> fields;
-    auto s = hash_db.Scan(key, cursor, limit, prefix, &fields);
-    if (!s.ok() && !s.IsNotFound()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-    *output = GenerateOutput(fields);
-    return Status::OK();
-  }
-};
-
-class CommandSScan : public CommandSubkeyScanBase {
- public:
-  CommandSScan() : CommandSubkeyScanBase("sscan", -3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::Set set_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> members;
-    auto s = set_db.Scan(key, cursor, limit, prefix, &members);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-
-    *output = GenerateOutput(members);
-    return Status::OK();
-  }
-};
-
-class CommandZScan : public CommandSubkeyScanBase {
- public:
-  CommandZScan() : CommandSubkeyScanBase("zscan", -3, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    Redis::ZSet zset_db(svr->storage_, conn->GetNamespace());
-    std::vector<std::string> members;
-    auto s = zset_db.Scan(key, cursor, limit, prefix, &members);
-    if (!s.ok()) {
-      return Status(Status::RedisExecErr, s.ToString());
-    }
-
-    *output = GenerateOutput(members);
-    return Status::OK();
-  }
-};
-
-class CommandRandomKey : public Commander {
- public:
-  CommandRandomKey() : Commander("randomkey", 1, false) {}
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    std::string key;
-    auto cursor = svr->GetLastRandomKeyCursor();
-    Redis::Database redis(svr->storage_, conn->GetNamespace());
-    redis.RandomKey(cursor, &key);
-    svr->SetLastRandomKeyCursor(key);
-    *output = Redis::BulkString(key);
-    return Status::OK();
-  }
-};
-
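-// REPLCONF lets a replica pass options to the master; only the listening-port option is recognized here.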
-class CommandReplConf : public Commander {
- public:
-  CommandReplConf() : Commander("replconf", -3, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    if (args.size() % 2 == 0) {
-      return Status(Status::RedisParseErr, "wrong number of arguments");
-    }
-    if (args.size() >= 3) {
-      Status s = ParseParam(Util::ToLower(args[1]), args[2]);
-      if (!s.IsOK()) {
-        return s;
-      }
-    }
-    if (args.size() >= 5) {
-      Status s = ParseParam(Util::ToLower(args[3]), args[4]);
-      if (!s.IsOK()) {
-        return s;
-      }
-    }
-    return Commander::Parse(args);
-  }
-
-  Status ParseParam(const std::string &option, const std::string &value) {
-    if (option == "listening-port") {
-      try {
-        auto p = std::stoul(value);
-        if (p > UINT32_MAX) {
-          throw std::overflow_error("listening-port out of range");
-        }
-        port_ = static_cast<uint32_t>(p);
-      } catch (const std::exception &e) {
-        return Status(Status::RedisParseErr, "listening-port should be number");
-      }
-    } else {
-      return Status(Status::RedisParseErr, "unknown option");
-    }
-    return Status::OK();
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    if (port_ != 0) {
-      conn->SetListeningPort(port_);
-    }
-    *output = Redis::SimpleString("OK");
-    return Status::OK();
-  }
-
- private:
-  uint32_t port_ = 0;
-};
-
-class CommandFetchMeta : public Commander {
- public:
-  CommandFetchMeta() : Commander("_fetch_meta", 1, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    return Status::OK();
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    uint64_t file_size;
-    rocksdb::BackupID meta_id;
-    int fd;
-    auto s = Engine::Storage::BackupManager::OpenLatestMeta(
-        svr->storage_, &fd, &meta_id, &file_size);
-    if (!s.IsOK()) {
-      LOG(ERROR) << "Failed to open latest meta, err: " << s.Msg();
-      return Status(Status::DBBackupFileErr, "can't create db backup");
-    }
-    // Send the meta ID
-    conn->Reply(std::to_string(meta_id) + CRLF);
-    // Send meta file size
-    conn->Reply(std::to_string(file_size) + CRLF);
-    // Send meta content
-    conn->SendFile(fd);
-    svr->stats_.IncrFullSyncCounter();
-    return Status::OK();
-  }
-};
-
-class CommandFetchFile : public Commander {
- public:
-  CommandFetchFile() : Commander("_fetch_file", 2, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    path_ = args[1];
-    return Status::OK();
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    uint64_t file_size = 0;
-    auto fd = Engine::Storage::BackupManager::OpenDataFile(svr->storage_, path_,
-                                                           &file_size);
-    if (fd < 0) return Status(Status::DBBackupFileErr);
-    conn->Reply(std::to_string(file_size) + CRLF);
-    conn->SendFile(fd);
-    return Status::OK();
-  }
-
- private:
-  std::string path_;
-};
-
-class CommandDBName : public Commander {
- public:
-  CommandDBName() : Commander("_db_name", 1, false) {}
-
-  Status Parse(const std::vector<std::string> &args) override {
-    return Status::OK();
-  }
-
-  Status Execute(Server *svr, Connection *conn, std::string *output) override {
-    conn->Reply(svr->storage_->GetName() + CRLF);
-    return Status::OK();
-  }
-};
-
-using CommanderFactory = std::function<std::unique_ptr<Commander>()>;
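-// Maps a lowercase command name to a factory that creates a fresh Commander instance per request.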
-std::map<std::string, CommanderFactory> command_table = {
-    {"auth",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandAuth);
-     }},
-    {"ping",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPing);
-     }},
-    {"select",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSelect);
-     }},
-    {"info",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandInfo);
-     }},
-    {"config",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandConfig);
-     }},
-    {"namespace",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandNamespace);
-     }},
-    {"keys",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandKeys);
-     }},
-    {"flushdb",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandFlushDB);
-     }},
-    {"dbsize",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandDBSize);
-     }},
-    {"slowlog",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSlowlog);
-     }},
-    {"perflog",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPerfLog);
-     }},
-    {"client",
-     []()->std::unique_ptr<Commander> {
-        return std::unique_ptr<Commander>(new CommandClient);
-    }},
-    {"monitor",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandMonitor);
-     }},
-    {"shutdown",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandShutdown);
-     }},
-    {"quit",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandQuit);
-     }},
-    {"scan",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandScan);
-     }},
-    {"randomkey",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandRandomKey);
-     }},
-    // key command
-    {"ttl",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandTTL);
-     }},
-    {"pttl",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPTTL);
-     }},
-    {"type",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandType);
-     }},
-    {"object",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandObject);
-     }},
-    {"exists",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandExists);
-     }},
-    {"persist",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPersist);
-     }},
-    {"expire",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandExpire);
-     }},
-    {"pexpire",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPExpire);
-     }},
-    {"expireat",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandExpireAt);
-     }},
-    {"pexpireat",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPExpireAt);
-     }},
-    {"del",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandDel);
-     }},
-    // string command
-    {"get",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandGet);
-     }},
-    {"strlen",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandStrlen);
-     }},
-    {"getset",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandGetSet);
-     }},
-    {"getrange",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandGetRange);
-     }},
-    {"setrange",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSetRange);
-     }},
-    {"mget",
-          []() -> std::unique_ptr<Commander> {
-            return std::unique_ptr<Commander>(new CommandMGet);
-     }},
-    {"append",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandAppend);
-     }},
-    {"set",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSet);
-     }},
-    {"setex",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSetEX);
-     }},
-    {"setnx",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSetNX);
-     }},
-    {"mset",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandMSet);
-     }},
-    {"incrby",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandIncrBy);
-     }},
-    {"incrbyfloat",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandIncrByFloat);
-     }},
-    {"incr",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandIncr);
-     }},
-    {"decrby",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandDecrBy);
-     }},
-    {"decr",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandDecr);
-     }},
-    // bit command
-    {"getbit",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandGetBit);
-     }},
-     {"setbit",
-      []() -> std::unique_ptr<Commander> {
-        return std::unique_ptr<Commander>(new CommandSetBit);
-     }},
-    {"bitcount",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandBitCount);
-     }},
-    {"bitpos",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandBitPos);
-     }},
-    // hash command
-    {"hget",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHGet);
-     }},
-    {"hincrby",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHIncrBy);
-     }},
-    {"hincrbyfloat",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHIncrByFloat);
-     }},
-    {"hset",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHSet);
-     }},
-    {"hsetnx",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHSetNX);
-     }},
-    {"hdel",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHDel);
-     }},
-    {"hstrlen",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHStrlen);
-     }},
-    {"hexists",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHExists);
-     }},
-    {"hlen",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHLen);
-     }},
-    {"hmget",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHMGet);
-     }},
-    {"hmset",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHMSet);
-     }},
-    {"hkeys",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHKeys);
-     }},
-    {"hvals",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHVals);
-     }},
-    {"hgetall",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHGetAll);
-     }},
-    {"hscan",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandHScan);
-     }},
-    // list command
-    {"lpush",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLPush);
-     }},
-    {"rpush",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandRPush);
-     }},
-    {"lpushx",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLPushX);
-     }},
-    {"rpushx",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandRPushX);
-     }},
-    {"lpop",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLPop);
-     }},
-    {"rpop",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandRPop);
-     }},
-    {"blpop",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandBLPop);
-     }},
-    {"brpop",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandBRPop);
-     }},
-    {"lrem",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLRem);
-     }},
-    {"linsert",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLInsert);
-     }},
-    {"lrange",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLRange);
-     }},
-    {"lindex",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLIndex);
-     }},
-    {"ltrim",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLTrim);
-     }},
-    {"llen",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLLen);
-     }},
-    {"lset",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandLSet);
-     }},
-    {"rpoplpush",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandRPopLPUSH);
-     }},
-    // set command
-    {"sadd",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSAdd);
-     }},
-    {"srem",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSRem);
-     }},
-    {"scard",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSCard);
-     }},
-    {"smembers",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSMembers);
-     }},
-    {"sismember",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSIsMember);
-     }},
-    {"spop",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSPop);
-     }},
-    {"srandmember",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSRandMember);
-     }},
-    {"smove",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSMove);
-     }},
-    {"sdiff",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSDiff);
-     }},
-    {"sunion",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSUnion);
-     }},
-    {"sinter",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSInter);
-     }},
-    {"sdiffstore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSDiffStore);
-     }},
-    {"sunionstore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSUnionStore);
-     }},
-    {"sinterstore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSInterStore);
-     }},
-    {"sscan",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSScan);
-     }},
-    // zset command
-    {"zadd",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZAdd);
-     }},
-    {"zcard",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZCard);
-     }},
-    {"zcount",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZCount);
-     }},
-    {"zincrby",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZIncrBy);
-     }},
-    {"zinterstore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZInterStore);
-     }},
-    {"zlexcount",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZLexCount);
-     }},
-    {"zpopmax",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZPopMax);
-     }},
-    {"zpopmin",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZPopMin);
-     }},
-    {"zrange",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRange);
-     }},
-    {"zrevrange",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRevRange);
-     }},
-    {"zrangebylex",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRangeByLex);
-     }},
-    {"zrangebyscore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRangeByScore);
-     }},
-    {"zrank",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRank);
-     }},
-    {"zrem",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRem);
-     }},
-    {"zremrangebyrank",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRemRangeByRank);
-     }},
-    {"zremrangebyscore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRemRangeByScore);
-     }},
-    {"zremrangebylex",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRemRangeByLex);
-     }},
-    {"zrevrangebyscore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRevRangeByScore);
-     }},
-    {"zrevrank",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZRevRank);
-     }},
-    {"zscore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZScore);
-     }},
-    {"zscan",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZScan);
-     }},
-    {"zunionstore",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandZUnionStore);
-     }},
-    // pub/sub command
-    {"publish",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPublish);
-     }},
-    {"subscribe",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSubscribe);
-     }},
-    {"unsubscribe",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandUnSubscribe);
-     }},
-    {"psubscribe",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPSubscribe);
-     }},
-    {"punsubscribe",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPUnSubscribe);
-     }},
-    {"pubsub",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPubSub);
-     }},
-
-    // internal management cmd
-    {"compact",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandCompact);
-     }},
-    {"bgsave",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandBGSave);
-     }},
-    {"slaveof",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandSlaveOf);
-     }},
-    {"stats",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandStats);
-     }},
-};
-
-// Replication related commands, which are received by workers listening on
-// `repl-port`
-std::map<std::string, CommanderFactory> repl_command_table = {
-    {"auth",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandAuth);
-     }},
-    {"replconf",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandReplConf);
-     }},
-    {"psync",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandPSync);
-     }},
-    {"_fetch_meta",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandFetchMeta);
-     }},
-    {"_fetch_file",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandFetchFile);
-     }},
-    {"_db_name",
-     []() -> std::unique_ptr<Commander> {
-       return std::unique_ptr<Commander>(new CommandDBName);
-     }},
-};
-
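-// Looks up a command factory by name (case-insensitive); replication connections use the repl table.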
-Status LookupCommand(const std::string &cmd_name,
-                     std::unique_ptr<Commander> *cmd, bool is_repl) {
-  if (cmd_name.empty()) return Status(Status::RedisUnknownCmd);
-  if (is_repl) {
-    auto cmd_factory = repl_command_table.find(Util::ToLower(cmd_name));
-    if (cmd_factory == repl_command_table.end()) {
-      return Status(Status::RedisUnknownCmd);
-    }
-    *cmd = cmd_factory->second();
-  } else {
-    auto cmd_factory = command_table.find(Util::ToLower(cmd_name));
-    if (cmd_factory == command_table.end()) {
-      return Status(Status::RedisUnknownCmd);
-    }
-    *cmd = cmd_factory->second();
-  }
-  return Status::OK();
-}
-
-bool IsCommandExists(const std::string &cmd) {
-  return command_table.find(cmd) != command_table.end();
-}
-
-void GetCommandList(std::vector<std::string> *cmds) {
-  cmds->clear();
-  for (const auto &cmd : command_table) {
-    cmds->emplace_back(cmd.first);
-  }
-  for (const auto &cmd : repl_command_table) {
-    cmds->emplace_back(cmd.first);
-  }
-}
-}  // namespace Redis
diff --git a/src/redis_cmd.h b/src/redis_cmd.h
deleted file mode 100644
index 397f7ab..0000000
--- a/src/redis_cmd.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#pragma once
-
-#include <event2/bufferevent.h>
-#include <event2/event.h>
-#include <glog/logging.h>
-#include <rocksdb/types.h>
-#include <rocksdb/utilities/backupable_db.h>
-
-#include <list>
-#include <map>
-#include <string>
-#include <vector>
-#include <thread>
-#include <utility>
-#include <memory>
-
-
-#include "redis_reply.h"
-#include "status.h"
-
-class Server;
-namespace Redis {
-
-class Connection;
-
-class Commander {
- public:
-  // @name: cmd name
-  // @arity: expected argument count, a negative value means "at least |arity|" arguments
-  // @is_write: whether the command modifies the keyspace
-  explicit Commander(std::string name, int arity, bool is_write = false)
-      : name_(std::move(name)), arity_(arity), is_write_(is_write) {}
-  std::string Name() { return name_; }
-  int GetArity() { return arity_; }
-  bool IsWrite() { return is_write_; }
-
-  void SetArgs(const std::vector<std::string> &args) { args_ = args; }
-  const std::vector<std::string>* Args() {
-    return &args_;
-  }
-  virtual Status Parse(const std::vector<std::string> &args) {
-    return Status::OK();
-  }
-  virtual Status Execute(Server *svr, Connection *conn, std::string *output) {
-    return Status(Status::RedisExecErr, "not implemented");
-  }
-
-  virtual ~Commander() = default;
-
- protected:
-  std::vector<std::string> args_;
-  std::string name_;
-  int arity_;
-  bool is_write_;
-};
-
-bool IsCommandExists(const std::string &cmd);
-void GetCommandList(std::vector<std::string> *cmds);
-Status LookupCommand(const std::string &cmd_name,
-                     std::unique_ptr<Commander> *cmd, bool is_repl);
-}  // namespace Redis
diff --git a/src/redis_connection.cc b/src/redis_connection.cc
deleted file mode 100644
index 0a4c78b..0000000
--- a/src/redis_connection.cc
+++ /dev/null
@@ -1,208 +0,0 @@
-#include "redis_connection.h"
-
-#include <glog/logging.h>
-#include "worker.h"
-#include "server.h"
-
-namespace Redis {
-
-Connection::Connection(bufferevent *bev, Worker *owner)
-    : bev_(bev), req_(owner->svr_), owner_(owner) {
-  time_t now;
-  time(&now);
-  create_time_ = now;
-  last_interaction_ = now;
-}
-
-Connection::~Connection() {
-  if (bev_) { bufferevent_free(bev_); }
-  // unsubscribe from all channels and patterns, if any
-  UnSubscribeAll();
-  PUnSubscribeAll();
-}
-
-std::string Connection::ToString() {
-  std::ostringstream stream;
-  stream << "id=" << id_
-    << " addr=" << addr_
-    << " fd=" << bufferevent_getfd(bev_)
-    << " name=" << name_
-    << " age=" << GetAge()
-    << " idle=" << GetIdleTime()
-    << " flags=" << GetFlags()
-    << " namespace=" << ns_
-    << " qbuf=" << evbuffer_get_length(Input())
-    << " obuf=" << evbuffer_get_length(Output())
-    << " cmd=" << last_cmd_
-    << "\n";
-  return stream.str();
-}
-
-void Connection::Close() {
-  owner_->FreeConnection(this);
-}
-
-void Connection::Detach() {
-  owner_->DetachConnection(this);
-}
-
-void Connection::OnRead(struct bufferevent *bev, void *ctx) {
-  DLOG(INFO) << "[connection] on read: " << bufferevent_getfd(bev);
-  auto conn = static_cast<Connection *>(ctx);
-
-  conn->SetLastInteraction();
-  auto s = conn->req_.Tokenize(conn->Input());
-  if (!s.IsOK()) {
-    conn->EnableFlag(Redis::Connection::kCloseAfterReply);
-    conn->Reply(Redis::Error(s.Msg()));
-    return;
-  }
-  conn->req_.ExecuteCommands(conn);
-}
-
-void Connection::OnWrite(struct bufferevent *bev, void *ctx) {
-  auto conn = static_cast<Connection *>(ctx);
-  if (conn->IsFlagEnabled(kCloseAfterReply)) {
-    conn->Close();
-  }
-}
-
-void Connection::OnEvent(bufferevent *bev, int16_t events, void *ctx) {
-  auto conn = static_cast<Connection *>(ctx);
-  if (events & BEV_EVENT_ERROR) {
-    LOG(ERROR) << "[connection] Going to remove the client: " << conn->GetAddr()
-               << ", while encounter error: "
-               << evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR());
-    conn->Close();
-    return;
-  }
-  if (events & BEV_EVENT_EOF) {
-    DLOG(INFO) << "[connection] Going to remove the client: " << conn->GetAddr()
-               << ", while closed by client";
-    conn->Close();
-    return;
-  }
-  if (events & BEV_EVENT_TIMEOUT) {
-    DLOG(INFO) << "[connection] The client: " << conn->GetAddr()  << "] reached timeout";
-    bufferevent_enable(bev, EV_READ | EV_WRITE);
-  }
-}
-
-void Connection::Reply(const std::string &msg) {
-  owner_->svr_->stats_.IncrOutbondBytes(msg.size());
-  Redis::Reply(bufferevent_get_output(bev_), msg);
-}
-
-void Connection::SendFile(int fd) {
-  // NOTE: we don't need to close the fd; evbuffer_add_file takes ownership and closes it for us
-  auto output = bufferevent_get_output(bev_);
-  evbuffer_add_file(output, fd, 0, -1);
-}
-
-void Connection::SetAddr(std::string ip, int port) {
-  ip_ = std::move(ip);
-  port_ = port;
-  addr_ = ip_ +":"+ std::to_string(port_);
-}
-
-uint64_t Connection::GetAge() {
-  time_t now;
-  time(&now);
-  return static_cast<uint64_t>(now-create_time_);
-}
-
-void Connection::SetLastInteraction() {
-  time(&last_interaction_);
-}
-
-uint64_t Connection::GetIdleTime() {
-  time_t now;
-  time(&now);
-  return static_cast<uint64_t>(now-last_interaction_);
-}
-
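-// Builds the CLIENT LIST style flags string: R=repl worker, S=slave, c=close-after-reply, M=monitor, P=subscriber, N=none.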
-std::string Connection::GetFlags() {
-  std::string flags;
-  if (owner_->IsRepl()) flags.append("R");
-  if (IsFlagEnabled(kSlave)) flags.append("S");
-  if (IsFlagEnabled(kCloseAfterReply)) flags.append("c");
-  if (IsFlagEnabled(kMonitor)) flags.append("M");
-  if (!subscribe_channels_.empty()) flags.append("P");
-  if (flags.empty()) flags = "N";
-  return flags;
-}
-
-void Connection::EnableFlag(Flag flag) {
-  flags_ |= flag;
-}
-
-bool Connection::IsFlagEnabled(Flag flag) {
-  return (flags_ & flag) > 0;
-}
-
-bool Connection::IsRepl() {
-  return owner_->IsRepl();
-}
-
-void Connection::SubscribeChannel(const std::string &channel) {
-  for (const auto &chan : subscribe_channels_) {
-    if (channel == chan) return;
-  }
-  subscribe_channels_.emplace_back(channel);
-  owner_->svr_->SubscribeChannel(channel, this);
-}
-
-void Connection::UnSubscribeChannel(const std::string &channel) {
-  auto iter = subscribe_channels_.begin();
-  for (; iter != subscribe_channels_.end(); iter++) {
-    if (*iter == channel) {
-      subscribe_channels_.erase(iter);
-      owner_->svr_->UnSubscribeChannel(channel, this);
-      return;
-    }
-  }
-}
-
-void Connection::UnSubscribeAll() {
-  if (subscribe_channels_.empty()) return;
-  for (const auto &chan : subscribe_channels_) {
-    owner_->svr_->UnSubscribeChannel(chan, this);
-  }
-  subscribe_channels_.clear();
-}
-
-int Connection::SubscriptionsCount() {
-  return static_cast<int>(subscribe_channels_.size());
-}
-
-void Connection::PSubscribeChannel(const std::string &pattern) {
-  for (const auto &p : subcribe_patterns_) {
-    if (pattern == p) return;
-  }
-  subcribe_patterns_.emplace_back(pattern);
-  owner_->svr_->PSubscribeChannel(pattern, this);
-}
-
-void Connection::PUnSubscribeChannel(const std::string &pattern) {
-  auto iter = subcribe_patterns_.begin();
-  for (; iter != subcribe_patterns_.end(); iter++) {
-    if (*iter == pattern) {
-      subcribe_patterns_.erase(iter);
-      owner_->svr_->PUnSubscribeChannel(pattern, this);
-      return;
-    }
-  }
-}
-
-void Connection::PUnSubscribeAll() {
-  if (subcribe_patterns_.empty()) return;
-  for (const auto &pattern : subcribe_patterns_) {
-    owner_->svr_->PUnSubscribeChannel(pattern, this);
-  }
-  subcribe_patterns_.clear();
-}
-
-int Connection::PSubscriptionsCount() {
-  return static_cast<int>(subcribe_patterns_.size());
-}
-}  // namespace Redis
diff --git a/src/redis_connection.h b/src/redis_connection.h
deleted file mode 100644
index 64b2f66..0000000
--- a/src/redis_connection.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#pragma once
-
-#include <event2/buffer.h>
-#include <vector>
-#include <string>
-#include <utility>
-#include <memory>
-
-#include "redis_cmd.h"
-#include "redis_request.h"
-
-class Worker;
-
-namespace Redis {
-class Connection {
- public:
-  enum Flag {
-    kSlave           = 1 << 4,
-    kMonitor         = 1 << 5,
-    kCloseAfterReply = 1 << 6,
-  };
-
-  explicit Connection(bufferevent *bev, Worker *owner);
-  ~Connection();
-
-  void Close();
-  void Detach();
-  static void OnRead(struct bufferevent *bev, void *ctx);
-  static void OnWrite(struct bufferevent *bev, void *ctx);
-  static void OnEvent(bufferevent *bev, int16_t events, void *ctx);
-  void Reply(const std::string &msg);
-  void SendFile(int fd);
-  std::string ToString();
-
-  void SubscribeChannel(const std::string &channel);
-  void UnSubscribeChannel(const std::string &channel);
-  void UnSubscribeAll();
-  int SubscriptionsCount();
-  void PSubscribeChannel(const std::string &pattern);
-  void PUnSubscribeChannel(const std::string &pattern);
-  void PUnSubscribeAll();
-  int PSubscriptionsCount();
-
-  uint64_t GetAge();
-  uint64_t GetIdleTime();
-  void SetLastInteraction();
-  std::string GetFlags();
-  void EnableFlag(Flag flag);
-  bool IsFlagEnabled(Flag flag);
-  bool IsRepl();
-
-  uint64_t GetID() { return id_; }
-  void SetID(uint64_t id) { id_ = id; }
-  std::string GetName() { return name_; }
-  void SetName(std::string name) { name_ = std::move(name); }
-  std::string GetAddr() { return addr_; }
-  void SetAddr(std::string ip, int port);
-  void SetLastCmd(std::string cmd) { last_cmd_ = std::move(cmd); }
-  std::string GetIP() { return ip_; }
-  int GetPort() { return port_; }
-  void SetListeningPort(int port) { listening_port_ = port; }
-  int GetListeningPort() { return listening_port_; }
-
-  bool IsAdmin() { return is_admin_; }
-  void BecomeAdmin() { is_admin_ = true; }
-  void BecomeUser() { is_admin_ = false; }
-  std::string GetNamespace() { return ns_; }
-  void SetNamespace(std::string ns) { ns_ = std::move(ns); }
-
-  Worker *Owner() { return owner_; }
-  int GetFD() { return bufferevent_getfd(bev_); }
-  evbuffer *Input() { return bufferevent_get_input(bev_); }
-  evbuffer *Output() { return bufferevent_get_output(bev_); }
-  bufferevent *GetBufferEvent() { return bev_; }
-
-  std::unique_ptr<Commander> current_cmd_;
-
- private:
-  uint64_t id_ = 0;
-  int flags_ = 0;
-  std::string ns_;
-  std::string name_;
-  std::string ip_;
-  int port_ = 0;
-  std::string addr_;
-  int listening_port_ = 0;
-  bool is_admin_ = false;
-  std::string last_cmd_;
-  time_t create_time_;
-  time_t last_interaction_;
-
-  bufferevent *bev_;
-  Request req_;
-  Worker *owner_;
-  std::vector<std::string> subscribe_channels_;
-  std::vector<std::string> subcribe_patterns_;
-};
-}  // namespace Redis
diff --git a/src/redis_db.cc b/src/redis_db.cc
deleted file mode 100644
index c87e209..0000000
--- a/src/redis_db.cc
+++ /dev/null
@@ -1,403 +0,0 @@
-
-#include <ctime>
-
-#include "redis_db.h"
-
-#include "server.h"
-#include "util.h"
-
-namespace Redis {
-
-Database::Database(Engine::Storage *storage, const std::string &ns) {
-  storage_ = storage;
-  metadata_cf_handle_ = storage->GetCFHandle("metadata");
-  db_ = storage->GetDB();
-  namespace_ = ns;
-}
-
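-// Loads and validates the metadata of ns_key: NotFound when missing, expired or empty;
-// InvalidArgument when the stored type differs from the requested one.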
-rocksdb::Status Database::GetMetadata(RedisType type, const Slice &ns_key, Metadata *metadata) {
-  std::string old_metadata;
-  metadata->Encode(&old_metadata);
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  std::string bytes;
-  rocksdb::Status s = db_->Get(read_options, metadata_cf_handle_, ns_key, &bytes);
-  if (!s.ok()) {
-    return rocksdb::Status::NotFound();
-  }
-  metadata->Decode(bytes);
-
-  if (metadata->Expired()) {
-    metadata->Decode(old_metadata);
-    return rocksdb::Status::NotFound("the key was Expired");
-  }
-  if (metadata->Type() != type && (metadata->size > 0 || metadata->Type() == kRedisString)) {
-    metadata->Decode(old_metadata);
-    return rocksdb::Status::InvalidArgument("WRONGTYPE Operation against a key holding the wrong kind of value");
-  }
-  if (metadata->size == 0) {
-    metadata->Decode(old_metadata);
-    return rocksdb::Status::NotFound("no elements");
-  }
-  return s;
-}
-
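-// Sets the key's expire timestamp by patching the 32-bit expire field inside the encoded metadata in place.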
-rocksdb::Status Database::Expire(const Slice &user_key, int timestamp) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  std::string value;
-  Metadata metadata(kRedisNone);
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  rocksdb::Status s = db_->Get(rocksdb::ReadOptions(), metadata_cf_handle_, ns_key, &value);
-  if (!s.ok()) return s;
-  metadata.Decode(value);
-  if (metadata.Expired()) {
-    return rocksdb::Status::NotFound("the key was expired");
-  }
-  if (metadata.Type() != kRedisString && metadata.size == 0) {
-    return rocksdb::Status::NotFound("no elements");
-  }
-  if (metadata.expire == timestamp) return rocksdb::Status::OK();
-
-  char *buf = new char[value.size()];
-  memcpy(buf, value.data(), value.size());
-  // +1 to skip the flags
-  EncodeFixed32(buf + 1, (uint32_t) timestamp);
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisNone, {std::to_string(kRedisCmdExpire)});
-  batch.PutLogData(log_data.Encode());
-  batch.Put(metadata_cf_handle_, ns_key, Slice(buf, value.size()));
-  s = storage_->Write(rocksdb::WriteOptions(), &batch);
-  delete[]buf;
-  return s;
-}
-
-rocksdb::Status Database::Del(const Slice &user_key) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  std::string value;
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  rocksdb::Status s = db_->Get(rocksdb::ReadOptions(), metadata_cf_handle_, ns_key, &value);
-  if (!s.ok()) return s;
-  Metadata metadata(kRedisNone);
-  metadata.Decode(value);
-  if (metadata.Expired()) {
-    return rocksdb::Status::NotFound("the key was expired");
-  }
-  return db_->Delete(rocksdb::WriteOptions(), metadata_cf_handle_, ns_key);
-}
-
-rocksdb::Status Database::Exists(const std::vector<Slice> &keys, int *ret) {
-  *ret = 0;
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-
-  rocksdb::Status s;
-  std::string ns_key, value;
-  for (const auto &key : keys) {
-    AppendNamespacePrefix(key, &ns_key);
-    s = db_->Get(read_options, metadata_cf_handle_, ns_key, &value);
-    if (s.ok()) {
-      Metadata metadata(kRedisNone);
-      metadata.Decode(value);
-      if (!metadata.Expired()) *ret += 1;
-    }
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Database::TTL(const Slice &user_key, int *ttl) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  *ttl = -2;  // ttl is -2 when the key does not exist or has expired
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  std::string value;
-  rocksdb::Status s = db_->Get(read_options, metadata_cf_handle_, ns_key, &value);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  Metadata metadata(kRedisNone);
-  metadata.Decode(value);
-  *ttl = metadata.TTL();
-  return rocksdb::Status::OK();
-}
-
-void Database::GetKeyNumStats(const std::string &prefix, KeyNumStats *stats) {
-  Keys(prefix, nullptr, stats);
-}
-
-void Database::Keys(std::string prefix, std::vector<std::string> *keys, KeyNumStats *stats) {
-  std::string ns_prefix, ns, user_key, value;
-  AppendNamespacePrefix(prefix, &ns_prefix);
-  prefix = ns_prefix;
-
-  uint64_t ttl_sum = 0;
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options, metadata_cf_handle_);
-  prefix.empty() ? iter->SeekToFirst() : iter->Seek(prefix);
-  for (; iter->Valid(); iter->Next()) {
-    if (!prefix.empty() && !iter->key().starts_with(prefix)) {
-      break;
-    }
-    Metadata metadata(kRedisNone);
-    value = iter->value().ToString();
-    metadata.Decode(value);
-    if (metadata.Expired()) {
-      if (stats) stats->n_expired++;
-      continue;
-    }
-    if (stats) {
-      int32_t ttl = metadata.TTL();
-      stats->n_key++;
-      if (ttl != -1) {
-        stats->n_expires++;
-        if (ttl > 0) ttl_sum += ttl;
-      }
-    }
-    if (keys) {
-      ExtractNamespaceKey(iter->key(), &ns, &user_key);
-      keys->emplace_back(user_key);
-    }
-  }
-  if (stats && stats->n_expires > 0) {
-    stats->avg_ttl = ttl_sum / stats->n_expires;
-  }
-  delete iter;
-}
-
-rocksdb::Status Database::Scan(const std::string &cursor,
-                         uint64_t limit,
-                         const std::string &prefix,
-                         std::vector<std::string> *keys) {
-  uint64_t cnt = 0;
-  std::string ns_prefix, ns_cursor, ns, user_key, value;
-  AppendNamespacePrefix(prefix, &ns_prefix);
-  AppendNamespacePrefix(cursor, &ns_cursor);
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options, metadata_cf_handle_);
-  if (!cursor.empty()) {
-    iter->Seek(ns_cursor);
-    if (iter->Valid()) {
-      iter->Next();
-    }
-  } else if (ns_prefix.empty()) {
-    iter->SeekToFirst();
-  } else {
-    iter->Seek(ns_prefix);
-  }
-
-  for (; iter->Valid() && cnt < limit; iter->Next()) {
-    if (!ns_prefix.empty() && !iter->key().starts_with(ns_prefix)) {
-      break;
-    }
-    Metadata metadata(kRedisNone);
-    value = iter->value().ToString();
-    metadata.Decode(value);
-    if (metadata.Expired()) continue;
-    ExtractNamespaceKey(iter->key(), &ns, &user_key);
-    keys->emplace_back(user_key);
-    cnt++;
-  }
-  delete iter;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Database::RandomKey(const std::string &cursor, std::string *key) {
-  key->clear();
-
-  std::vector<std::string> keys;
-  auto s = Scan(cursor, 60, "", &keys);
-  if (!s.ok()) {
-    return s;
-  }
-  if (keys.empty() && !cursor.empty()) {
-    // if we reach the end, restart from the beginning
-    auto s = Scan("", 60, "", &keys);
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  if (!keys.empty()) {
-    unsigned int seed = time(NULL);
-    *key = keys.at(rand_r(&seed) % keys.size());
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Database::FlushDB() {
-  std::string prefix;
-  AppendNamespacePrefix("", &prefix);
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options, metadata_cf_handle_);
-  for (iter->Seek(prefix);
-       iter->Valid() && iter->key().starts_with(prefix);
-       iter->Next()) {
-    db_->Delete(rocksdb::WriteOptions(), metadata_cf_handle_, iter->key());
-  }
-  delete iter;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Database::Dump(const Slice &user_key, std::vector<std::string> *infos) {
-  infos->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  std::string value;
-  rocksdb::Status s = db_->Get(read_options, metadata_cf_handle_, ns_key, &value);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  Metadata metadata(kRedisNone);
-  metadata.Decode(value);
-
-  infos->emplace_back("namespace");
-  infos->emplace_back(namespace_);
-  infos->emplace_back("type");
-  infos->emplace_back(RedisTypeNames[metadata.Type()]);
-  infos->emplace_back("version");
-  infos->emplace_back(std::to_string(metadata.version));
-  infos->emplace_back("expire");
-  infos->emplace_back(std::to_string(metadata.expire));
-  infos->emplace_back("size");
-  infos->emplace_back(std::to_string(metadata.size));
-
-  infos->emplace_back("created_at");
-  struct timeval created_at = metadata.Time();
-  std::time_t tm = created_at.tv_sec;
-  char time_str[25];
-  if (!std::strftime(time_str, sizeof(time_str), "%Y-%m-%d %H:%M:%S", std::localtime(&tm))) {
-    return rocksdb::Status::TryAgain("Fail to format local time_str");
-  }
-  std::string created_at_str(time_str);
-  infos->emplace_back(created_at_str + "." + std::to_string(created_at.tv_usec));
-
-  if (metadata.Type() == kRedisList) {
-    ListMetadata metadata;
-    s = GetMetadata(kRedisList, ns_key, &metadata);
-    if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-    infos->emplace_back("head");
-    infos->emplace_back(std::to_string(metadata.head));
-    infos->emplace_back("tail");
-    infos->emplace_back(std::to_string(metadata.tail));
-  }
-
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Database::Type(const Slice &user_key, RedisType *type) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  *type = kRedisNone;
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  std::string value;
-  rocksdb::Status s = db_->Get(read_options, metadata_cf_handle_, ns_key, &value);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  Metadata metadata(kRedisNone);
-  metadata.Decode(value);
-  *type = metadata.Type();
-  return rocksdb::Status::OK();
-}
-
-void Database::AppendNamespacePrefix(const Slice &user_key, std::string *output) {
-  ComposeNamespaceKey(namespace_, user_key, output);
-}
-
-rocksdb::Status SubKeyScanner::Scan(RedisType type,
-                                    const Slice &user_key,
-                                    const std::string &cursor,
-                                    uint64_t limit,
-                                    const std::string &subkey_prefix,
-                                    std::vector<std::string> *keys) {
-  uint64_t cnt = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  Metadata metadata(type);
-  rocksdb::Status s = GetMetadata(type, ns_key, &metadata);
-  if (!s.ok()) return s;
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  std::string match_prefix_key;
-  if (!subkey_prefix.empty()) {
-    InternalKey(ns_key, subkey_prefix, metadata.version).Encode(&match_prefix_key);
-  } else {
-    InternalKey(ns_key, "", metadata.version).Encode(&match_prefix_key);
-  }
-
-  std::string start_key;
-  if (!cursor.empty()) {
-    InternalKey(ns_key, cursor, metadata.version).Encode(&start_key);
-  } else {
-    start_key = match_prefix_key;
-  }
-  for (iter->Seek(start_key); iter->Valid() && cnt < limit; iter->Next()) {
-    if (!cursor.empty() && iter->key() == start_key) {
-      // if cursor is not empty, then we need to skip start_key
-      // because we already returned that key in the last scan
-      continue;
-    }
-    if (!iter->key().starts_with(match_prefix_key)) {
-      break;
-    }
-    InternalKey ikey(iter->key());
-    keys->emplace_back(ikey.GetSubKey().ToString());
-    cnt++;
-  }
-  delete iter;
-  return rocksdb::Status::OK();
-}
-
-RedisType WriteBatchLogData::GetRedisType() {
-  return type_;
-}
-
-std::vector<std::string> *WriteBatchLogData::GetArguments() {
-  return &args_;
-}
-
-std::string WriteBatchLogData::Encode() {
-  std::string ret = std::to_string(type_);
-  for (size_t i = 0; i < args_.size(); i++) {
-    ret += " " + args_[i];
-  }
-  return ret;
-}
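-// Illustrative example (values follow the RedisType enum order defined in
-// redis_metadata.h, where kRedisList == 3):
-//   WriteBatchLogData(kRedisList, {"6"}).Encode()  => "3 6"
-// i.e. the type id followed by the space-separated arguments, which is what
-// Decode() below splits apart again.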
-
-Status WriteBatchLogData::Decode(const rocksdb::Slice &blob) {
-  std::string log_data = blob.ToString();
-  std::vector<std::string> args;
-  Util::Split(log_data, " ", &args);
-  type_ = static_cast<RedisType >(std::stoi(args[0]));
-  args_ = std::vector<std::string>(args.begin() + 1, args.end());
-
-  return Status::OK();
-}
-
-}  // namespace Redis
diff --git a/src/redis_db.h b/src/redis_db.h
deleted file mode 100644
index 7b6dc71..0000000
--- a/src/redis_db.h
+++ /dev/null
@@ -1,83 +0,0 @@
-
-#pragma once
-
-#include <string>
-#include <vector>
-#include <utility>
-
-#include "redis_metadata.h"
-#include "storage.h"
-
-namespace Redis {
-class Database {
- public:
-  explicit Database(Engine::Storage *storage, const std::string &ns = "");
-  rocksdb::Status GetMetadata(RedisType type, const Slice &ns_key, Metadata *metadata);
-  rocksdb::Status Expire(const Slice &user_key, int timestamp);
-  rocksdb::Status Del(const Slice &user_key);
-  rocksdb::Status Exists(const std::vector<Slice> &keys, int *ret);
-  rocksdb::Status TTL(const Slice &user_key, int *ttl);
-  rocksdb::Status Type(const Slice &user_key, RedisType *type);
-  rocksdb::Status Dump(const Slice &user_key, std::vector<std::string> *infos);
-  rocksdb::Status FlushDB();
-  void GetKeyNumStats(const std::string &prefix, KeyNumStats *stats);
-  void Keys(std::string prefix, std::vector<std::string> *keys = nullptr, KeyNumStats *stats = nullptr);
-  rocksdb::Status Scan(const std::string &cursor,
-                       uint64_t limit,
-                       const std::string &prefix,
-                       std::vector<std::string> *keys);
-  rocksdb::Status RandomKey(const std::string &cursor, std::string *key);
-  void AppendNamespacePrefix(const Slice &user_key, std::string *output);
-
- protected:
-  Engine::Storage *storage_;
-  rocksdb::DB *db_;
-  rocksdb::ColumnFamilyHandle *metadata_cf_handle_;
-  std::string namespace_;
-
-  class LatestSnapShot {
-   public:
-    explicit LatestSnapShot(rocksdb::DB *db) : db_(db) {
-      snapshot_ = db_->GetSnapshot();
-    }
-    ~LatestSnapShot() {
-      db_->ReleaseSnapshot(snapshot_);
-    }
-    const rocksdb::Snapshot *GetSnapShot() { return snapshot_; }
-   private:
-    rocksdb::DB *db_ = nullptr;
-    const rocksdb::Snapshot *snapshot_ = nullptr;
-  };
-};
-
-class SubKeyScanner : public Redis::Database {
- public:
-  explicit SubKeyScanner(Engine::Storage *storage, const std::string &ns)
-      : Database(storage, ns) {}
-  rocksdb::Status Scan(RedisType type,
-                       const Slice &user_key,
-                       const std::string &cursor,
-                       uint64_t limit,
-                       const std::string &subkey_prefix,
-                       std::vector<std::string> *keys);
-};
-
-class WriteBatchLogData {
- public:
-  WriteBatchLogData() = default;
-  explicit WriteBatchLogData(RedisType type) : type_(type) {}
-  explicit WriteBatchLogData(RedisType type, std::vector<std::string> &&args) :
-      type_(type), args_(std::move(args)) {}
-
-  RedisType GetRedisType();
-  std::vector<std::string> *GetArguments();
-  std::string Encode();
-  Status Decode(const rocksdb::Slice &blob);
-
- private:
-  RedisType type_ = kRedisNone;
-  std::vector<std::string> args_;
-};
-
-}  // namespace Redis
-
diff --git a/src/redis_hash.cc b/src/redis_hash.cc
deleted file mode 100644
index 8891ed7..0000000
--- a/src/redis_hash.cc
+++ /dev/null
@@ -1,286 +0,0 @@
-#include "redis_hash.h"
-#include <rocksdb/status.h>
-#include <limits>
-#include <iostream>
-
-namespace Redis {
-rocksdb::Status Hash::GetMetadata(const Slice &ns_key, HashMetadata *metadata) {
-  return Database::GetMetadata(kRedisHash, ns_key, metadata);
-}
-
-rocksdb::Status Hash::Size(const Slice &user_key, uint32_t *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-  *ret = metadata.size;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Hash::Get(const Slice &user_key, const Slice &field, std::string *value) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  std::string sub_key;
-  InternalKey(ns_key, field, metadata.version).Encode(&sub_key);
-  return db_->Get(read_options, sub_key, value);
-}
-
-rocksdb::Status Hash::IncrBy(const Slice &user_key, const Slice &field, int64_t increment, int64_t *ret) {
-  bool exists = false;
-  int64_t old_value = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-
-  std::string sub_key;
-  InternalKey(ns_key, field, metadata.version).Encode(&sub_key);
-  if (s.ok()) {
-    std::string value_bytes;
-    s = db_->Get(rocksdb::ReadOptions(), sub_key, &value_bytes);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    if (s.ok()) {
-      try {
-        old_value = std::stoll(value_bytes);
-      } catch (std::exception &e) {
-        return rocksdb::Status::InvalidArgument(e.what());
-      }
-    }
-    exists = true;
-  }
-  if ((increment < 0 && old_value < 0 && increment < (LLONG_MIN-old_value))
-      || (increment > 0 && old_value > 0 && increment > (LLONG_MAX-old_value))) {
-    return rocksdb::Status::InvalidArgument("increment or decrement would overflow");
-  }
-
-  *ret = old_value + increment;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisHash);
-  batch.PutLogData(log_data.Encode());
-  batch.Put(sub_key, std::to_string(*ret));
-  if (!exists) {
-    metadata.size += 1;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
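-// A worked example of the overflow guard above: with an existing field value of
-// "9223372036854775800" (LLONG_MAX - 7), an increment of 10 is rejected with
-// "increment or decrement would overflow".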
-
-rocksdb::Status Hash::IncrByFloat(const Slice &user_key, const Slice &field, float increment, float *ret) {
-  bool exists = false;
-  float old_value = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-
-  std::string sub_key;
-  InternalKey(ns_key, field, metadata.version).Encode(&sub_key);
-  if (s.ok()) {
-    std::string value_bytes;
-    s = db_->Get(rocksdb::ReadOptions(), sub_key, &value_bytes);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    if (s.ok()) {
-      try {
-        old_value = std::stof(value_bytes);
-      } catch (std::exception &e) {
-        return rocksdb::Status::InvalidArgument(e.what());
-      }
-    }
-    exists = true;
-  }
-  if ((increment < 0 && old_value < 0 && increment < (std::numeric_limits<float>::lowest()-old_value))
-      || (increment > 0 && old_value > 0 && increment > (std::numeric_limits<float>::max()-old_value))) {
-    return rocksdb::Status::InvalidArgument("increment or decrement would overflow");
-  }
-
-  *ret = old_value + increment;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisHash);
-  batch.PutLogData(log_data.Encode());
-  batch.Put(sub_key, std::to_string(*ret));
-  if (!exists) {
-    metadata.size += 1;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Hash::MGet(const Slice &user_key,
-                                const std::vector<Slice> &fields,
-                                std::vector<std::string> *values) {
-  values->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) {
-    return s;
-  }
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  std::string sub_key, value;
-  for (const auto &field : fields) {
-    InternalKey(ns_key, field, metadata.version).Encode(&sub_key);
-    value.clear();
-    auto s = db_->Get(read_options, sub_key, &value);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    values->emplace_back(value);
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Hash::Set(const Slice &user_key, const Slice &field, const Slice &value, int *ret) {
-  FieldValue fv = {field.ToString(), value.ToString()};
-  std::vector<FieldValue> fvs;
-  fvs.push_back(fv);
-  return MSet(user_key, fvs, false, ret);
-}
-
-rocksdb::Status Hash::SetNX(const Slice &user_key, const Slice &field, Slice value, int *ret) {
-  FieldValue fv = {field.ToString(), value.ToString()};
-  std::vector<FieldValue> fvs;
-  fvs.push_back(fv);
-  return MSet(user_key, fvs, true, ret);
-}
-
-rocksdb::Status Hash::Delete(const Slice &user_key, const std::vector<Slice> &fields, int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  HashMetadata metadata;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisHash);
-  batch.PutLogData(log_data.Encode());
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  std::string sub_key, value;
-  for (const auto &field : fields) {
-    InternalKey(ns_key, field, metadata.version).Encode(&sub_key);
-    s = db_->Get(rocksdb::ReadOptions(), sub_key, &value);
-    if (s.ok()) {
-      *ret += 1;
-      batch.Delete(sub_key);
-    }
-  }
-  // update the metadata size only when some fields were actually deleted
-  if (*ret > 0) {
-    metadata.size -= *ret;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Hash::MSet(const Slice &user_key, const std::vector<FieldValue> &field_values, bool nx, int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-
-  int added = 0;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisHash);
-  batch.PutLogData(log_data.Encode());
-  for (const auto &fv : field_values) {
-    bool exists = false;
-    std::string sub_key;
-    InternalKey(ns_key, fv.field, metadata.version).Encode(&sub_key);
-    if (metadata.size > 0) {
-      std::string fieldValue;
-      s = db_->Get(rocksdb::ReadOptions(), sub_key, &fieldValue);
-      if (!s.ok() && !s.IsNotFound()) return s;
-      if (s.ok()) {
-        if (((fieldValue == fv.value) || nx)) continue;
-        exists = true;
-      }
-    }
-    if (!exists) added++;
-    batch.Put(sub_key, fv.value);
-  }
-  if (added > 0) {
-    *ret = added;
-    metadata.size += added;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Hash::GetAll(const Slice &user_key, std::vector<FieldValue> *field_values, HashFetchType type) {
-  field_values->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  HashMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  LatestSnapShot ss(db_);
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  std::string prefix_key;
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-  for (iter->Seek(prefix_key);
-       iter->Valid() && iter->key().starts_with(prefix_key);
-       iter->Next()) {
-    FieldValue fv;
-    if (type == HashFetchType::kOnlyKey) {
-      InternalKey ikey(iter->key());
-      fv.field = ikey.GetSubKey().ToString();
-    } else if (type == HashFetchType::kOnlyValue) {
-      fv.value = iter->value().ToString();
-    } else {
-      InternalKey ikey(iter->key());
-      fv.field = ikey.GetSubKey().ToString();
-      fv.value = iter->value().ToString();
-    }
-    field_values->emplace_back(fv);
-  }
-  delete iter;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Hash::Scan(const Slice &user_key,
-                                const std::string &cursor,
-                                uint64_t limit,
-                                const std::string &field_prefix,
-                                std::vector<std::string> *fields) {
-  return SubKeyScanner::Scan(kRedisHash, user_key, cursor, limit, field_prefix, fields);
-}
-
-}  // namespace Redis
diff --git a/src/redis_hash.h b/src/redis_hash.h
deleted file mode 100644
index eb23942..0000000
--- a/src/redis_hash.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#pragma once
-
-#include <rocksdb/status.h>
-#include <string>
-#include <vector>
-
-#include "redis_db.h"
-#include "encoding.h"
-#include "redis_metadata.h"
-
-typedef struct FieldValue {
-  std::string field;
-  std::string value;
-} FieldValue;
-
-enum class HashFetchType {
-  kAll = 0,
-  kOnlyKey = 1,
-  kOnlyValue = 2
-};
-
-namespace Redis {
-class Hash : public SubKeyScanner {
- public:
-  Hash(Engine::Storage *storage, const std::string &ns) : SubKeyScanner(storage, ns) {}
-  rocksdb::Status Size(const Slice &user_key, uint32_t *ret);
-  rocksdb::Status Get(const Slice &user_key, const Slice &field, std::string *value);
-  rocksdb::Status Set(const Slice &user_key, const Slice &field, const Slice &value, int *ret);
-  rocksdb::Status SetNX(const Slice &user_key, const Slice &field, Slice value, int *ret);
-  rocksdb::Status Delete(const Slice &user_key, const std::vector<Slice> &fields, int *ret);
-  rocksdb::Status IncrBy(const Slice &user_key, const Slice &field, int64_t increment, int64_t *ret);
-  rocksdb::Status IncrByFloat(const Slice &user_key, const Slice &field, float increment, float *ret);
-  rocksdb::Status MSet(const Slice &user_key, const std::vector<FieldValue> &field_values, bool nx, int *ret);
-  rocksdb::Status MGet(const Slice &user_key, const std::vector<Slice> &fields, std::vector<std::string> *values);
-  rocksdb::Status GetAll(const Slice &user_key,
-                         std::vector<FieldValue> *field_values,
-                         HashFetchType type = HashFetchType::kAll);
-  rocksdb::Status Scan(const Slice &user_key,
-                       const std::string &cursor,
-                       uint64_t limit,
-                       const std::string &field_prefix,
-                       std::vector<std::string> *fields);
- private:
-  rocksdb::Status GetMetadata(const Slice &ns_key, HashMetadata *metadata);
-};
-}  // namespace Redis
diff --git a/src/redis_list.cc b/src/redis_list.cc
deleted file mode 100644
index 3392cf1..0000000
--- a/src/redis_list.cc
+++ /dev/null
@@ -1,457 +0,0 @@
-#include "redis_list.h"
-
-#include "stdlib.h"
-namespace Redis {
-
-rocksdb::Status List::GetMetadata(const Slice &ns_key, ListMetadata *metadata) {
-  return Database::GetMetadata(kRedisList, ns_key, metadata);
-}
-
-rocksdb::Status List::Size(const Slice &user_key, uint32_t *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-  *ret = metadata.size;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status List::Push(const Slice &user_key, const std::vector<Slice> &elems, bool left, int *ret) {
-  return push(user_key, elems, true, left, ret);
-}
-
-rocksdb::Status List::PushX(const Slice &user_key, const std::vector<Slice> &elems, bool left, int *ret) {
-  return push(user_key, elems, false, left, ret);
-}
-
-rocksdb::Status List::push(const Slice &user_key,
-                           std::vector<Slice> elems,
-                           bool create_if_missing,
-                           bool left,
-                           int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  ListMetadata metadata;
-  rocksdb::WriteBatch batch;
-  RedisCommand cmd = left ? kRedisCmdLPush : kRedisCmdRPush;
-  WriteBatchLogData log_data(kRedisList, {std::to_string(cmd)});
-  batch.PutLogData(log_data.Encode());
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !create_if_missing && s.IsNotFound()) {
-    return s.IsNotFound() ? rocksdb::Status::OK() : s;
-  }
-  uint64_t index = left ? metadata.head - 1 : metadata.tail;
-  for (const auto &elem : elems) {
-    std::string index_buf, sub_key;
-    PutFixed64(&index_buf, index);
-    InternalKey(ns_key, index_buf, metadata.version).Encode(&sub_key);
-    batch.Put(sub_key, elem);
-    left ? --index : ++index;
-  }
-  if (left) {
-    metadata.head -= elems.size();
-  } else {
-    metadata.tail += elems.size();
-  }
-  std::string bytes;
-  metadata.size += elems.size();
-  metadata.Encode(&bytes);
-  batch.Put(metadata_cf_handle_, ns_key, bytes);
-  *ret = metadata.size;
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
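-// A worked example of the index bookkeeping above: head and tail both start at
-// the midpoint M = UINT64_MAX/2 (see ListMetadata). After RPUSH a, b and then
-// LPUSH c, the elements sit at M-1 -> c, M -> a, M+1 -> b, and the metadata
-// ends up with head = M-1, tail = M+2, size = 3, covering the range [head, tail).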
-
-rocksdb::Status List::Pop(const Slice &user_key, std::string *elem, bool left) {
-  elem->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-
-  uint64_t index = left ? metadata.head : metadata.tail - 1;
-  std::string buf;
-  PutFixed64(&buf, index);
-  std::string sub_key;
-  InternalKey(ns_key, buf, metadata.version).Encode(&sub_key);
-  s = db_->Get(rocksdb::ReadOptions(), sub_key, elem);
-  if (!s.ok()) {
-    // FIXME: should this element always exist here?
-    return s;
-  }
-  rocksdb::WriteBatch batch;
-  RedisCommand cmd = left ? kRedisCmdLPop : kRedisCmdRPop;
-  WriteBatchLogData log_data(kRedisList, {std::to_string(cmd)});
-  batch.PutLogData(log_data.Encode());
-  batch.Delete(sub_key);
-  if (metadata.size == 1) {
-    batch.Delete(metadata_cf_handle_, ns_key);
-  } else {
-    std::string bytes;
-    metadata.size -= 1;
-    left ? ++metadata.head : --metadata.tail;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-/*
- * LRem removes elements whose value equals elem; count limits how many are
- * removed and from which direction.
- * Caution: LRem's time complexity is O(N), so avoid using it on a long list.
- * A simplified description of the LRem algorithm follows these steps:
- * 1. find the indexes of all elems to delete
- * 2. decide whether to shift the remaining elems from the left or the right,
- *    based on which side has fewer elems to move
- * 3. shift the remaining elems, overwriting the slots of the deleted ones
- * 4. trim and delete
- * For example: lrem list hello 0
- * when the list looks like this:
- * | E1 | E2 | E3 | hello | E4 | E5 | hello | E6 |
- * the indexes of elems to delete are [3, 6], the left part size is 6 and the
- * right part size is 4, so move elems from right to left:
- * => | E1 | E2 | E3 | E4 | E4 | E5 | hello | E6 |
- * => | E1 | E2 | E3 | E4 | E5 | E5 | hello | E6 |
- * => | E1 | E2 | E3 | E4 | E5 | E6 | hello | E6 |
- * then trim the number of deleted elems (here 2) from the tail,
- * and the list becomes: | E1 | E2 | E3 | E4 | E5 | E6 |
- */
-rocksdb::Status List::Rem(const Slice &user_key, int count, const Slice &elem, int *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-
-  uint64_t index = count >= 0 ? metadata.head : metadata.tail - 1;
-  std::string buf, start_key, prefix;
-  PutFixed64(&buf, index);
-  InternalKey(ns_key, buf, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix);
-  bool reversed = count < 0;
-  std::vector<uint64_t> to_delete_indexes;
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  for (iter->Seek(start_key);
-       iter->Valid() && iter->key().starts_with(prefix);
-       !reversed ? iter->Next() : iter->Prev()) {
-    if (iter->value() == elem) {
-      InternalKey ikey(iter->key());
-      Slice sub_key = ikey.GetSubKey();
-      GetFixed64(&sub_key, &index);
-      to_delete_indexes.emplace_back(index);
-      if (static_cast<int>(to_delete_indexes.size()) == abs(count)) break;
-    }
-  }
-  if (to_delete_indexes.empty()) {
-    delete iter;
-    return rocksdb::Status::NotFound();
-  }
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisList, {std::to_string(kRedisCmdLRem), std::to_string(count), elem.ToString()});
-  batch.PutLogData(log_data.Encode());
-
-  if (to_delete_indexes.size() == metadata.size) {
-    batch.Delete(metadata_cf_handle_, ns_key);
-  } else {
-    std::string to_update_key, to_delete_key;
-    uint64_t min_to_delete_index = !reversed ? to_delete_indexes[0] : to_delete_indexes[to_delete_indexes.size() - 1];
-    uint64_t max_to_delete_index = !reversed ? to_delete_indexes[to_delete_indexes.size() - 1] : to_delete_indexes[0];
-    uint64_t left_part_len = max_to_delete_index - metadata.head;
-    uint64_t right_part_len = metadata.tail - 1 - min_to_delete_index;
-    reversed = left_part_len <= right_part_len;
-    buf.clear();
-    PutFixed64(&buf, reversed ? max_to_delete_index : min_to_delete_index);
-    InternalKey(ns_key, buf, metadata.version).Encode(&start_key);
-    for (iter->Seek(start_key);
-         iter->Valid() && iter->key().starts_with(prefix);
-         !reversed ? iter->Next() : iter->Prev()) {
-      if (iter->value() != elem) {
-        buf.clear();
-        PutFixed64(&buf, reversed ? max_to_delete_index-- : min_to_delete_index++);
-        InternalKey(ns_key, buf, metadata.version).Encode(&to_update_key);
-        batch.Put(to_update_key, iter->value());
-      }
-    }
-
-    for (uint64_t idx = 0; idx < to_delete_indexes.size(); ++idx) {
-      buf.clear();
-      PutFixed64(&buf, reversed ? (metadata.head + idx) : (metadata.tail - 1 - idx));
-      InternalKey(ns_key, buf, metadata.version).Encode(&to_delete_key);
-      batch.Delete(to_delete_key);
-    }
-    if (reversed) {
-      metadata.head += to_delete_indexes.size();
-    } else {
-      metadata.tail -= to_delete_indexes.size();
-    }
-    metadata.size -= to_delete_indexes.size();
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-
-  delete iter;
-  *ret = static_cast<int>(to_delete_indexes.size());
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status List::Insert(const Slice &user_key, const Slice &pivot, const Slice &elem, bool before, int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-
-  std::string buf, start_key, prefix;
-  uint64_t pivot_index = metadata.head - 1, new_elem_index;
-  PutFixed64(&buf, metadata.head);
-  InternalKey(ns_key, buf, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix);
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  for (iter->Seek(start_key);
-       iter->Valid() && iter->key().starts_with(prefix);
-       iter->Next()) {
-    if (iter->value() == pivot) {
-      InternalKey ikey(iter->key());
-      Slice sub_key = ikey.GetSubKey();
-      GetFixed64(&sub_key, &pivot_index);
-      break;
-    }
-  }
-  if (pivot_index == (metadata.head - 1)) {
-    delete iter;
-    return rocksdb::Status::NotFound();
-  }
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisList,
-                             {std::to_string(kRedisCmdLInsert),
-                              before ? "1" : "0",
-                              pivot.ToString(),
-                              elem.ToString()});
-  batch.PutLogData(log_data.Encode());
-
-  std::string to_update_key;
-  uint64_t left_part_len = pivot_index - metadata.head + (before ? 0 : 1);
-  uint64_t right_part_len = metadata.tail - 1 - pivot_index + (before ? 1 : 0);
-  bool reversed = left_part_len <= right_part_len;
-  if ((reversed && !before) || (!reversed && before)) {
-    new_elem_index = pivot_index;
-  } else {
-    new_elem_index = reversed ? --pivot_index : ++pivot_index;
-    !reversed ? iter->Next() : iter->Prev();
-  }
-  for (;
-      iter->Valid() && iter->key().starts_with(prefix);
-      !reversed ? iter->Next() : iter->Prev()) {
-    buf.clear();
-    PutFixed64(&buf, reversed ? --pivot_index : ++pivot_index);
-    InternalKey(ns_key, buf, metadata.version).Encode(&to_update_key);
-    batch.Put(to_update_key, iter->value());
-  }
-  buf.clear();
-  PutFixed64(&buf, new_elem_index);
-  InternalKey(ns_key, buf, metadata.version).Encode(&to_update_key);
-  batch.Put(to_update_key, elem);
-
-  if (reversed) {
-    metadata.head--;
-  } else {
-    metadata.tail++;
-  }
-  metadata.size++;
-  std::string bytes;
-  metadata.Encode(&bytes);
-  batch.Put(metadata_cf_handle_, ns_key, bytes);
-
-  delete iter;
-  *ret = metadata.size;
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status List::Index(const Slice &user_key, int index, std::string *elem) {
-  elem->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-
-  if (index < 0) index += metadata.size;
-  if (index < 0 || index >= static_cast<int>(metadata.size)) return rocksdb::Status::OK();
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  std::string buf;
-  PutFixed64(&buf, metadata.head + index);
-  std::string sub_key;
-  InternalKey(ns_key, buf, metadata.version).Encode(&sub_key);
-  return db_->Get(read_options, sub_key, elem);
-}
-
-// The offset can also be negative, -1 is the last element, -2 the penultimate
-// Out of range indexes will not produce an error.
-// If start is larger than the end of the list, an empty list is returned.
-// If stop is larger than the actual end of the list,
-// Redis will treat it like the last element of the list.
-rocksdb::Status List::Range(const Slice &user_key, int start, int stop, std::vector<std::string> *elems) {
-  elems->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  if (start < 0) start = static_cast<int>(metadata.size) + start;
-  if (stop < 0) stop = static_cast<int>(metadata.size) + stop;
-  if (start > static_cast<int>(metadata.size) || stop < 0 || start > stop) return rocksdb::Status::OK();
-  if (start < 0) start = 0;
-
-  std::string buf;
-  PutFixed64(&buf, metadata.head + start);
-  std::string start_key, prefix;
-  InternalKey(ns_key, buf, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix);
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  for (iter->Seek(start_key);
-       iter->Valid() && iter->key().starts_with(prefix);
-       iter->Next()) {
-    InternalKey ikey(iter->key());
-    Slice sub_key = ikey.GetSubKey();
-    uint64_t index;
-    GetFixed64(&sub_key, &index);
-    // index should always be >= start
-    if (index > metadata.head + stop) break;
-    elems->push_back(iter->value().ToString());
-  }
-  delete iter;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status List::Set(const Slice &user_key, int index, Slice elem) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-  if (index < 0) index = metadata.size + index;
-  if (index < 0 || index >= static_cast<int>(metadata.size)) {
-    return rocksdb::Status::InvalidArgument("index out of range");
-  }
-
-  std::string buf, value, sub_key;
-  PutFixed64(&buf, metadata.head + index);
-  InternalKey(ns_key, buf, metadata.version).Encode(&sub_key);
-  s = db_->Get(rocksdb::ReadOptions(), sub_key, &value);
-  if (!s.ok()) {
-    return s;
-  }
-  if (value == elem) return rocksdb::Status::OK();
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData
-      log_data(kRedisList, {std::to_string(kRedisCmdLSet), std::to_string(index)});
-  batch.PutLogData(log_data.Encode());
-  batch.Put(sub_key, elem);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status List::RPopLPush(const Slice &src, const Slice &dst, std::string *elem) {
-  rocksdb::Status s = Pop(src, elem, false);
-  if (!s.ok()) return s;
-
-  int ret;
-  std::vector<Slice> elems;
-  elems.emplace_back(*elem);
-  s = Push(dst, elems, true, &ret);
-  return s;
-}
-
-// Caution: trimming a big list may block the server
-rocksdb::Status List::Trim(const Slice &user_key, int start, int stop) {
-  uint32_t trim_cnt = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ListMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  if (start < 0) start = metadata.size + start;
-  if (stop < 0) stop = static_cast<int>(metadata.size) > -1 * stop ? metadata.size + stop : metadata.size;
-  // the result will be an empty list when start > stop,
-  // or when start is larger than the end of the list
-  if (start > stop) {
-    return db_->Delete(rocksdb::WriteOptions(), metadata_cf_handle_, ns_key);
-  }
-  if (start < 0) start = 0;
-
-  std::string buf;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisList,
-                             std::vector<std::string>{std::to_string(kRedisCmdLTrim), std::to_string(start),
-                                                      std::to_string(stop)});
-  batch.PutLogData(log_data.Encode());
-  uint64_t left_index = metadata.head + start;
-  for (uint64_t i = metadata.head; i < left_index; i++) {
-    PutFixed64(&buf, i);
-    std::string sub_key;
-    InternalKey(ns_key, buf, metadata.version).Encode(&sub_key);
-    batch.Delete(sub_key);
-    metadata.head++;
-    trim_cnt++;
-  }
-  uint64_t right_index = metadata.head + stop + 1;
-  for (uint64_t i = right_index; i < metadata.tail; i++) {
-    buf.clear();
-    PutFixed64(&buf, i);
-    std::string sub_key;
-    InternalKey(ns_key, buf, metadata.version).Encode(&sub_key);
-    batch.Delete(sub_key);
-    metadata.tail--;
-    trim_cnt++;
-  }
-  if (metadata.size >= trim_cnt) {
-    metadata.size -= trim_cnt;
-  } else {
-    metadata.size = 0;
-  }
-  std::string bytes;
-  metadata.Encode(&bytes);
-  batch.Put(metadata_cf_handle_, ns_key, bytes);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-}  // namespace Redis
diff --git a/src/redis_list.h b/src/redis_list.h
deleted file mode 100644
index 513dd85..0000000
--- a/src/redis_list.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#pragma once
-
-#include <stdint.h>
-#include <vector>
-#include <string>
-
-#include "redis_db.h"
-#include "redis_metadata.h"
-#include "encoding.h"
-
-namespace Redis {
-class List : public Database {
- public:
-  explicit List(Engine::Storage *storage, const std::string &ns) : Database(storage, ns) {}
-  rocksdb::Status Size(const Slice &user_key, uint32_t *ret);
-  rocksdb::Status Trim(const Slice &user_key, int start, int stop);
-  rocksdb::Status Set(const Slice &user_key, int index, Slice elem);
-  rocksdb::Status Insert(const Slice &user_key, const Slice &pivot, const Slice &elem, bool before, int *ret);
-  rocksdb::Status Pop(const Slice &user_key, std::string *elem, bool left);
-  rocksdb::Status Rem(const Slice &user_key, int count, const Slice &elem, int *ret);
-  rocksdb::Status Index(const Slice &user_key, int index, std::string *elem);
-  rocksdb::Status RPopLPush(const Slice &src, const Slice &dst, std::string *elem);
-  rocksdb::Status Push(const Slice &user_key, const std::vector<Slice> &elems, bool left, int *ret);
-  rocksdb::Status PushX(const Slice &user_key, const std::vector<Slice> &elems, bool left, int *ret);
-  rocksdb::Status Range(const Slice &user_key, int start, int stop, std::vector<std::string> *elems);
-
- private:
-  rocksdb::Status GetMetadata(const Slice &ns_key, ListMetadata *metadata);
-  rocksdb::Status push(const Slice &user_key, std::vector<Slice> elems, bool create_if_missing, bool left, int *ret);
-};
-}  // namespace Redis
diff --git a/src/redis_metadata.cc b/src/redis_metadata.cc
deleted file mode 100644
index 89f8f80..0000000
--- a/src/redis_metadata.cc
+++ /dev/null
@@ -1,215 +0,0 @@
-#include "redis_metadata.h"
-#include <time.h>
-#include <stdlib.h>
-#include <sys/time.h>
-#include <rocksdb/env.h>
-
-#include <vector>
-#include <cstdlib>
-#include <atomic>
-
-#include "util.h"
-
-InternalKey::InternalKey(Slice input) {
-  uint32_t key_size;
-  uint8_t namespace_size;
-  GetFixed8(&input, &namespace_size);
-  namespace_ = Slice(input.data(), namespace_size);
-  input.remove_prefix(namespace_size);
-  GetFixed32(&input, &key_size);
-  key_ = Slice(input.data(), key_size);
-  input.remove_prefix(key_size);
-  GetFixed64(&input, &version_);
-  sub_key_ = Slice(input.data(), input.size());
-  buf_ = nullptr;
-  memset(prealloc_, '\0', sizeof(prealloc_));
-}
-
-InternalKey::InternalKey(Slice ns_key, Slice sub_key, uint64_t version) {
-  uint8_t namespace_size;
-  GetFixed8(&ns_key, &namespace_size);
-  namespace_ = Slice(ns_key.data(), namespace_size);
-  ns_key.remove_prefix(namespace_size);
-  key_ = ns_key;
-  sub_key_ = sub_key;
-  version_ = version;
-  buf_ = nullptr;
-  memset(prealloc_, '\0', sizeof(prealloc_));
-}
-
-InternalKey::~InternalKey() {
-  if (buf_ != nullptr && buf_ != prealloc_) delete []buf_;
-}
-
-Slice InternalKey::GetNamespace() const {
-  return namespace_;
-}
-
-Slice InternalKey::GetKey() const {
-  return key_;
-}
-
-Slice InternalKey::GetSubKey() const {
-  return sub_key_;
-}
-
-uint64_t InternalKey::GetVersion() const {
-  return version_;
-}
-
-void InternalKey::Encode(std::string *out) {
-  out->clear();
-  size_t pos = 0;
-  size_t total = 1+namespace_.size()+4+key_.size()+8+sub_key_.size();
-  if (total < sizeof(prealloc_)) {
-    buf_ = prealloc_;
-  } else {
-    buf_ = new char[total];
-  }
-  EncodeFixed8(buf_+pos, static_cast<uint8_t>(namespace_.size()));
-  pos += 1;
-  memcpy(buf_+pos, namespace_.data(), namespace_.size());
-  pos += namespace_.size();
-  EncodeFixed32(buf_+pos, static_cast<uint32_t>(key_.size()));
-  pos += 4;
-  memcpy(buf_+pos, key_.data(), key_.size());
-  pos += key_.size();
-  EncodeFixed64(buf_+pos, version_);
-  pos += 8;
-  memcpy(buf_+pos, sub_key_.data(), sub_key_.size());
-  pos += sub_key_.size();
-  out->assign(buf_, pos);
-}
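-// The resulting layout, for reference:
-//   | ns_size (1B) | namespace | key_size (4B) | key | version (8B) | sub_key |
-// e.g. namespace "ns", key "foo", version 1 and sub_key "f1" encode into
-// 1 + 2 + 4 + 3 + 8 + 2 = 20 bytes.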
-
-bool InternalKey::operator==(const InternalKey &that) const {
-  if (key_ != that.key_) return false;
-  if (sub_key_ != that.sub_key_) return false;
-  return version_ == that.version_;
-}
-
-void ExtractNamespaceKey(Slice ns_key, std::string *ns, std::string *key) {
-  uint8_t namespace_size;
-  GetFixed8(&ns_key, &namespace_size);
-  *ns = ns_key.ToString().substr(0, namespace_size);
-  ns_key.remove_prefix(namespace_size);
-  *key = ns_key.ToString();
-}
-
-void ComposeNamespaceKey(const Slice& ns, const Slice& key, std::string *ns_key) {
-  ns_key->clear();
-  PutFixed8(ns_key, static_cast<uint8_t>(ns.size()));
-  ns_key->append(ns.ToString());
-  ns_key->append(key.ToString());
-}
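-// e.g. namespace "ns" and user key "foo" compose into "\x02" "ns" "foo",
-// which ExtractNamespaceKey above splits back apart.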
-
-Metadata::Metadata(RedisType type) {
-  flags = (uint8_t)0x0f & type;
-  expire = 0;
-  version = 0;
-  size = 0;
-  version = generateVersion();
-}
-
-rocksdb::Status Metadata::Decode(const std::string &bytes) {
-  // flags(1byte) + expire (4byte)
-  if (bytes.size() < 5) {
-    return rocksdb::Status::InvalidArgument("the metadata was too short");
-  }
-  Slice input(bytes);
-  GetFixed8(&input, &flags);
-  GetFixed32(&input, reinterpret_cast<uint32_t *>(&expire));
-  if (Type() != kRedisString) {
-    if (input.size() < 12) rocksdb::Status::InvalidArgument("the metadata was too short");
-    GetFixed64(&input, &version);
-    GetFixed32(&input, &size);
-  }
-  return rocksdb::Status::OK();
-}
-
-void Metadata::Encode(std::string *dst) {
-  PutFixed8(dst, flags);
-  PutFixed32(dst, (uint32_t) expire);
-  if (Type() != kRedisString) {
-    PutFixed64(dst, version);
-    PutFixed32(dst, size);
-  }
-}
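-// Encoded metadata layout, for reference:
-//   string keys:  | flags (1B) | expire (4B) |
-//   other types:  | flags (1B) | expire (4B) | version (8B) | size (4B) |
-// (ListMetadata below additionally appends head and tail, 8 bytes each.)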
-
-uint64_t Metadata::generateVersion() {
-  struct timeval now;
-  gettimeofday(&now, nullptr);
-  uint64_t version = static_cast<uint64_t >(now.tv_sec)*1000000;
-  version += static_cast<uint64_t>(now.tv_usec);
-  // use a random initial counter to avoid conflicts when a slave is promoted
-  // to master and the system clock may go backwards
-  srand(static_cast<unsigned>(now.tv_sec));
-  static std::atomic<uint64_t> version_counter_ {static_cast<uint64_t>(std::rand())};
-  uint64_t counter = version_counter_.fetch_add(1);
-  return (version << VersionCounterBits) + (counter%(1 << VersionCounterBits));
-}
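-// Illustrative shape of the generated version, with VersionCounterBits = 11:
-//   version = (microsecond_timestamp << 11) + (counter % 2048)
-// so two versions produced in the same microsecond still differ as long as
-// fewer than 2048 versions are generated within that microsecond.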
-
-bool Metadata::operator==(const Metadata &that) const {
-  if (flags != that.flags) return false;
-  if (expire != that.expire) return false;
-  if (Type() != kRedisString) {
-    if (size != that.size) return false;
-    if (version != that.version) return false;
-  }
-  return true;
-}
-
-RedisType Metadata::Type() const {
-  return static_cast<RedisType>(flags & (uint8_t)0x0f);
-}
-
-int32_t Metadata::TTL() const {
-  int64_t now;
-  rocksdb::Env::Default()->GetCurrentTime(&now);
-  if (expire != 0 && expire < now) {
-    return -2;
-  }
-  return expire == 0 ? -1 : int32_t (expire - now);
-}
-
-timeval Metadata::Time() const {
-  auto t = version >> VersionCounterBits;
-  struct timeval created_at{static_cast<uint32_t>(t / 1000000), static_cast<int32_t>(t % 1000000)};
-  return created_at;
-}
-
-bool Metadata::Expired() const {
-  int64_t now;
-  rocksdb::Env::Default()->GetCurrentTime(&now);
-  if (expire > 0 && expire < now) {
-    return true;
-  }
-  return Type() != kRedisString && size == 0;
-}
-
-ListMetadata::ListMetadata() : Metadata(kRedisList) {
-  head = UINT64_MAX/2;
-  tail = head;
-}
-
-void ListMetadata::Encode(std::string *dst) {
-  Metadata::Encode(dst);
-  PutFixed64(dst, head);
-  PutFixed64(dst, tail);
-}
-
-rocksdb::Status ListMetadata::Decode(const std::string &bytes) {
-  Slice input(bytes);
-  GetFixed8(&input, &flags);
-  GetFixed32(&input, reinterpret_cast<uint32_t *>(&expire));
-  if (Type() != kRedisString) {
-    if (input.size() < 12) rocksdb::Status::InvalidArgument("the metadata was too short");
-    GetFixed64(&input, &version);
-    GetFixed32(&input, &size);
-  }
-  if (Type() == kRedisList) {
-    if (input.size() < 16) rocksdb::Status::InvalidArgument("the metadata was too short");
-    GetFixed64(&input, &head);
-    GetFixed64(&input, &tail);
-  }
-  return rocksdb::Status::OK();
-}
diff --git a/src/redis_metadata.h b/src/redis_metadata.h
deleted file mode 100644
index 64313de..0000000
--- a/src/redis_metadata.h
+++ /dev/null
@@ -1,124 +0,0 @@
-#pragma once
-
-#include <rocksdb/status.h>
-
-#include <string>
-#include <vector>
-
-#include "encoding.h"
-
-enum RedisType {
-  kRedisNone,
-  kRedisString,
-  kRedisHash,
-  kRedisList,
-  kRedisSet,
-  kRedisZSet,
-  kRedisBitmap
-};
-
-enum RedisCommand {
-  kRedisCmdLSet,
-  kRedisCmdLInsert,
-  kRedisCmdLTrim,
-  kRedisCmdLPop,
-  kRedisCmdRPop,
-  kRedisCmdLRem,
-  kRedisCmdLPush,
-  kRedisCmdRPush,
-  kRedisCmdExpire,
-};
-
-const std::vector<std::string> RedisTypeNames = {
-    "none", "string", "hash",
-    "list", "set", "zset"
-};
-
-using rocksdb::Slice;
-
-struct KeyNumStats {
-  uint64_t n_key = 0;
-  uint64_t n_expires = 0;
-  uint64_t n_expired = 0;
-  uint64_t avg_ttl = 0;
-};
-
-// 52 bits for microseconds and 11 bits for the counter
-const int VersionCounterBits = 11;
-
-void ExtractNamespaceKey(Slice ns_key, std::string *ns, std::string *key);
-void ComposeNamespaceKey(const Slice &ns, const Slice &key, std::string *ns_key);
-
-class InternalKey {
- public:
-  explicit InternalKey(Slice ns_key, Slice sub_key, uint64_t version);
-  explicit InternalKey(Slice input);
-  ~InternalKey();
-
-  Slice GetNamespace() const;
-  Slice GetKey() const;
-  Slice GetSubKey() const;
-  uint64_t GetVersion() const;
-  void Encode(std::string *out);
-  bool operator==(const InternalKey &that) const;
-
- private:
-  Slice namespace_;
-  Slice key_;
-  Slice sub_key_;
-  uint64_t version_;
-  char *buf_;
-  char prealloc_[256];
-};
-
-class Metadata {
- public:
-  uint8_t flags;
-  int expire;
-  uint64_t version;
-  uint32_t size;
-
- public:
-  explicit Metadata(RedisType type);
-
-  RedisType Type() const;
-  virtual int32_t TTL() const;
-  virtual timeval Time() const;
-  virtual bool Expired() const;
-  virtual void Encode(std::string *dst);
-  virtual rocksdb::Status Decode(const std::string &bytes);
-  bool operator==(const Metadata &that) const;
-
- private:
-  uint64_t generateVersion();
-};
-
-class HashMetadata : public Metadata {
- public:
-  HashMetadata():Metadata(kRedisHash){}
-};
-
-class SetMetadata : public Metadata {
- public:
-  SetMetadata(): Metadata(kRedisSet) {}
-};
-
-class ZSetMetadata : public Metadata {
- public:
-  ZSetMetadata(): Metadata(kRedisZSet){}
-};
-
-class BitmapMetadata : public Metadata {
- public:
-  BitmapMetadata(): Metadata(kRedisBitmap){}
-};
-
-class ListMetadata : public Metadata {
- public:
-  uint64_t head;
-  uint64_t tail;
-  ListMetadata();
- public:
-  void Encode(std::string *dst) override;
-  rocksdb::Status Decode(const std::string &bytes) override;
-};
diff --git a/src/redis_pubsub.cc b/src/redis_pubsub.cc
deleted file mode 100644
index 9432688..0000000
--- a/src/redis_pubsub.cc
+++ /dev/null
@@ -1,9 +0,0 @@
-#include "redis_pubsub.h"
-
-namespace Redis {
-rocksdb::Status PubSub::Publish(const Slice &channel, const Slice &value) {
-  rocksdb::WriteBatch batch;
-  batch.Put(pubsub_cf_handle_, channel, value);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-}  // namespace Redis
diff --git a/src/redis_pubsub.h b/src/redis_pubsub.h
deleted file mode 100644
index 48b7597..0000000
--- a/src/redis_pubsub.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include <string>
-
-#include "redis_db.h"
-#include "redis_metadata.h"
-
-namespace Redis {
-
-class PubSub : public Database {
- public:
-  explicit PubSub(Engine::Storage *storage) :
-      Database(storage),
-      pubsub_cf_handle_(storage->GetCFHandle("pubsub")) {}
-  rocksdb::Status Publish(const Slice &channel, const Slice &value);
-
- private:
-  rocksdb::ColumnFamilyHandle *pubsub_cf_handle_;
-};
-
-}  // namespace Redis
diff --git a/src/redis_reply.cc b/src/redis_reply.cc
deleted file mode 100644
index d847e03..0000000
--- a/src/redis_reply.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-#include "redis_reply.h"
-#include <numeric>
-
-namespace Redis {
-
-void Reply(evbuffer *output, const std::string &data) {
-  evbuffer_add(output, data.c_str(), data.length());
-}
-
-std::string SimpleString(const std::string &data) { return "+" + data + CRLF; }
-
-std::string Error(const std::string &err) { return "-" + err + CRLF; }
-
-std::string Integer(int64_t data) { return ":" + std::to_string(data) + CRLF; }
-
-std::string BulkString(const std::string &data) {
-  if (!data.empty()) {
-    return "$" + std::to_string(data.length()) + CRLF + data + CRLF;
-  }
-  return NilString();
-}
-
-std::string NilString() {
-  return "$-1\r\n";
-}
-
-std::string MultiLen(int64_t len) {
-  return "*"+std::to_string(len)+"\r\n";
-}
-
-std::string MultiBulkString(std::vector<std::string> list) {
-  for (size_t i = 0; i < list.size(); i++) {
-    if (list[i].empty()) {
-      list[i] = NilString();
-    }  else {
-      list[i] = BulkString(list[i]);
-    }
-  }
-  return Array(list);
-}
-
-std::string Array(std::vector<std::string> list) {
-  return std::accumulate(list.begin(), list.end(), "*" + std::to_string(list.size()) + CRLF);
-}
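-// Examples of the resulting RESP encodings (informal):
-//   Integer(7)                 => ":7\r\n"
-//   BulkString("ok")           => "$2\r\nok\r\n"
-//   MultiBulkString({"a", ""}) => "*2\r\n$1\r\na\r\n$-1\r\n"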
-
-}  // namespace Redis
diff --git a/src/redis_reply.h b/src/redis_reply.h
deleted file mode 100644
index 27cc06a..0000000
--- a/src/redis_reply.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-
-#include <event2/buffer.h>
-#include <string>
-#include <vector>
-
-#define CRLF "\r\n"
-
-namespace Redis {
-void Reply(evbuffer *output, const std::string &data);
-std::string SimpleString(const std::string &data);
-std::string Error(const std::string &err);
-std::string Integer(int64_t data);
-std::string BulkString(const std::string &data);
-std::string NilString();
-std::string MultiLen(int64_t len);
-std::string Array(std::vector<std::string> list);
-std::string MultiBulkString(std::vector<std::string> list);
-std::string ParseSimpleString(evbuffer *input);
-}  // namespace Redis
diff --git a/src/redis_request.cc b/src/redis_request.cc
deleted file mode 100644
index d6cf171..0000000
--- a/src/redis_request.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-#include <glog/logging.h>
-#include <rocksdb/perf_context.h>
-#include <rocksdb/iostats_context.h>
-
-#include <chrono>
-#include <utility>
-
-#include "util.h"
-#include "redis_cmd.h"
-#include "redis_reply.h"
-#include "redis_request.h"
-#include "redis_connection.h"
-#include "server.h"
-
-namespace Redis {
-const size_t PROTO_INLINE_MAX_SIZE = 16 * 1024L;
-const size_t PROTO_BULK_MAX_SIZE = 128 * 1024L * 1024L;
-const size_t PROTO_MAX_MULTI_BULKS = 8 * 1024L;
-
-Status Request::Tokenize(evbuffer *input) {
-  char *line;
-  size_t len;
-  while (true) {
-    switch (state_) {
-      case ArrayLen:
-        line = evbuffer_readln(input, &len, EVBUFFER_EOL_CRLF_STRICT);
-        if (!line || len <= 0) return Status::OK();
-        svr_->stats_.IncrInbondBytes(len);
-        if (line[0] == '*') {
-          try {
-            multi_bulk_len_ = std::stoull(std::string(line + 1, len-1));
-          } catch (std::exception &e) {
-            free(line);
-            return Status(Status::NotOK, "Protocol error: expect integer");
-          }
-          if (multi_bulk_len_ > PROTO_MAX_MULTI_BULKS) {
-            free(line);
-            return Status(Status::NotOK, "Protocol error: too many bulk strings");
-          }
-          state_ = BulkLen;
-        } else {
-          if (len > PROTO_INLINE_MAX_SIZE) {
-            free(line);
-            return Status(Status::NotOK, "Protocol error: too big inline request");
-          }
-          Util::Split(std::string(line, len), " \t", &tokens_);
-          commands_.push_back(std::move(tokens_));
-          state_ = ArrayLen;
-        }
-        free(line);
-        break;
-      case BulkLen:
-        line = evbuffer_readln(input, &len, EVBUFFER_EOL_CRLF_STRICT);
-        if (!line || len <= 0) return Status::OK();
-        svr_->stats_.IncrInbondBytes(len);
-        if (line[0] != '$') {
-          free(line);
-          return Status(Status::NotOK, "Protocol error: expect '$'");
-        }
-        try {
-          bulk_len_ = std::stoull(std::string(line + 1, len-1));
-        } catch (std::exception &e) {
-          free(line);
-          return Status(Status::NotOK, "Protocol error: expect integer");
-        }
-        if (bulk_len_ > PROTO_BULK_MAX_SIZE) {
-          free(line);
-          return Status(Status::NotOK, "Protocol error: too big bulk string");
-        }
-        free(line);
-        state_ = BulkData;
-        break;
-      case BulkData:
-        if (evbuffer_get_length(input) < bulk_len_ + 2) return Status::OK();
-        char *data = reinterpret_cast<char *>(evbuffer_pullup(input, bulk_len_ + 2));
-        tokens_.emplace_back(data, bulk_len_);
-        evbuffer_drain(input, bulk_len_ + 2);
-        svr_->stats_.IncrInbondBytes(bulk_len_ + 2);
-        --multi_bulk_len_;
-        if (multi_bulk_len_ == 0) {
-          state_ = ArrayLen;
-          commands_.push_back(std::move(tokens_));
-          tokens_.clear();
-        } else {
-          state_ = BulkLen;
-        }
-        break;
-    }
-  }
-}
-
-bool Request::inCommandWhitelist(const std::string &command) {
-  std::vector<std::string> whitelist = {"auth"};
-  for (const auto &allow_command : whitelist) {
-    if (allow_command == command) return true;
-  }
-  return false;
-}
-
-bool Request::turnOnProfilingIfNeed(const std::string &cmd) {
-  auto config = svr_->GetConfig();
-  if (config->profiling_sample_ratio == 0) return false;
-  if (!config->profiling_sample_all_commands &&
-      config->profiling_sample_commands.find(cmd) == config->profiling_sample_commands.end()) {
-    return false;
-  }
-  if (config->profiling_sample_ratio == 100 ||
-      std::rand() % 100 <= config->profiling_sample_ratio) {
-    rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
-    rocksdb::get_perf_context()->Reset();
-    rocksdb::get_iostats_context()->Reset();
-    return true;
-  }
-  return false;
-}
-
-void Request::recordProfilingSampleIfNeed(const std::string &cmd, uint64_t duration) {
-  int threshold = svr_->GetConfig()->profiling_sample_record_threshold_ms;
-  if (threshold > 0 && static_cast<int>(duration/1000) < threshold) {
-    rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);
-    return;
-  }
-  std::string perf_context = rocksdb::get_perf_context()->ToString(true);
-  std::string iostats_context = rocksdb::get_iostats_context()->ToString(true);
-  rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);
-  if (perf_context.empty()) return;  // request without db operation
-  svr_->GetPerfLog()->PushEntry({cmd, std::move(perf_context),
-                                std::move(iostats_context), duration, 0});
-}
-
-void Request::ExecuteCommands(Connection *conn) {
-  if (commands_.empty()) return;
-
-  Config *config = svr_->GetConfig();
-  std::string reply;
-  for (auto &cmd_tokens : commands_) {
-    if (conn->IsFlagEnabled(Redis::Connection::kCloseAfterReply)) break;
-    if (conn->GetNamespace().empty()) {
-      if (!config->requirepass.empty() && Util::ToLower(cmd_tokens.front()) != "auth") {
-        conn->Reply(Redis::Error("NOAUTH Authentication required."));
-        continue;
-      }
-      conn->BecomeAdmin();
-      conn->SetNamespace(kDefaultNamespace);
-    }
-    auto s = LookupCommand(cmd_tokens.front(), &conn->current_cmd_, conn->IsRepl());
-    if (!s.IsOK()) {
-      conn->Reply(Redis::Error("ERR unknown command"));
-      continue;
-    }
-    if (svr_->IsLoading() && !inCommandWhitelist(conn->current_cmd_->Name())) {
-      conn->Reply(Redis::Error("ERR restoring the db from backup"));
-      break;
-    }
-    int arity = conn->current_cmd_->GetArity();
-    int tokens = static_cast<int>(cmd_tokens.size());
-    if ((arity > 0 && tokens != arity)
-        || (arity < 0 && tokens < -arity)) {
-      conn->Reply(Redis::Error("ERR wrong number of arguments"));
-      continue;
-    }
-    conn->current_cmd_->SetArgs(cmd_tokens);
-    s = conn->current_cmd_->Parse(cmd_tokens);
-    if (!s.IsOK()) {
-      conn->Reply(Redis::Error(s.Msg()));
-      continue;
-    }
-    if (config->slave_readonly && svr_->IsSlave() && conn->current_cmd_->IsWrite()) {
-      conn->Reply(Redis::Error("READONLY You can't write against a read only slave."));
-      continue;
-    }
-    conn->SetLastCmd(conn->current_cmd_->Name());
-    svr_->stats_.IncrCalls(conn->current_cmd_->Name());
-    auto start = std::chrono::high_resolution_clock::now();
-    bool is_profiling = turnOnProfilingIfNeed(conn->current_cmd_->Name());
-    svr_->IncrExecutingCommandNum();
-    s = conn->current_cmd_->Execute(svr_, conn, &reply);
-    svr_->DecrExecutingCommandNum();
-    auto end = std::chrono::high_resolution_clock::now();
-    uint64_t duration = std::chrono::duration_cast<std::chrono::microseconds>(end-start).count();
-    if (is_profiling) recordProfilingSampleIfNeed(conn->current_cmd_->Name(), duration);
-    svr_->SlowlogPushEntryIfNeeded(conn->current_cmd_->Args(), duration);
-    svr_->stats_.IncrLatency(static_cast<uint64_t>(duration), conn->current_cmd_->Name());
-    svr_->FeedMonitorConns(conn, cmd_tokens);
-    if (!s.IsOK()) {
-      conn->Reply(Redis::Error("ERR " + s.Msg()));
-      LOG(ERROR) << "[request] Failed to execute command: " << conn->current_cmd_->Name()
-                 << ", encounter err: " << s.Msg();
-      continue;
-    }
-    if (!reply.empty()) conn->Reply(reply);
-  }
-  commands_.clear();
-}
-
-}  // namespace Redis
diff --git a/src/redis_request.h b/src/redis_request.h
deleted file mode 100644
index 7e7992e..0000000
--- a/src/redis_request.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#pragma once
-
-#include <event2/buffer.h>
-#include <vector>
-#include <string>
-
-#include "status.h"
-
-class Server;
-
-namespace Redis {
-
-class Connection;
-
-class Request {
- public:
-  explicit Request(Server *svr) : svr_(svr) {}
-  // Not copyable
-  Request(const Request &) = delete;
-  Request &operator=(const Request &) = delete;
-
-  // Parse the redis requests (bulk string array format)
-  Status Tokenize(evbuffer *input);
-  // Execute the parsed commands and reply to the connection
-  void ExecuteCommands(Connection *conn);
-
- private:
-  // internal states related to parsing
-
-  enum ParserState { ArrayLen, BulkLen, BulkData };
-  ParserState state_ = ArrayLen;
-  size_t multi_bulk_len_ = 0;
-  size_t bulk_len_ = 0;
-  using CommandTokens = std::vector<std::string>;
-  CommandTokens tokens_;
-  std::vector<CommandTokens> commands_;
-
-  Server *svr_;
-  bool inCommandWhitelist(const std::string &command);
-  bool turnOnProfilingIfNeed(const std::string &cmd);
-  void recordProfilingSampleIfNeed(const std::string &cmd, uint64_t duration);
-};
-
-}  // namespace Redis
diff --git a/src/redis_set.cc b/src/redis_set.cc
deleted file mode 100644
index 687af7f..0000000
--- a/src/redis_set.cc
+++ /dev/null
@@ -1,334 +0,0 @@
-#include "redis_set.h"
-
-#include <map>
-#include <iostream>
-
-namespace Redis {
-
-rocksdb::Status Set::GetMetadata(const Slice &ns_key, SetMetadata *metadata) {
-  return Database::GetMetadata(kRedisSet, ns_key, metadata);
-}
-
-// Make sure members are unique before calling Overwrite
-rocksdb::Status Set::Overwrite(Slice user_key, const std::vector<std::string> &members) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  SetMetadata metadata;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisSet);
-  batch.PutLogData(log_data.Encode());
-  std::string sub_key;
-  for (const auto &member : members) {
-    InternalKey(ns_key, member, metadata.version).Encode(&sub_key);
-    batch.Put(sub_key, Slice());
-  }
-  metadata.size = static_cast<uint32_t>(members.size());
-  std::string bytes;
-  metadata.Encode(&bytes);
-  batch.Put(metadata_cf_handle_, ns_key, bytes);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Set::Add(const Slice &user_key, const std::vector<Slice> &members, int *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  SetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-
-  std::string value;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisSet);
-  batch.PutLogData(log_data.Encode());
-  std::string sub_key;
-  for (const auto &member : members) {
-    InternalKey(ns_key, member, metadata.version).Encode(&sub_key);
-    s = db_->Get(rocksdb::ReadOptions(), sub_key, &value);
-    if (s.ok()) continue;
-    batch.Put(sub_key, Slice());
-    *ret += 1;
-  }
-  if (*ret > 0) {
-    metadata.size += *ret;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Set::Remove(const Slice &user_key, const std::vector<Slice> &members, int *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  SetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  std::string value, sub_key;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisSet);
-  batch.PutLogData(log_data.Encode());
-  for (const auto &member : members) {
-    InternalKey(ns_key, member, metadata.version).Encode(&sub_key);
-    s = db_->Get(rocksdb::ReadOptions(), sub_key, &value);
-    if (!s.ok()) continue;
-    batch.Delete(sub_key);
-    *ret += 1;
-  }
-  if (*ret > 0) {
-    metadata.size -= *ret;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Set::Card(const Slice &user_key, int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  SetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-  *ret = metadata.size;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Set::Members(const Slice &user_key, std::vector<std::string> *members) {
-  members->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  SetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  std::string prefix;
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix);
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  for (iter->Seek(prefix);
-       iter->Valid() && iter->key().starts_with(prefix);
-       iter->Next()) {
-    InternalKey ikey(iter->key());
-    members->emplace_back(ikey.GetSubKey().ToString());
-  }
-  delete iter;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Set::IsMember(const Slice &user_key, const Slice &member, int *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  SetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  std::string sub_key;
-  InternalKey(ns_key, member, metadata.version).Encode(&sub_key);
-  std::string value;
-  s = db_->Get(read_options, sub_key, &value);
-  if (s.ok()) {
-    *ret = 1;
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Set::Take(const Slice &user_key, std::vector<std::string> *members, int count, bool pop) {
-  int n = 0;
-  members->clear();
-  if (count <= 0) return rocksdb::Status::OK();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  if (pop) LockGuard guard(storage_->GetLockManager(), ns_key);
-  SetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisSet);
-  batch.PutLogData(log_data.Encode());
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  std::string prefix;
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix);
-  for (iter->Seek(prefix);
-       iter->Valid() && iter->key().starts_with(prefix);
-       iter->Next()) {
-    InternalKey ikey(iter->key());
-    members->emplace_back(ikey.GetSubKey().ToString());
-    if (pop) batch.Delete(iter->key());
-    if (++n >= count) break;
-  }
-  delete iter;
-  if (pop && n > 0) {
-    metadata.size -= n;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status Set::Move(const Slice &src, const Slice &dst, const Slice &member, int *ret) {
-  std::vector<Slice> members{member};
-  rocksdb::Status s = Remove(src, members, ret);
-  if (!s.ok() || *ret == 0) {
-    return s;
-  }
-  return Add(dst, members, ret);
-}
-
-rocksdb::Status Set::Scan(const Slice &user_key,
-                          const std::string &cursor,
-                          uint64_t limit,
-                          const std::string &member_prefix,
-                          std::vector<std::string> *members) {
-  return SubKeyScanner::Scan(kRedisSet, user_key, cursor, limit, member_prefix, members);
-}
-
-/*
- * Returns the members of the set resulting from the difference between
- * the first set and all the successive sets. For example:
- * key1 = {a,b,c,d}
- * key2 = {c}
- * key3 = {a,c,e}
- * DIFF key1 key2 key3 = {b,d}
- */
-rocksdb::Status Set::Diff(const std::vector<Slice> &keys, std::vector<std::string> *members) {
-  members->clear();
-  std::vector<std::string> source_members;
-  auto s = Members(keys[0], &source_members);
-  if (!s.ok()) return s;
-
-  std::map<std::string, bool> exclude_members;
-  std::vector<std::string> target_members;
-  for (size_t i = 1; i < keys.size(); i++) {
-    s = Members(keys[i], &target_members);
-    if (!s.ok()) return s;
-    for (const auto &member : target_members) {
-      exclude_members[member] = true;
-    }
-  }
-  for (const auto &member : source_members) {
-    if (exclude_members.find(member) == exclude_members.end()) {
-      members->push_back(member);
-    }
-  }
-  return rocksdb::Status::OK();
-}
-
-/*
- * Returns the members of the set resulting from the union of all the given sets.
- * For example:
- * key1 = {a,b,c,d}
- * key2 = {c}
- * key3 = {a,c,e}
- * UNION key1 key2 key3 = {a,b,c,d,e}
- */
-rocksdb::Status Set::Union(const std::vector<Slice> &keys, std::vector<std::string> *members) {
-  members->clear();
-
-  std::map<std::string, bool> union_members;
-  std::vector<std::string> target_members;
-  for (size_t i = 0; i < keys.size(); i++) {
-    auto s = Members(keys[i], &target_members);
-    if (!s.ok()) return s;
-    for (const auto &member : target_members) {
-      union_members[member] = true;
-    }
-  }
-  for (const auto &iter : union_members) {
-    members->emplace_back(iter.first);
-  }
-  return rocksdb::Status::OK();
-}
-
-/*
- * Returns the members of the set resulting from the intersection of all the given sets.
- * For example:
- * key1 = {a,b,c,d}
- * key2 = {c}
- * key3 = {a,c,e}
- * INTER key1 key2 key3 = {c}
- */
-rocksdb::Status Set::Inter(const std::vector<Slice> &keys, std::vector<std::string> *members) {
-  members->clear();
-
-  std::map<std::string, size_t> member_counters;
-  std::vector<std::string> target_members;
-  auto s = Members(keys[0], &target_members);
-  if (!s.ok() || target_members.empty()) return s;
-  for (const auto &member : target_members) {
-    member_counters[member] = 1;
-  }
-  for (size_t i = 1; i < keys.size(); i++) {
-    auto s = Members(keys[i], &target_members);
-    if (!s.ok() || target_members.empty()) return s;
-    for (const auto &member : target_members) {
-      if (member_counters.find(member) == member_counters.end()) continue;
-      member_counters[member]++;
-    }
-  }
-  for (const auto &iter : member_counters) {
-    if (iter.second == keys.size()) {  // all the sets contain this member
-      members->emplace_back(iter.first);
-    }
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status Set::DiffStore(const Slice &dst, const std::vector<Slice> &keys, int *ret) {
-  *ret = 0;
-  std::vector<std::string> members;
-  auto s = Diff(keys, &members);
-  if (!s.ok()) return s;
-  *ret = static_cast<int>(members.size());
-  return Overwrite(dst, members);
-}
-
-rocksdb::Status Set::UnionStore(const Slice &dst, const std::vector<Slice> &keys, int *ret) {
-  *ret = 0;
-  std::vector<std::string> members;
-  auto s = Union(keys, &members);
-  if (!s.ok()) return s;
-  *ret = static_cast<int>(members.size());
-  return Overwrite(dst, members);
-}
-
-rocksdb::Status Set::InterStore(const Slice &dst, const std::vector<Slice> &keys, int *ret) {
-  *ret = 0;
-  std::vector<std::string> members;
-  auto s = Inter(keys, &members);
-  if (!s.ok()) return s;
-  *ret = static_cast<int>(members.size());
-  return Overwrite(dst, members);
-}
-}  // namespace Redis
diff --git a/src/redis_set.h b/src/redis_set.h
deleted file mode 100644
index 7d9a19f..0000000
--- a/src/redis_set.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#pragma once
-
-#include <string>
-#include <vector>
-
-#include "redis_db.h"
-#include "redis_metadata.h"
-
-namespace Redis {
-
-class Set : public SubKeyScanner {
- public:
-  explicit Set(Engine::Storage *storage, const std::string &ns)
-      : SubKeyScanner(storage, ns) {}
-
-  rocksdb::Status Card(const Slice &user_key, int *ret);
-  rocksdb::Status IsMember(const Slice &user_key, const Slice &member, int *ret);
-  rocksdb::Status Add(const Slice &user_key, const std::vector<Slice> &members, int *ret);
-  rocksdb::Status Remove(const Slice &user_key, const std::vector<Slice> &members, int *ret);
-  rocksdb::Status Members(const Slice &user_key, std::vector<std::string> *members);
-  rocksdb::Status Move(const Slice &src, const Slice &dst, const Slice &member, int *ret);
-  rocksdb::Status Take(const Slice &user_key, std::vector<std::string> *members, int count, bool pop);
-  rocksdb::Status Diff(const std::vector<Slice> &keys, std::vector<std::string> *members);
-  rocksdb::Status Union(const std::vector<Slice> &keys, std::vector<std::string> *members);
-  rocksdb::Status Inter(const std::vector<Slice> &keys, std::vector<std::string> *members);
-  rocksdb::Status Overwrite(Slice user_key, const std::vector<std::string> &members);
-  rocksdb::Status DiffStore(const Slice &dst, const std::vector<Slice> &keys, int *ret);
-  rocksdb::Status UnionStore(const Slice &dst, const std::vector<Slice> &keys, int *ret);
-  rocksdb::Status InterStore(const Slice &dst, const std::vector<Slice> &keys, int *ret);
-  rocksdb::Status Scan(const Slice &user_key,
-                       const std::string &cursor,
-                       uint64_t limit,
-                       const std::string &member_prefix,
-                       std::vector<std::string> *members);
-
- private:
-  rocksdb::Status GetMetadata(const Slice &ns_key, SetMetadata *metadata);
-};
-
-}  // namespace Redis
diff --git a/src/redis_string.cc b/src/redis_string.cc
deleted file mode 100644
index b8e1b77..0000000
--- a/src/redis_string.cc
+++ /dev/null
@@ -1,280 +0,0 @@
-#include "redis_string.h"
-#include <string>
-#include <limits>
-
-namespace Redis {
-
-rocksdb::Status String::getValue(const Slice &ns_key, std::string *raw_value, std::string *value) {
-  if (value) value->clear();
-  if (raw_value) {
-    raw_value->clear();
-    std::string md_bytes;
-    Metadata(kRedisString).Encode(&md_bytes);
-    raw_value->append(md_bytes);
-  }
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  std::string raw_bytes;
-  rocksdb::Status s = db_->Get(read_options, metadata_cf_handle_, ns_key, &raw_bytes);
-  if (!s.ok()) return s;
-
-  Metadata metadata(kRedisNone);
-  metadata.Decode(raw_bytes);
-  if (metadata.Expired()) {
-    return rocksdb::Status::NotFound("the key was expired");
-  }
-  if (metadata.Type() != kRedisString && metadata.size > 0) {
-    return rocksdb::Status::InvalidArgument("WRONGTYPE Operation against a key holding the wrong kind of value");
-  }
-  if (value) value->assign(raw_bytes.substr(5, raw_bytes.size()-5));
-  if (raw_value) raw_value->assign(raw_bytes.data(), raw_bytes.size());
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status String::updateValue(const Slice &ns_key, const Slice &raw_value, const Slice &new_value) {
-  std::string metadata_bytes;
-  if (raw_value.empty()) {
-    Metadata(kRedisString).Encode(&metadata_bytes);
-  } else {
-    metadata_bytes = raw_value.ToString().substr(0, 5);
-  }
-  metadata_bytes.append(new_value.ToString());
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisString);
-  batch.PutLogData(log_data.Encode());
-  batch.Put(metadata_cf_handle_, ns_key, metadata_bytes);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status String::Append(const Slice &user_key, const Slice &value, int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  std::string raw_value_bytes, value_bytes;
-  rocksdb::Status s = getValue(ns_key, &raw_value_bytes, &value_bytes);
-  if (!s.ok() && !s.IsNotFound()) return s;
-  value_bytes.append(value.ToString());
-  *ret = static_cast<int>(value_bytes.size());
-  return updateValue(ns_key, raw_value_bytes, value_bytes);
-}
-
-std::vector<rocksdb::Status> String::MGet(const std::vector<Slice> &keys, std::vector<std::string> *values) {
-  std::string ns_key;
-  std::string value;
-  std::vector<rocksdb::Status> statuses;
-  for (size_t i = 0; i < keys.size(); i++) {
-    AppendNamespacePrefix(keys[i], &ns_key);
-    statuses.emplace_back(getValue(ns_key, nullptr, &value));
-    values->emplace_back(value);
-  }
-  return statuses;
-}
-
-rocksdb::Status String::Get(const Slice &user_key, std::string *value) {
-  std::vector<Slice> keys{user_key};
-  std::vector<std::string> values;
-  std::vector<rocksdb::Status> statuses = MGet(keys, &values);
-  *value = values[0];
-  return statuses[0];
-}
-rocksdb::Status String::GetSet(const Slice &user_key, const Slice &new_value, std::string *old_value) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  std::string raw_value_bytes, value_bytes;
-  rocksdb::Status s = getValue(ns_key, &raw_value_bytes, &value_bytes);
-  if (!s.ok() && !s.IsNotFound()) return s;
-  *old_value = value_bytes;
-  return updateValue(ns_key, raw_value_bytes, new_value);
-}
-
-rocksdb::Status String::Set(const Slice &user_key, const Slice &value) {
-  std::vector<StringPair> pairs{StringPair{user_key, value}};
-  return MSet(pairs, 0);
-}
-
-rocksdb::Status String::SetEX(const Slice &user_key, const Slice &value, int ttl) {
-  std::vector<StringPair> pairs{StringPair{user_key, value}};
-  return MSet(pairs, ttl);
-}
-
-rocksdb::Status String::SetNX(const Slice &user_key, const Slice &value, int ttl, int *ret) {
-  std::vector<StringPair> pairs{StringPair{user_key, value}};
-  return MSetNX(pairs, ttl, ret);
-}
-
-rocksdb::Status String::SetXX(const Slice &user_key, const Slice &value, int ttl, int *ret) {
-  *ret = 0;
-  int exists = 0;
-  uint32_t expire = 0;
-  if (ttl > 0) {
-    int64_t now;
-    rocksdb::Env::Default()->GetCurrentTime(&now);
-    expire = uint32_t(now) + ttl;
-  }
-
-  std::string ns_key;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisString);
-  batch.PutLogData(log_data.Encode());
-
-  AppendNamespacePrefix(user_key, &ns_key);
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  Exists({user_key}, &exists);
-  if (exists != 1) return rocksdb::Status::OK();
-
-  *ret = 1;
-  std::string bytes;
-  Metadata metadata(kRedisString);
-  metadata.expire = expire;
-  metadata.Encode(&bytes);
-  bytes.append(value.ToString());
-  batch.Put(metadata_cf_handle_, ns_key, bytes);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status String::SetRange(const Slice &user_key, int offset, Slice value, int *ret) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  std::string raw_value_bytes, value_bytes;
-  rocksdb::Status s = getValue(ns_key, &raw_value_bytes, &value_bytes);
-  if (!s.ok() && !s.IsNotFound()) return s;
-  if (offset > static_cast<int>(value_bytes.size())) {
-    // pad the value with zero bytes when the offset is beyond the current value size
-    int paddings = offset - static_cast<int>(value_bytes.size());
-    value_bytes.append(paddings, '\0');
-  }
-  if (offset+value.size() >= value_bytes.size()) {
-    value_bytes = value_bytes.substr(0, offset);
-    value_bytes.append(value.ToString());
-  } else {
-    for (size_t i = 0; i < value.size(); i++) {
-      value_bytes[i] = value[i];
-    }
-  }
-  *ret = static_cast<int>(value_bytes.size());
-  return updateValue(ns_key, raw_value_bytes, value_bytes);
-}
-
-rocksdb::Status String::IncrBy(const Slice &user_key, int64_t increment, int64_t *ret) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  std::string raw_value_bytes, value_bytes;
-  rocksdb::Status s = getValue(ns_key, &raw_value_bytes, &value_bytes);
-  if (!s.ok() && !s.IsNotFound()) return s;
-  int64_t value = 0;
-  if (!value_bytes.empty()) {
-    try {
-      value = std::stoll(value_bytes);
-    } catch(std::exception &e) {
-      return rocksdb::Status::InvalidArgument("value is not an integer or out of range");
-    }
-  }
-  if ((increment < 0 && value < 0 && increment < (LLONG_MIN-value))
-      || (increment > 0 && value > 0 && increment > (LLONG_MAX-value))) {
-    return rocksdb::Status::InvalidArgument("increment or decrement would overflow");
-  }
-  value += increment;
-  *ret = value;
-  return updateValue(ns_key, raw_value_bytes, std::to_string(value));
-}
-
-rocksdb::Status String::IncrByFloat(const Slice &user_key, float increment, float *ret) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  std::string raw_value_bytes, value_bytes;
-  rocksdb::Status s = getValue(ns_key, &raw_value_bytes, &value_bytes);
-  if (!s.ok() && !s.IsNotFound()) return s;
-  float value = 0;
-  if (!value_bytes.empty()) {
-    try {
-      value = std::stof(value_bytes);
-    } catch(std::exception &e) {
-      return rocksdb::Status::InvalidArgument("value is not an integer");
-    }
-  }
-  auto float_min = std::numeric_limits<float>::min();
-  auto float_max = std::numeric_limits<float>::max();
-  if ((increment < 0 && value < 0 && increment < (float_min-value))
-      || (increment > 0 && value > 0 && increment > (float_max-value))) {
-    return rocksdb::Status::InvalidArgument("increment or decrement would overflow");
-  }
-  value += increment;
-  *ret = value;
-  return updateValue(ns_key, raw_value_bytes, std::to_string(value));
-}
-
-rocksdb::Status String::MSet(const std::vector<StringPair> &pairs, int ttl) {
-  uint32_t expire = 0;
-  if (ttl > 0) {
-    int64_t now;
-    rocksdb::Env::Default()->GetCurrentTime(&now);
-    expire = uint32_t(now) + ttl;
-  }
-
-  // Data race: since the key is not locked here (to keep SET fast), the value may be
-  // overwritten by a concurrent write to the same key
-  std::string ns_key;
-  for (const auto &pair : pairs) {
-    std::string bytes;
-    Metadata metadata(kRedisString);
-    metadata.expire = expire;
-    metadata.Encode(&bytes);
-    bytes.append(pair.value.ToString());
-    rocksdb::WriteBatch batch;
-    WriteBatchLogData log_data(kRedisString);
-    batch.PutLogData(log_data.Encode());
-    AppendNamespacePrefix(pair.key, &ns_key);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-    LockGuard guard(storage_->GetLockManager(), ns_key);
-    auto s = storage_->Write(rocksdb::WriteOptions(), &batch);
-    if (!s.ok()) return s;
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status String::MSetNX(const std::vector<StringPair> &pairs, int ttl, int *ret) {
-  *ret = 0;
-
-  uint32_t expire = 0;
-  if (ttl > 0) {
-    int64_t now;
-    rocksdb::Env::Default()->GetCurrentTime(&now);
-    expire = uint32_t(now) + ttl;
-  }
-
-  int exists;
-  std::string ns_key;
-  for (StringPair pair : pairs) {
-    AppendNamespacePrefix(pair.key, &ns_key);
-    LockGuard guard(storage_->GetLockManager(), ns_key);
-    if (Exists({pair.key}, &exists).ok() && exists == 1) {
-      return rocksdb::Status::OK();
-    }
-    std::string bytes;
-    Metadata metadata(kRedisString);
-    metadata.expire = expire;
-    metadata.Encode(&bytes);
-    bytes.append(pair.value.ToString());
-    rocksdb::WriteBatch batch;
-    WriteBatchLogData log_data(kRedisString);
-    batch.PutLogData(log_data.Encode());
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-    auto s = storage_->Write(rocksdb::WriteOptions(), &batch);
-    if (!s.ok()) return s;
-  }
-  *ret = 1;
-  return rocksdb::Status::OK();
-}
-}  // namespace Redis
diff --git a/src/redis_string.h b/src/redis_string.h
deleted file mode 100644
index f6df6dd..0000000
--- a/src/redis_string.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#pragma once
-
-#include <vector>
-#include <string>
-
-#include "redis_db.h"
-#include "redis_metadata.h"
-
-typedef struct {
-  Slice key;
-  Slice value;
-} StringPair;
-
-namespace Redis {
-
-class String : public Database {
- public:
-  explicit String(Engine::Storage *storage, const std::string &ns) : Database(storage, ns) {}
-  rocksdb::Status Append(const Slice &user_key, const Slice &value, int *ret);
-  rocksdb::Status Get(const Slice &user_key, std::string *value);
-  rocksdb::Status GetSet(const Slice &user_key, const Slice &new_value, std::string *old_value);
-  rocksdb::Status Set(const Slice &user_key, const Slice &value);
-  rocksdb::Status SetEX(const Slice &user_key, const Slice &value, int ttl);
-  rocksdb::Status SetNX(const Slice &user_key, const Slice &value, int ttl, int *ret);
-  rocksdb::Status SetXX(const Slice &user_key, const Slice &value, int ttl, int *ret);
-  rocksdb::Status SetRange(const Slice &user_key, int offset, Slice value, int *ret);
-  rocksdb::Status IncrBy(const Slice &user_key, int64_t increment, int64_t *ret);
-  rocksdb::Status IncrByFloat(const Slice &user_key, float increment, float *ret);
-  std::vector<rocksdb::Status> MGet(const std::vector<Slice> &keys, std::vector<std::string> *values);
-  rocksdb::Status MSet(const std::vector<StringPair> &pairs, int ttl = 0);
-  rocksdb::Status MSetNX(const std::vector<StringPair> &pairs, int ttl, int *ret);
-
- private:
-  rocksdb::Status getValue(const Slice &ns_key, std::string *raw_value, std::string *value = nullptr);
-  rocksdb::Status updateValue(const Slice &ns_key, const Slice &raw_value, const Slice &new_value);
-};
-
-}  // namespace Redis
diff --git a/src/redis_zset.cc b/src/redis_zset.cc
deleted file mode 100644
index ecca080..0000000
--- a/src/redis_zset.cc
+++ /dev/null
@@ -1,694 +0,0 @@
-#include "redis_zset.h"
-
-#include <math.h>
-#include <map>
-#include <limits>
-
-namespace Redis {
-
-rocksdb::Status ZSet::GetMetadata(const Slice &ns_key, ZSetMetadata *metadata) {
-  return Database::GetMetadata(kRedisZSet, ns_key, metadata);
-}
-
-rocksdb::Status ZSet::Add(const Slice &user_key, uint8_t flags, std::vector<MemberScore> *mscores, int *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok() && !s.IsNotFound()) return s;
-
-  int added = 0;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisZSet);
-  batch.PutLogData(log_data.Encode());
-  std::string member_key;
-  for (size_t i = 0; i < mscores->size(); i++) {
-    InternalKey(ns_key, (*mscores)[i].member, metadata.version).Encode(&member_key);
-    if (metadata.size > 0) {
-      std::string old_score_bytes;
-      s = db_->Get(rocksdb::ReadOptions(), member_key, &old_score_bytes);
-      if (!s.ok() && !s.IsNotFound()) return s;
-      if (s.ok()) {
-        double old_score = DecodeDouble(old_score_bytes.data());
-        if (flags == ZSET_INCR) {
-          (*mscores)[i].score += old_score;
-        }
-        if ((*mscores)[i].score != old_score) {
-          old_score_bytes.append((*mscores)[i].member);
-          std::string old_score_key;
-          InternalKey(ns_key, old_score_bytes, metadata.version).Encode(&old_score_key);
-          batch.Delete(score_cf_handle_, old_score_key);
-          std::string new_score_bytes, new_score_key;
-          PutDouble(&new_score_bytes, (*mscores)[i].score);
-          batch.Put(member_key, new_score_bytes);
-          new_score_bytes.append((*mscores)[i].member);
-          InternalKey(ns_key, new_score_bytes, metadata.version).Encode(&new_score_key);
-          batch.Put(score_cf_handle_, new_score_key, Slice());
-        }
-        continue;
-      }
-    }
-    std::string score_bytes, score_key;
-    PutDouble(&score_bytes, (*mscores)[i].score);
-    batch.Put(member_key, score_bytes);
-    score_bytes.append((*mscores)[i].member);
-    InternalKey(ns_key, score_bytes, metadata.version).Encode(&score_key);
-    batch.Put(score_cf_handle_, score_key, Slice());
-    added++;
-  }
-  if (added > 0) {
-    *ret = added;
-    metadata.size += added;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status ZSet::Card(const Slice &user_key, int *ret) {
-  *ret = 0;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-  *ret = metadata.size;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::Count(const Slice &user_key, const ZRangeSpec &spec, int *ret) {
-  *ret = 0;
-  return RangeByScore(user_key, spec, nullptr, ret);
-}
-
-rocksdb::Status ZSet::IncrBy(const Slice &user_key, const Slice &member, double increment, double *score) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  mscores.emplace_back(MemberScore{member.ToString(), increment});
-  rocksdb::Status s = Add(user_key, ZSET_INCR, &mscores, &ret);
-  if (!s.ok()) return s;
-  *score = mscores[0].score;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::Pop(const Slice &user_key, int count, bool min, std::vector<MemberScore> *mscores) {
-  mscores->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-  if (count <= 0) return rocksdb::Status::OK();
-  if (count > static_cast<int>(metadata.size)) count = metadata.size;
-
-  std::string score_bytes;
-  double score = min ? std::numeric_limits<double>::lowest():std::numeric_limits<double>::max();
-  PutDouble(&score_bytes, score);
-  std::string start_key, prefix_key;
-  InternalKey(ns_key, score_bytes, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisZSet);
-  batch.PutLogData(log_data.Encode());
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options, score_cf_handle_);
-  iter->Seek(start_key);
-  // see the comment in RangeByScore()
-  if (!min && (!iter->Valid() || !iter->key().starts_with(prefix_key))) {
-    iter->SeekForPrev(start_key);
-  }
-  for (;
-      iter->Valid() && iter->key().starts_with(prefix_key);
-      min ? iter->Next() : iter->Prev()) {
-    InternalKey ikey(iter->key());
-    Slice score_key = ikey.GetSubKey();
-    GetDouble(&score_key, &score);
-    mscores->emplace_back(MemberScore{score_key.ToString(), score});
-    std::string default_cf_key;
-    InternalKey(ns_key, score_key, metadata.version).Encode(&default_cf_key);
-    batch.Delete(default_cf_key);
-    batch.Delete(score_cf_handle_, iter->key());
-    if (mscores->size() >= static_cast<unsigned>(count)) break;
-  }
-  delete iter;
-
-  if (!mscores->empty()) {
-    metadata.size -= mscores->size();
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status ZSet::Range(const Slice &user_key, int start, int stop, uint8_t flags, std::vector<MemberScore>
-*mscores) {
-  mscores->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  bool removed = (flags & (uint8_t)ZSET_REMOVED) != 0;
-  bool reversed = (flags & (uint8_t)ZSET_REVERSED) != 0;
-  if (removed) LockGuard guard(storage_->GetLockManager(), ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-  if (start < 0) start += metadata.size;
-  if (stop < 0) stop += metadata.size;
-  if (start < 0 || stop < 0 || start > stop) {
-    return rocksdb::Status::OK();
-  }
-
-  std::string score_bytes;
-  double score = !reversed ? std::numeric_limits<double>::lowest():std::numeric_limits<double>::max();
-  PutDouble(&score_bytes, score);
-  std::string start_key, prefix_key;
-  InternalKey(ns_key, score_bytes, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-
-  int count = 0;
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-  rocksdb::WriteBatch batch;
-  auto iter = db_->NewIterator(read_options, score_cf_handle_);
-  iter->Seek(start_key);
-  // see the comment in RangeByScore()
-  if (reversed && (!iter->Valid() || !iter->key().starts_with(prefix_key))) {
-    iter->SeekForPrev(start_key);
-  }
-  for (;
-      iter->Valid() && iter->key().starts_with(prefix_key);
-      !reversed ? iter->Next() : iter->Prev()) {
-    InternalKey ikey(iter->key());
-    Slice score_key = ikey.GetSubKey();
-    GetDouble(&score_key, &score);
-    if (count >= start) {
-      if (removed) {
-        std::string sub_key;
-        InternalKey(ns_key, score_key, metadata.version).Encode(&sub_key);
-        batch.Delete(sub_key);
-        batch.Delete(score_cf_handle_, iter->key());
-      }
-      mscores->emplace_back(MemberScore{score_key.ToString(), score});
-    }
-    if (count++ >= stop) break;
-  }
-  delete iter;
-
-  if (removed && count > 0) {
-    metadata.size -= count;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-    return storage_->Write(rocksdb::WriteOptions(), &batch);
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::RangeByScore(const Slice &user_key,
-                                        ZRangeSpec spec,
-                                        std::vector<MemberScore> *mscores,
-                                        int *size) {
-  if (size) *size = 0;
-  if (mscores) mscores->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  if (spec.removed) LockGuard guard(storage_->GetLockManager(), ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-
-  std::string start_score_bytes;
-  PutDouble(&start_score_bytes, spec.reversed ? spec.max : spec.min);
-  std::string start_key, prefix_key;
-  InternalKey(ns_key, start_score_bytes, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-
-  int pos = 0;
-  auto iter = db_->NewIterator(read_options, score_cf_handle_);
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisZSet);
-  batch.PutLogData(log_data.Encode());
-  iter->Seek(start_key);
-  // When iterating a reverse range, use `SeekForPrev` to position the iterator correctly in these cases:
-  //    a. the key with the max score is the largest key in the DB, so the iterator is invalid; seek to the previous key
-  //    b. there is a key after the key with the max score, so the iterator is valid, but the current key may be:
-  //        b1. its prefix matches the start key: don't skip it (the zset key exists)
-  //        b2. its prefix doesn't match the start key: seek to the previous key
-  // Note: the DB key is composed of `NS|key|version|score|member`, so SeekForPrev(`NS|key|version|score`)
-  // may skip the current score if it exists; hence only SeekForPrev when the prefix doesn't match the start key
-  if (spec.reversed && (!iter->Valid() || !iter->key().starts_with(prefix_key))) {
-    iter->SeekForPrev(start_key);
-  }
-  for (;
-      iter->Valid() && iter->key().starts_with(prefix_key);
-      !spec.reversed ? iter->Next() : iter->Prev()) {
-    InternalKey ikey(iter->key());
-    Slice score_key = ikey.GetSubKey();
-    double score;
-    GetDouble(&score_key, &score);
-    if (spec.reversed) {
-      if ((spec.minex && score == spec.min) || score < spec.min) break;
-      if ((spec.maxex && score == spec.max) || score > spec.max) continue;
-    } else {
-      if ((spec.minex && score == spec.min) || score < spec.min) continue;
-      if ((spec.maxex && score == spec.max) || score > spec.max) break;
-    }
-    if (spec.offset >= 0 && pos++ < spec.offset) continue;
-    if (spec.removed) {
-      std::string sub_key;
-      InternalKey(ns_key, score_key, metadata.version).Encode(&sub_key);
-      batch.Delete(sub_key);
-      batch.Delete(score_cf_handle_, iter->key());
-    } else {
-      if (mscores) mscores->emplace_back(MemberScore{score_key.ToString(), score});
-    }
-    if (size) *size += 1;
-    if (spec.count > 0 && mscores && mscores->size() >= static_cast<unsigned>(spec.count)) break;
-  }
-  delete iter;
-
-  if (spec.removed && *size > 0) {
-    metadata.size -= *size;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-    return storage_->Write(rocksdb::WriteOptions(), &batch);
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::RangeByLex(const Slice &user_key,
-                                 ZRangeLexSpec spec,
-                                 std::vector<std::string> *members,
-                                 int *size) {
-  if (size) *size = 0;
-  if (members) members->clear();
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound() ? rocksdb::Status::OK() : s;
-
-  std::string start_key, prefix_key;
-  InternalKey(ns_key, spec.min, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  read_options.fill_cache = false;
-
-  int pos = 0;
-  auto iter = db_->NewIterator(read_options);
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisZSet);
-  batch.PutLogData(log_data.Encode());
-  for (iter->Seek(start_key);
-       iter->Valid() && iter->key().starts_with(prefix_key);
-       iter->Next()) {
-    InternalKey ikey(iter->key());
-    Slice member = ikey.GetSubKey();
-    if (spec.minex && member == spec.min) continue;  // the min member is exclusive
-    if ((spec.maxex && member == spec.max) || (!spec.max_infinite && member.ToString() > spec.max)) break;
-    if (spec.offset >= 0 && pos++ < spec.offset) continue;
-    if (spec.removed) {
-      std::string score_bytes = iter->value().ToString();
-      score_bytes.append(member.ToString());
-      std::string score_key;
-      InternalKey(ns_key, score_bytes, metadata.version).Encode(&score_key);
-      batch.Delete(score_cf_handle_, score_key);
-      batch.Delete(iter->key());
-    } else {
-      if (members) members->emplace_back(member.ToString());
-    }
-    if (size) *size += 1;
-    if (spec.count > 0 && members && members->size() >= static_cast<unsigned>(spec.count)) break;
-  }
-  delete iter;
-
-  if (spec.removed && *size > 0) {
-    metadata.size -= *size;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-    return storage_->Write(rocksdb::WriteOptions(), &batch);
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::Score(const Slice &user_key, const Slice &member, double *score) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s;
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-
-  std::string member_key, score_bytes;
-  InternalKey(ns_key, member, metadata.version).Encode(&member_key);
-  s = db_->Get(read_options, member_key, &score_bytes);
-  if (!s.ok()) return s;
-  *score = DecodeDouble(score_bytes.data());
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::Remove(const Slice &user_key, const std::vector<Slice> &members, int *ret) {
-  *ret = 0;
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisZSet);
-  batch.PutLogData(log_data.Encode());
-  int removed = 0;
-  std::string member_key, score_key;
-  for (const auto &member : members) {
-    InternalKey(ns_key, member, metadata.version).Encode(&member_key);
-    std::string score_bytes;
-    s = db_->Get(rocksdb::ReadOptions(), member_key, &score_bytes);
-    if (s.ok()) {
-      score_bytes.append(member.ToString());
-      InternalKey(ns_key, score_bytes, metadata.version).Encode(&score_key);
-      batch.Delete(member_key);
-      batch.Delete(score_cf_handle_, score_key);
-      removed++;
-    }
-  }
-  if (removed > 0) {
-    *ret = removed;
-    metadata.size -= removed;
-    std::string bytes;
-    metadata.Encode(&bytes);
-    batch.Put(metadata_cf_handle_, ns_key, bytes);
-  }
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status ZSet::RemoveRangeByScore(const Slice &user_key, ZRangeSpec spec, int *ret) {
-  spec.removed = true;
-  return RangeByScore(user_key, spec, nullptr, ret);
-}
-
-rocksdb::Status ZSet::RemoveRangeByLex(const Slice &user_key, ZRangeLexSpec spec, int *ret) {
-  spec.removed = true;
-  return RangeByLex(user_key, spec, nullptr, ret);
-}
-
-rocksdb::Status ZSet::RemoveRangeByRank(const Slice &user_key, int start, int stop, int *ret) {
-  uint8_t flags = ZSET_REMOVED;
-  std::vector<MemberScore> mscores;
-  rocksdb::Status s = Range(user_key, start, stop, flags, &mscores);
-  *ret = static_cast<int>(mscores.size());
-  return s;
-}
-
-rocksdb::Status ZSet::Rank(const Slice &user_key, const Slice &member, bool reversed, int *ret) {
-  *ret = -1;
-
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-  ZSetMetadata metadata;
-  rocksdb::Status s = GetMetadata(ns_key, &metadata);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-
-  rocksdb::ReadOptions read_options;
-  LatestSnapShot ss(db_);
-  read_options.snapshot = ss.GetSnapShot();
-  std::string score_bytes, member_key;
-  InternalKey(ns_key, member, metadata.version).Encode(&member_key);
-  s = db_->Get(rocksdb::ReadOptions(), member_key, &score_bytes);
-  if (!s.ok()) return s.IsNotFound()? rocksdb::Status::OK():s;
-
-  double target_score = DecodeDouble(score_bytes.data());
-  std::string start_score_bytes, start_key, prefix_key;
-  double start_score = !reversed ? std::numeric_limits<double>::lowest():std::numeric_limits<double>::max();
-  PutDouble(&start_score_bytes, start_score);
-  InternalKey(ns_key, start_score_bytes, metadata.version).Encode(&start_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-
-  int rank = 0;
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options, score_cf_handle_);
-  iter->Seek(start_key);
-  // see the comment in RangeByScore()
-  if (reversed && (!iter->Valid() || !iter->key().starts_with(prefix_key))) {
-    iter->SeekForPrev(start_key);
-  }
-  for (;
-      iter->Valid() && iter->key().starts_with(prefix_key);
-      !reversed ? iter->Next() : iter->Prev()) {
-    InternalKey ikey(iter->key());
-    Slice score_key = ikey.GetSubKey();
-    double score;
-    GetDouble(&score_key, &score);
-    if (score == target_score && score_key == member) break;
-    rank++;
-  }
-  delete iter;
-
-  *ret = rank;
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::Overwrite(const Slice &user_key, const std::vector<MemberScore> &mscores) {
-  std::string ns_key;
-  AppendNamespacePrefix(user_key, &ns_key);
-
-  LockGuard guard(storage_->GetLockManager(), ns_key);
-  ZSetMetadata metadata;
-  rocksdb::WriteBatch batch;
-  WriteBatchLogData log_data(kRedisZSet);
-  batch.PutLogData(log_data.Encode());
-  for (const auto &ms : mscores) {
-    std::string member_key, score_bytes, score_key;
-    InternalKey(ns_key, ms.member, metadata.version).Encode(&member_key);
-    PutDouble(&score_bytes, ms.score);
-    batch.Put(member_key, score_bytes);
-    score_bytes.append(ms.member);
-    InternalKey(ns_key, score_bytes, metadata.version).Encode(&score_key);
-    batch.Put(score_cf_handle_, score_key, Slice());
-  }
-  metadata.size = static_cast<uint32_t>(mscores.size());
-  std::string bytes;
-  metadata.Encode(&bytes);
-  batch.Put(metadata_cf_handle_, ns_key, bytes);
-  return storage_->Write(rocksdb::WriteOptions(), &batch);
-}
-
-rocksdb::Status ZSet::InterStore(const Slice &dst,
-                                 const std::vector<KeyWeight> &keys_weights,
-                                 AggregateMethod aggregate_method,
-                                 int *size) {
-  if (size) *size = 0;
-
-  std::map<std::string, double> dst_zset;
-  std::map<std::string, size_t> member_counters;
-  std::vector<MemberScore> target_mscores;
-  int target_size;
-  ZRangeSpec spec;
-  auto s = RangeByScore(keys_weights[0].key, spec, &target_mscores, &target_size);
-  if (!s.ok() || target_mscores.empty()) return s;
-  for (const auto &ms : target_mscores) {
-    double score = ms.score * keys_weights[0].weight;
-    dst_zset[ms.member] = score;
-    member_counters[ms.member] = 1;
-  }
-  for (size_t i = 1; i < keys_weights.size(); i++) {
-    auto s = RangeByScore(keys_weights[i].key, spec, &target_mscores, &target_size);
-    if (!s.ok() || target_mscores.empty()) return s;
-    for (const auto &ms : target_mscores) {
-      if (dst_zset.find(ms.member) == dst_zset.end()) continue;
-      member_counters[ms.member]++;
-      double score = ms.score * keys_weights[i].weight;
-      switch (aggregate_method) {
-        case kAggregateSum:dst_zset[ms.member] += score;
-          break;
-        case kAggregateMin:
-          if (dst_zset[ms.member] > score) {
-            dst_zset[ms.member] = score;
-          }
-          break;
-        case kAggregateMax:
-          if (dst_zset[ms.member] < score) {
-            dst_zset[ms.member] = score;
-          }
-          break;
-      }
-    }
-  }
-  if (!dst_zset.empty()) {
-    std::vector<MemberScore> mscores;
-    for (const auto &iter : dst_zset) {
-      if (member_counters[iter.first] != keys_weights.size()) continue;
-      mscores.emplace_back(MemberScore{iter.first, iter.second});
-    }
-    if (size) *size = mscores.size();
-    Overwrite(dst, mscores);
-  }
-
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status ZSet::UnionStore(const Slice &dst,
-                                 const std::vector<KeyWeight> &keys_weights,
-                                 AggregateMethod aggregate_method,
-                                 int *size) {
-  if (size) *size = 0;
-
-  std::map<std::string, double> dst_zset;
-  std::vector<MemberScore> target_mscores;
-  int target_size;
-  ZRangeSpec spec;
-  for (const auto &key_weight : keys_weights) {
-    // get all members of this key
-    auto s = RangeByScore(key_weight.key, spec, &target_mscores, &target_size);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    for (const auto &ms : target_mscores) {
-      double score = ms.score * key_weight.weight;
-      if (dst_zset.find(ms.member) == dst_zset.end()) {
-        dst_zset[ms.member] = score;
-      } else {
-        switch (aggregate_method) {
-          case kAggregateSum:dst_zset[ms.member] += score;
-            break;
-          case kAggregateMin:
-            if (dst_zset[ms.member] > score) {
-              dst_zset[ms.member] = score;
-            }
-            break;
-          case kAggregateMax:
-            if (dst_zset[ms.member] < score) {
-              dst_zset[ms.member] = score;
-            }
-            break;
-        }
-      }
-    }
-  }
-  if (!dst_zset.empty()) {
-    std::vector<MemberScore> mscores;
-    for (const auto &iter : dst_zset) {
-      mscores.emplace_back(MemberScore{iter.first, iter.second});
-    }
-    if (size) *size = mscores.size();
-    Overwrite(dst, mscores);
-  }
-
-  return rocksdb::Status::OK();
-}
-
-Status ZSet::ParseRangeSpec(const std::string &min, const std::string &max, ZRangeSpec *spec) {
-  const char *sptr = nullptr;
-  char *eptr = nullptr;
-
-  if (min == "+inf" ||  max == "-inf") {
-    return Status(Status::NotOK, "min > max");
-  }
-
-  if (min == "-inf") {
-    spec->min = std::numeric_limits<double>::lowest();
-  } else {
-    sptr = min.data();
-    if (!min.empty() && min[0] == '(') {
-      spec->minex = true;
-      sptr++;
-    }
-    spec->min = strtod(sptr, &eptr);
-    if ((eptr && eptr[0] != '\0') || isnan(spec->min)) {
-      return Status(Status::NotOK, "the min isn't double");
-    }
-  }
-
-  if (max == "+inf") {
-    spec->max = std::numeric_limits<double>::max();
-  } else {
-    sptr = max.data();
-    if (!max.empty() && max[0] == '(') {
-      spec->maxex = true;
-      sptr++;
-    }
-    spec->max = strtod(sptr, &eptr);
-    if ((eptr && eptr[0] != '\0') || isnan(spec->max)) {
-      return Status(Status::NotOK, "the max isn't double");
-    }
-  }
-  return Status::OK();
-}
-
-Status ZSet::ParseRangeLexSpec(const std::string &min, const std::string &max, ZRangeLexSpec *spec) {
-  if (min == "+" || max == "-") {
-    return Status(Status::NotOK, "min > max");
-  }
-
-  if (min == "-") {
-    spec->min = "";
-  } else {
-    if (min[0] == '(') {
-      spec->minex = true;
-    } else if (min[0] == '[') {
-      spec->minex = false;
-    } else {
-      return Status(Status::NotOK, "the min is illegal");
-    }
-    spec->min = min.substr(1);
-  }
-
-  if (max == "+") {
-    spec->max_infinite = true;
-  } else {
-    if (max[0] == '(') {
-      spec->maxex = true;
-    } else if (max[0] == '[') {
-      spec->maxex = false;
-    } else {
-      return Status(Status::NotOK, "the max is illegal");
-    }
-    spec->max = max.substr(1);
-  }
-  return Status::OK();
-}
-
-rocksdb::Status ZSet::Scan(const Slice &user_key,
-                                const std::string &cursor,
-                                uint64_t limit,
-                                const std::string &member_prefix,
-                                std::vector<std::string> *members) {
-  return SubKeyScanner::Scan(kRedisZSet, user_key, cursor, limit, member_prefix, members);
-}
-
-}  // namespace Redis
diff --git a/src/redis_zset.h b/src/redis_zset.h
deleted file mode 100644
index a8a8cc3..0000000
--- a/src/redis_zset.h
+++ /dev/null
@@ -1,104 +0,0 @@
-#pragma once
-
-#include <string>
-#include <vector>
-#include <limits>
-
-#include "redis_db.h"
-#include "redis_metadata.h"
-
-enum AggregateMethod {
-  kAggregateSum,
-  kAggregateMin,
-  kAggregateMax
-};
-
-typedef struct ZRangeSpec {
-  double min, max;
-  bool minex, maxex; /* are min or max exclusive */
-  int offset, count;
-  bool removed, reversed;
-  ZRangeSpec() {
-    min = std::numeric_limits<double>::lowest();
-    max = std::numeric_limits<double>::max();
-    minex = maxex = false;
-    offset = -1; count = -1;
-    removed = reversed = false;
-  }
-} ZRangeSpec;
-
-typedef struct ZRangeLexSpec {
-  std::string min, max;
-  bool minex, maxex; /* are min or max exclusive */
-  bool max_infinite; /* is max infinite */
-  int offset, count;
-  bool removed;
-  ZRangeLexSpec() {
-    minex = maxex = false;
-    max_infinite = false;
-    offset = -1;
-    count = -1;
-    removed = false;
-  }
-} ZRangeLexSpec;
-
-typedef struct KeyWeight {
-  std::string key;
-  double weight;
-} KeyWeight;
-
-typedef struct {
-  std::string member;
-  double score;
-} MemberScore;
-
-#define ZSET_INCR 1
-#define ZSET_NX (1<<1)
-#define ZSET_XX (1<<2)
-#define ZSET_REVERSED (1<<3)
-#define ZSET_REMOVED (1<<4)
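-
-// These flags are combined with bitwise OR before being passed into ZSet::Add, e.g.
-// a "ZADD key NX INCR member" style call would roughly map to (ZSET_NX | ZSET_INCR);
-// the actual command-to-flag mapping lives in the command layer, not in this header.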
-
-namespace Redis {
-
-class ZSet : public SubKeyScanner {
- public:
-  explicit ZSet(Engine::Storage *storage, const std::string &ns) :
-      SubKeyScanner(storage, ns),
-      score_cf_handle_(storage->GetCFHandle("zset_score")) {}
-  rocksdb::Status Add(const Slice &user_key, uint8_t flags, std::vector<MemberScore> *mscores, int *ret);
-  rocksdb::Status Card(const Slice &user_key, int *ret);
-  rocksdb::Status Count(const Slice &user_key, const ZRangeSpec &spec, int *ret);
-  rocksdb::Status IncrBy(const Slice &user_key, const Slice &member, double increment, double *score);
-  rocksdb::Status Range(const Slice &user_key, int start, int stop, uint8_t flags, std::vector<MemberScore> *mscores);
-  rocksdb::Status RangeByScore(const Slice &user_key, ZRangeSpec spec, std::vector<MemberScore> *mscores, int *size);
-  rocksdb::Status RangeByLex(const Slice &user_key, ZRangeLexSpec spec, std::vector<std::string> *members, int *size);
-  rocksdb::Status Rank(const Slice &user_key, const Slice &member, bool reversed, int *ret);
-  rocksdb::Status Remove(const Slice &user_key, const std::vector<Slice> &members, int *ret);
-  rocksdb::Status RemoveRangeByScore(const Slice &user_key, ZRangeSpec spec, int *ret);
-  rocksdb::Status RemoveRangeByLex(const Slice &user_key, ZRangeLexSpec spec, int *ret);
-  rocksdb::Status RemoveRangeByRank(const Slice &user_key, int start, int stop, int *ret);
-  rocksdb::Status Pop(const Slice &user_key, int count, bool min, std::vector<MemberScore> *mscores);
-  rocksdb::Status Score(const Slice &user_key, const Slice &member, double *score);
-  static Status ParseRangeSpec(const std::string &min, const std::string &max, ZRangeSpec *spec);
-  static Status ParseRangeLexSpec(const std::string &min, const std::string &max, ZRangeLexSpec *spec);
-  rocksdb::Status Scan(const Slice &user_key,
-                       const std::string &cursor,
-                       uint64_t limit,
-                       const std::string &member_prefix,
-                       std::vector<std::string> *members);
-  rocksdb::Status Overwrite(const Slice &user_key, const std::vector<MemberScore> &mscores);
-  rocksdb::Status InterStore(const Slice &dst,
-                             const std::vector<KeyWeight> &keys_weights,
-                             AggregateMethod aggregate_method,
-                             int *size);
-  rocksdb::Status UnionStore(const Slice &dst,
-                             const std::vector<KeyWeight> &keys_weights,
-                             AggregateMethod aggregate_method,
-                             int *size);
-
- private:
-  rocksdb::ColumnFamilyHandle *score_cf_handle_;
-  rocksdb::Status GetMetadata(const Slice &ns_key, ZSetMetadata *metadata);
-};
-
-}  // namespace Redis
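-
-// A minimal usage sketch of the ZSet class above (illustrative; assumes an
-// initialized Engine::Storage *storage and a namespace string "ns", and omits
-// error handling):
-//   Redis::ZSet zset(storage, "ns");
-//   std::vector<MemberScore> mscores = {{"member-a", 1.0}, {"member-b", 2.0}};
-//   int ret = 0;
-//   zset.Add("mykey", 0 /* no flags */, &mscores, &ret);  // ret = number of new members
-//   double score = 0;
-//   zset.Score("mykey", "member-b", &score);              // score == 2.0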
diff --git a/src/replication.cc b/src/replication.cc
deleted file mode 100644
index 64f7f57..0000000
--- a/src/replication.cc
+++ /dev/null
@@ -1,802 +0,0 @@
-#include "replication.h"
-
-#include <signal.h>
-#include <arpa/inet.h>
-#include <event2/buffer.h>
-#include <event2/bufferevent.h>
-#include <event2/event.h>
-#include <glog/logging.h>
-#include <netinet/tcp.h>
-#include <future>
-#include <string>
-#include <thread>
-
-#include "redis_reply.h"
-#include "rocksdb_crc32c.h"
-#include "util.h"
-#include "status.h"
-#include "server.h"
-
-FeedSlaveThread::~FeedSlaveThread() {
-  delete conn_;
-}
-
-Status FeedSlaveThread::Start() {
-  try {
-    t_ = std::thread([this]() {
-      Util::ThreadSetName("feed-slave-thread");
-      sigset_t mask, omask;
-      sigemptyset(&mask);
-      sigemptyset(&omask);
-      sigaddset(&mask, SIGCHLD);
-      sigaddset(&mask, SIGHUP);
-      sigaddset(&mask, SIGPIPE);
-      pthread_sigmask(SIG_BLOCK, &mask, &omask);
-      // make sure the feed-slave thread is scheduled only after the fd was made
-      // blocking and the "+OK\r\n" response to the psync command was written
-      usleep(10000);
-      this->loop();
-    });
-  } catch (const std::system_error &e) {
-    conn_ = nullptr;  // prevent the connection from being freed when the thread fails to start
-    return Status(Status::NotOK, e.what());
-  }
-  return Status::OK();
-}
-
-void FeedSlaveThread::Stop() {
-  stop_ = true;
-  LOG(WARNING) << "Slave thread was terminated, would stop feeding the slave: " << conn_->GetAddr();
-}
-
-void FeedSlaveThread::Join() {
-  if (t_.joinable()) t_.join();
-}
-
-void FeedSlaveThread::checkLivenessIfNeed() {
-  if (++interval % 1000) return;
-  const auto ping_command = Redis::BulkString("ping");
-  auto s = Util::SockSend(conn_->GetFD(), ping_command);
-  if (!s.IsOK()) {
-    LOG(ERROR) << "Ping slave[" << conn_->GetAddr() << "] err: " << s.Msg()
-               << ", would stop the thread";
-    Stop();
-  }
-}
-
-void FeedSlaveThread::loop() {
-  uint32_t yield_microseconds = 2000;  // 2ms, usleep() takes microseconds
-  std::vector<std::string> batch_list;
-  while (!IsStopped()) {
-    if (!iter_ || !iter_->Valid()) {
-      if (iter_) LOG(INFO) << "WAL was rotated, would reopen again";
-      if (!srv_->storage_->WALHasNewData(next_repl_seq_)
-          || !srv_->storage_->GetWALIter(next_repl_seq_, &iter_).IsOK()) {
-        iter_ = nullptr;
-        usleep(yield_microseconds);
-        checkLivenessIfNeed();
-        continue;
-      }
-    }
-    // iter_ is always valid here
-    auto batch = iter_->GetBatch();
-    auto data = batch.writeBatchPtr->Data();
-    batch_list.emplace_back(Redis::BulkString(data));
-    // feed the bulk data to the slave in batch mode only if it lags far behind the master
-    auto latest_seq = srv_->storage_->LatestSeq();
-    if (latest_seq - batch.sequence <= 20 || batch_list.size() >= 20) {
-      for (const auto &bulk_str : batch_list) {
-        auto s = Util::SockSend(conn_->GetFD(), bulk_str);
-        if (!s.IsOK()) {
-          LOG(ERROR) << "Write error while sending batch to slave: " << s.Msg();
-          Stop();
-          return;
-        }
-      }
-      batch_list.clear();
-    }
-    if (batch.sequence != next_repl_seq_) {
-      LOG(ERROR) << "Fatal error encountered, WAL iterator is discrete, some seq might be lost";
-      Stop();
-      return;
-    }
-    next_repl_seq_ = batch.sequence + batch.writeBatchPtr->Count();
-    while (!IsStopped() && !srv_->storage_->WALHasNewData(next_repl_seq_)) {
-      usleep(yield_microseconds);
-      checkLivenessIfNeed();
-    }
-    iter_->Next();
-  }
-}
-
-void send_string(bufferevent *bev, const std::string &data) {
-  auto output = bufferevent_get_output(bev);
-  evbuffer_add(output, data.c_str(), data.length());
-}
-
-void ReplicationThread::CallbacksStateMachine::ConnEventCB(
-    bufferevent *bev, int16_t events, void *state_machine_ptr) {
-  if (events & BEV_EVENT_CONNECTED) {
-    // call write_cb when connected
-    bufferevent_data_cb write_cb;
-    bufferevent_getcb(bev, nullptr, &write_cb, nullptr, nullptr);
-    if (write_cb) write_cb(bev, state_machine_ptr);
-    return;
-  }
-  if (events & (BEV_EVENT_ERROR | BEV_EVENT_EOF)) {
-    LOG(ERROR) << "[replication] connection error/eof, reconnect the master";
-    // Wait a bit and reconnect
-    auto state_m = static_cast<CallbacksStateMachine *>(state_machine_ptr);
-    state_m->repl_->repl_state_ = kReplConnecting;
-    std::this_thread::sleep_for(std::chrono::seconds(1));
-    state_m->Stop();
-    state_m->Start();
-  }
-}
-
-void ReplicationThread::CallbacksStateMachine::SetReadCB(
-    bufferevent *bev, bufferevent_data_cb cb, void *state_machine_ptr) {
-  bufferevent_enable(bev, EV_READ);
-  bufferevent_setcb(bev, cb, nullptr, ConnEventCB, state_machine_ptr);
-}
-
-void ReplicationThread::CallbacksStateMachine::SetWriteCB(
-    bufferevent *bev, bufferevent_data_cb cb, void *state_machine_ptr) {
-  bufferevent_enable(bev, EV_WRITE);
-  bufferevent_setcb(bev, nullptr, cb, ConnEventCB, state_machine_ptr);
-}
-
-ReplicationThread::CallbacksStateMachine::CallbacksStateMachine(
-    ReplicationThread *repl,
-    ReplicationThread::CallbacksStateMachine::CallbackList &&handlers)
-    : repl_(repl), handlers_(std::move(handlers)) {
-  if (!repl_->auth_.empty()) {
-    handlers_.emplace_front(CallbacksStateMachine::READ, "auth read", authReadCB);
-    handlers_.emplace_front(CallbacksStateMachine::WRITE, "auth write", authWriteCB);
-  }
-}
-
-void ReplicationThread::CallbacksStateMachine::EvCallback(bufferevent *bev,
-                                                          void *ctx) {
-  auto self = static_cast<CallbacksStateMachine *>(ctx);
-LOOP_LABEL:
-  assert(self->handler_idx_ <= self->handlers_.size());
-  DLOG(INFO) << "[replication] Execute handler[" << self->getHandlerName(self->handler_idx_) << "]";
-  auto st = self->getHandlerFunc(self->handler_idx_)(bev, self->repl_);
-  time(&self->repl_->last_io_time_);
-  switch (st) {
-    case CBState::NEXT:
-      ++self->handler_idx_;
-      if (self->getHandlerEventType(self->handler_idx_) == WRITE) {
-        SetWriteCB(bev, EvCallback, ctx);
-      } else {
-        SetReadCB(bev, EvCallback, ctx);
-      }
-      // invoke the read handler (of next step) directly, as the bev might
-      // have the data already.
-      goto LOOP_LABEL;
-    case CBState::AGAIN:
-      break;
-    case CBState::QUIT:  // state that cannot be retried, or all steps were executed.
-      bufferevent_free(bev);
-      self->bev_ = nullptr;
-      self->repl_->repl_state_ = kReplError;
-      break;
-    case CBState::RESTART:  // state that can be retried some time later
-      self->Stop();
-      if (self->repl_->stop_flag_) {
-        LOG(INFO) << "[replication] Wouldn't restart while the replication thread was stopped";
-        break;
-      }
-      LOG(INFO) << "[replication] Retry in 10 seconds";
-      std::this_thread::sleep_for(std::chrono::seconds(10));
-      self->Start();
-  }
-}
-
-void ReplicationThread::CallbacksStateMachine::Start() {
-  if (handlers_.empty()) {
-    return;
-  }
-  auto sockaddr_inet = Util::NewSockaddrInet(repl_->host_, repl_->port_);
-  auto bev = bufferevent_socket_new(repl_->base_, -1, BEV_OPT_CLOSE_ON_FREE);
-  if (bufferevent_socket_connect(bev,
-                                 reinterpret_cast<sockaddr *>(&sockaddr_inet),
-                                 sizeof(sockaddr_inet)) != 0) {
-    // NOTE: connection errors will not appear here; network errors are reported
-    // in ConnEventCB. An error here is something fatal.
-    LOG(ERROR) << "[replication] Failed to start state machine, err: " << strerror(errno);
-  }
-  handler_idx_ = 0;
-  if (getHandlerEventType(0) == WRITE) {
-    SetWriteCB(bev, EvCallback, this);
-  } else {
-    SetReadCB(bev, EvCallback, this);
-  }
-  bev_ = bev;
-}
-
-void ReplicationThread::CallbacksStateMachine::Stop() {
-  if (bev_) {
-    bufferevent_free(bev_);
-    bev_ = nullptr;
-  }
-}
-
-ReplicationThread::ReplicationThread(std::string host, uint32_t port,
-                                     Server *srv, std::string auth)
-    : host_(std::move(host)),
-      port_(port),
-      auth_(std::move(auth)),
-      srv_(srv),
-      storage_(srv->storage_),
-      repl_state_(kReplConnecting),
-      psync_steps_(this,
-                   CallbacksStateMachine::CallbackList{
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::WRITE, "dbname write", checkDBNameWriteCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::READ, "dbname read", checkDBNameReadCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::WRITE, "replconf write", replConfWriteCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::READ, "replconf read", replConfReadCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::WRITE, "psync write", tryPSyncWriteCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::READ, "psync read", tryPSyncReadCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::READ, "batch loop", incrementBatchLoopCB
-                       }
-                   }),
-      fullsync_steps_(this,
-                      CallbacksStateMachine::CallbackList{
-                          CallbacksStateMachine::CallbackType{
-                              CallbacksStateMachine::WRITE, "fullsync write", fullSyncWriteCB
-                          },
-                          CallbacksStateMachine::CallbackType{
-                              CallbacksStateMachine::READ, "fullsync read", fullSyncReadCB}
-                      }) {
-}
-
-Status ReplicationThread::Start(std::function<void()> &&pre_fullsync_cb,
-                                std::function<void()> &&post_fullsync_cb) {
-  pre_fullsync_cb_ = std::move(pre_fullsync_cb);
-  post_fullsync_cb_ = std::move(post_fullsync_cb);
-
-  // Remove the backup_dir, so we can start replication in a clean state
-  if (!Engine::Storage::BackupManager::PurgeBackup(storage_).IsOK()) {
-    return Status(Status::NotOK, "can't delete the existed backup dir");
-  }
-
-  try {
-    t_ = std::thread([this]() {
-      Util::ThreadSetName("master-repl");
-      this->run();
-      assert(stop_flag_);
-    });
-  } catch (const std::system_error &e) {
-    return Status(Status::NotOK, e.what());
-  }
-  return Status::OK();
-}
-
-void ReplicationThread::Stop() {
-  if (stop_flag_) return;
-
-  stop_flag_ = true;  // Stopping procedure is asynchronous,
-                      // handled by timer
-  t_.join();
-  LOG(INFO) << "[replication] Stopped";
-}
-
-/*
- * Connect to the master and run the following steps asynchronously:
- *  - CheckDBName
- *  - TryPsync
- *    - if ok, IncrementBatchLoop
- *    - if not, FullSync, then restart TryPsync when done
- */
-void ReplicationThread::run() {
-  base_ = event_base_new();
-  if (base_ == nullptr) {
-    LOG(ERROR) << "[replication] Failed to create new ev base";
-    return;
-  }
-  psync_steps_.Start();
-
-  auto timer = event_new(base_, -1, EV_PERSIST, EventTimerCB, this);
-  timeval tmo{0, 100000};  // 100 ms
-  evtimer_add(timer, &tmo);
-
-  event_base_dispatch(base_);
-  event_free(timer);
-  event_base_free(base_);
-}
-
-ReplicationThread::CBState ReplicationThread::authWriteCB(bufferevent *bev,
-                                                          void *ctx) {
-  auto self = static_cast<ReplicationThread *>(ctx);
-  const auto auth_len_str = std::to_string(self->auth_.length());
-  send_string(bev, Redis::MultiBulkString({"AUTH", self->auth_}));
-  LOG(INFO) << "[replication] Auth request was sent, waiting for response";
-  self->repl_state_ = kReplSendAuth;
-  return CBState::NEXT;
-}
-
-ReplicationThread::CBState ReplicationThread::authReadCB(bufferevent *bev,
-                                                         void *ctx) {
-  char *line;
-  size_t line_len;
-  auto input = bufferevent_get_input(bev);
-  line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-  if (!line) return CBState::AGAIN;
-  if (strncmp(line, "+OK", 3) != 0) {
-    // Auth failed
-    LOG(ERROR) << "[replication] Auth failed: " << line;
-    free(line);
-    auto self = static_cast<ReplicationThread *>(ctx);
-    self->srv_->ResetMaster();
-    return CBState::QUIT;
-  }
-  free(line);
-  LOG(INFO) << "[replication] Auth response was received, continue...";
-  return CBState::NEXT;
-}
-
-ReplicationThread::CBState ReplicationThread::checkDBNameWriteCB(
-    bufferevent *bev, void *ctx) {
-  send_string(bev, Redis::MultiBulkString({"_db_name"}));
-  auto self = static_cast<ReplicationThread *>(ctx);
-  self->repl_state_ = kReplCheckDBName;
-  LOG(INFO) << "[replication] Check db name request was sent, waiting for response";
-  return CBState::NEXT;
-}
-
-ReplicationThread::CBState ReplicationThread::checkDBNameReadCB(
-    bufferevent *bev, void *ctx) {
-  char *line;
-  size_t line_len;
-  auto input = bufferevent_get_input(bev);
-  line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-  if (!line) return CBState::AGAIN;
-
-  if (line[0] == '-' && isRestoringError(line)) {
-    free(line);
-    LOG(WARNING) << "The master was restoring the db, retry later";
-    return CBState::RESTART;
-  }
-  auto self = static_cast<ReplicationThread *>(ctx);
-  std::string db_name = self->storage_->GetName();
-  if (line_len == db_name.size() && !strncmp(line, db_name.data(), line_len)) {
-    // DB name match, we should continue to next step: TryPsync
-    free(line);
-    LOG(INFO) << "[replication] DB name is valid, continue...";
-    return CBState::NEXT;
-  }
-  LOG(ERROR) << "[replication] db-name mismatched, remote db name: " << line;
-  free(line);
-  self->srv_->ResetMaster();
-  return CBState::QUIT;
-}
-
-ReplicationThread::CBState ReplicationThread::replConfWriteCB(
-    bufferevent *bev, void *ctx) {
-  auto self = static_cast<ReplicationThread *>(ctx);
-  send_string(bev,
-              Redis::MultiBulkString({"replconf", "listening-port", std::to_string(self->srv_->GetConfig()->port)}));
-  self->repl_state_ = kReplReplConf;
-  LOG(INFO) << "[replication] replconf request was sent, waiting for response";
-  return CBState::NEXT;
-}
-
-ReplicationThread::CBState ReplicationThread::replConfReadCB(
-    bufferevent *bev, void *ctx) {
-  char *line;
-  size_t line_len;
-  auto input = bufferevent_get_input(bev);
-  line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-  if (!line) return CBState::AGAIN;
-
-  if (line[0] == '-' && isRestoringError(line)) {
-    free(line);
-    LOG(WARNING) << "The master was restoring the db, retry later";
-    return CBState::RESTART;
-  }
-  if (strncmp(line, "+OK", 3) != 0) {
-    LOG(WARNING) << "[replication] Failed to replconf: " << line;
-    free(line);
-    // stay backward compatible with old versions that don't support the replconf command
-    return CBState::NEXT;
-  } else {
-    free(line);
-    LOG(INFO) << "[replication] replconf is ok, start psync";
-    return CBState::NEXT;
-  }
-}
-
-ReplicationThread::CBState ReplicationThread::tryPSyncWriteCB(
-    bufferevent *bev, void *ctx) {
-  auto self = static_cast<ReplicationThread *>(ctx);
-  auto next_seq = self->storage_->LatestSeq() + 1;
-  send_string(bev, Redis::MultiBulkString({"PSYNC", std::to_string(next_seq)}));
-  self->repl_state_ = kReplSendPSync;
-  LOG(INFO) << "[replication] Try to use psync, next seq: " << next_seq;
-  return CBState::NEXT;
-}
-
-ReplicationThread::CBState ReplicationThread::tryPSyncReadCB(bufferevent *bev,
-                                                             void *ctx) {
-  char *line;
-  size_t line_len;
-  auto self = static_cast<ReplicationThread *>(ctx);
-  auto input = bufferevent_get_input(bev);
-  line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-  if (!line) return CBState::AGAIN;
-
-  if (line[0] == '-' && isRestoringError(line)) {
-    free(line);
-    LOG(WARNING) << "The master was restoring the db, retry later";
-    return CBState::RESTART;
-  }
-  if (strncmp(line, "+OK", 3) != 0) {
-    // PSYNC isn't OK, we should use FullSync
-    // Switch to fullsync state machine
-    self->fullsync_steps_.Start();
-    LOG(INFO) << "[replication] Failed to psync, switch to fullsync";
-    free(line);
-    return CBState::QUIT;
-  } else {
-    // PSYNC is OK, use IncrementBatchLoop
-    free(line);
-    LOG(INFO) << "[replication] PSync is ok, start increment batch loop";
-    return CBState::NEXT;
-  }
-}
-
-ReplicationThread::CBState ReplicationThread::incrementBatchLoopCB(
-    bufferevent *bev, void *ctx) {
-  char *line = nullptr;
-  size_t line_len = 0;
-  char *bulk_data = nullptr;
-  auto self = static_cast<ReplicationThread *>(ctx);
-  self->repl_state_ = kReplConnected;
-  auto input = bufferevent_get_input(bev);
-  while (true) {
-    switch (self->incr_state_) {
-      case Incr_batch_size:
-        // Read bulk length
-        line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-        if (!line) return CBState::AGAIN;
-        self->incr_bulk_len_ = line_len > 0 ? std::strtoull(line + 1, nullptr, 10) : 0;
-        free(line);
-        if (self->incr_bulk_len_ == 0) {
-          LOG(ERROR) << "[replication] Invalid increment data size";
-          return CBState::RESTART;
-        }
-        self->incr_state_ = Incr_batch_data;
-        break;
-      case Incr_batch_data:
-        // Read bulk data (batch data)
-        if (self->incr_bulk_len_+2 <= evbuffer_get_length(input)) {  // We got enough data
-          bulk_data = reinterpret_cast<char *>(evbuffer_pullup(input, self->incr_bulk_len_ + 2));
-          std::string bulk_string = std::string(bulk_data, self->incr_bulk_len_);
-          // the master sends a ping heartbeat packet to check whether the slave is alive,
-          // so don't write the ping to the db here.
-          if (bulk_string != "ping") {
-            auto s = self->storage_->WriteBatch(std::string(bulk_data, self->incr_bulk_len_));
-            if (!s.IsOK()) {
-              LOG(ERROR) << "[replication] CRITICAL - Failed to write batch to local, err: " << s.Msg();
-              self->stop_flag_ = true;  // This is a very critical error, data might be corrupted
-              self->srv_->ResetMaster();
-              return CBState::QUIT;
-            }
-            self->ParseWriteBatch(bulk_string);
-          }
-          evbuffer_drain(input, self->incr_bulk_len_ + 2);
-          self->incr_state_ = Incr_batch_size;
-        } else {
-          return CBState::AGAIN;
-        }
-        break;
-    }
-  }
-}
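-
-// As implemented above, the increment stream is a sequence of RESP-style bulk
-// strings: a "$<length>\r\n" size line, then <length> bytes of a raw rocksdb
-// WriteBatch (or the literal heartbeat payload "ping"), then a trailing "\r\n":
-//   $4\r\nping\r\n
-//   $<n>\r\n<write batch bytes>\r\n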
-
-ReplicationThread::CBState ReplicationThread::fullSyncWriteCB(
-    bufferevent *bev, void *ctx) {
-  send_string(bev, Redis::MultiBulkString({"_fetch_meta"}));
-  auto self = static_cast<ReplicationThread *>(ctx);
-  self->repl_state_ = kReplFetchMeta;
-  LOG(INFO) << "[replication] Start syncing data with fullsync";
-  return CBState::NEXT;
-}
-
-ReplicationThread::CBState ReplicationThread::fullSyncReadCB(bufferevent *bev,
-                                                             void *ctx) {
-  char *line;
-  size_t line_len;
-  auto self = static_cast<ReplicationThread *>(ctx);
-  auto input = bufferevent_get_input(bev);
-  switch (self->fullsync_state_) {
-    case kFetchMetaID:
-      line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-      if (!line) return CBState::AGAIN;
-      if (line[0] == '-') {
-        LOG(ERROR) << "[replication] Failed to fetch meta id: " << line;
-        free(line);
-        return CBState::RESTART;
-      }
-      self->fullsync_meta_id_ = static_cast<rocksdb::BackupID>(
-          line_len > 0 ? std::strtoul(line, nullptr, 10) : 0);
-      free(line);
-      if (self->fullsync_meta_id_ == 0) {
-        LOG(ERROR) << "[replication] Invalid meta id received";
-        return CBState::RESTART;
-      }
-      self->storage_->PurgeBackupIfNeed(self->fullsync_meta_id_);
-      self->fullsync_state_ = kFetchMetaSize;
-      LOG(INFO) << "[replication] Success to fetch meta id: " << self->fullsync_meta_id_;
-    case kFetchMetaSize:
-      line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-      if (!line) return CBState::AGAIN;
-      if (line[0] == '-') {
-        LOG(ERROR) << "[replication] Failed to fetch meta size: " << line;
-        free(line);
-        return CBState::RESTART;
-      }
-      self->fullsync_filesize_ = line_len > 0 ? std::strtoull(line, nullptr, 10) : 0;
-      free(line);
-      if (self->fullsync_filesize_ == 0) {
-        LOG(ERROR) << "[replication] Invalid meta file size received";
-        return CBState::RESTART;
-      }
-      self->fullsync_state_ = kFetchMetaContent;
-      LOG(INFO) << "[replication] Success to fetch meta size: " << self->fullsync_filesize_;
-    case kFetchMetaContent:
-      if (evbuffer_get_length(input) < self->fullsync_filesize_) {
-        return CBState::AGAIN;
-      }
-      auto meta = Engine::Storage::BackupManager::ParseMetaAndSave(
-          self->storage_, self->fullsync_meta_id_, input);
-      assert(evbuffer_get_length(input) == 0);
-      self->fullsync_state_ = kFetchMetaID;
-
-      LOG(INFO) << "[replication] Succeeded fetching meta file, fetching files in parallel";
-      self->repl_state_ = kReplFetchSST;
-      auto s = self->parallelFetchFile(meta.files);
-      if (!s.IsOK()) {
-        LOG(ERROR) << "[replication] Failed to parallel fetch files while " + s.Msg();
-        return CBState::RESTART;
-      }
-      LOG(INFO) << "[replication] Succeeded fetching files in parallel, restoring the backup";
-
-      // Restore DB from backup
-      self->pre_fullsync_cb_();
-      s = self->storage_->RestoreFromBackup();
-      if (!s.IsOK()) {
-        LOG(ERROR) << "[replication] Failed to restore backup while " + s.Msg();
-        self->post_fullsync_cb_();
-        return CBState::RESTART;
-      }
-      LOG(INFO) << "[replication] Succeeded restoring the backup, fullsync was finish";
-      self->post_fullsync_cb_();
-
-      // Switch to psync state machine again
-      self->psync_steps_.Start();
-      return CBState::QUIT;
-  }
-
-  LOG(ERROR) << "Should not arrive here";
-  assert(false);
-  return CBState::QUIT;
-}
-
-Status ReplicationThread::parallelFetchFile(const std::vector<std::pair<std::string, uint32_t>> &files) {
-  size_t concurrency = 1;
-  if (files.size() > 20) {
-    // Use 4 threads to download files in parallel
-    concurrency = 4;
-  }
-  std::atomic<uint32_t> fetch_cnt = {0};
-  std::atomic<uint32_t> skip_cnt = {0};
-  std::vector<std::future<Status>> results;
-  for (size_t tid = 0; tid < concurrency; ++tid) {
-    results.push_back(std::async(
-        std::launch::async, [this, &files, tid, concurrency, &fetch_cnt, &skip_cnt]() -> Status {
-          if (this->stop_flag_) {
-            return Status(Status::NotOK, "replication thread was stopped");
-          }
-          int sock_fd;
-          Status s = Util::SockConnect(this->host_, this->port_, &sock_fd);
-          if (!s.IsOK()) {
-            return Status(Status::NotOK, "connect the server err: " + s.Msg());
-          }
-          s = this->sendAuth(sock_fd);
-          if (!s.IsOK()) {
-            close(sock_fd);
-            return Status(Status::NotOK, "sned the auth command err: " + s.Msg());
-          }
-          for (auto f_idx = tid; f_idx < files.size(); f_idx += concurrency) {
-            if (this->stop_flag_) {
-              return Status(Status::NotOK, "replication thread was stopped");
-            }
-            const auto &f_name = files[f_idx].first;
-            const auto &f_crc = files[f_idx].second;
-            // Don't fetch existing files
-            if (Engine::Storage::BackupManager::FileExists(this->storage_, f_name)) {
-              skip_cnt.fetch_add(1);
-              uint32_t cur_skip_cnt = skip_cnt.load();
-              uint32_t cur_fetch_cnt = fetch_cnt.load();
-              LOG(INFO) << "[skip] "<< f_name << " " << f_crc
-                        << ", skip count: " << cur_skip_cnt << ", fetch count: " << cur_fetch_cnt
-                        << ", progress: " << cur_skip_cnt+cur_fetch_cnt<< "/" << files.size();
-              continue;
-            }
-            fetch_cnt.fetch_add(1);
-            uint32_t cur_skip_cnt = skip_cnt.load();
-            uint32_t cur_fetch_cnt = fetch_cnt.load();
-            DLOG(INFO) << "[fetch] " << f_name << " " << f_crc
-                       << ", skip count: " << cur_skip_cnt << ", fetch count: " << cur_fetch_cnt
-                       << ", progress: " << cur_skip_cnt+cur_fetch_cnt<< "/" << files.size();
-            s = this->fetchFile(sock_fd, f_name, f_crc);
-            if (!s.IsOK()) {
-              close(sock_fd);
-              return Status(Status::NotOK, "fetch file err: " + s.Msg());
-            }
-          }
-          close(sock_fd);
-          return Status::OK();
-        }));
-  }
-
-  // Wait until all fetch tasks finish
-  for (auto &f : results) {
-    Status s = f.get();
-    if (!s.IsOK()) return s;
-  }
-  return Status::OK();
-}
-
-Status ReplicationThread::sendAuth(int sock_fd) {
-  size_t line_len;
-
-  // Send auth when needed
-  if (!auth_.empty()) {
-    evbuffer *evbuf = evbuffer_new();
-    const auto auth_command = Redis::MultiBulkString({"AUTH", auth_});
-    auto s = Util::SockSend(sock_fd, auth_command);
-    if (!s.IsOK()) return Status(Status::NotOK, "send auth command err:"+s.Msg());
-    while (true) {
-      if (evbuffer_read(evbuf, sock_fd, -1) <= 0) {
-        evbuffer_free(evbuf);
-        return Status(Status::NotOK, std::string("read auth response err: ")+strerror(errno));
-      }
-      char *line = evbuffer_readln(evbuf, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-      if (!line) continue;
-      if (strncmp(line, "+OK", 3) != 0) {
-        free(line);
-        evbuffer_free(evbuf);
-        return Status(Status::NotOK, "auth got invalid response");
-      }
-      free(line);
-      break;
-    }
-    evbuffer_free(evbuf);
-  }
-  return Status::OK();
-}
-
-
-Status ReplicationThread::fetchFile(int sock_fd, std::string path,
-                                    uint32_t crc) {
-  size_t line_len, file_size;
-
-  const auto fetch_command = Redis::MultiBulkString({"_fetch_file", path});
-  auto s = Util::SockSend(sock_fd, fetch_command);
-  if (!s.IsOK()) return Status(Status::NotOK, "send fetch file command err: "+s.Msg());
-
-  evbuffer *evbuf = evbuffer_new();
-  // Read file size line
-  while (true) {
-    if (evbuffer_read(evbuf, sock_fd, -1) <= 0) {
-      evbuffer_free(evbuf);
-      return Status(Status::NotOK, std::string("read size line err: ")+strerror(errno));
-    }
-    char *line = evbuffer_readln(evbuf, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-    if (!line) continue;
-    if (*line == '-') {
-      // copy the error line before freeing it to avoid a use-after-free
-      std::string err_msg = std::string("_fetch_file got err: ") + line;
-      free(line);
-      evbuffer_free(evbuf);
-      return Status(Status::NotOK, err_msg);
-    }
-    file_size = line_len > 0 ? std::strtoull(line, nullptr, 10) : 0;
-    free(line);
-    break;
-  }
-
-  // Write to tmp file
-  auto tmp_file = Engine::Storage::BackupManager::NewTmpFile(storage_, path);
-  if (!tmp_file) {
-    evbuffer_free(evbuf);
-    return Status(Status::NotOK, "unable to create tmp file");
-  }
-
-  size_t seen_bytes = 0;
-  uint32_t tmp_crc = 0;
-  char data[1024];
-  while (seen_bytes < file_size) {
-    if (evbuffer_get_length(evbuf) > 0) {
-      auto data_len = evbuffer_remove(evbuf, data, 1024);
-      if (data_len == 0) continue;
-      if (data_len < 0) {
-        evbuffer_free(evbuf);
-        return Status(Status::NotOK, "read sst file data error");
-      }
-      tmp_file->Append(rocksdb::Slice(data, data_len));
-      tmp_crc = rocksdb::crc32c::Extend(tmp_crc, data, data_len);
-      seen_bytes += data_len;
-    } else {
-      if (evbuffer_read(evbuf, sock_fd, -1) <= 0) {
-        evbuffer_free(evbuf);
-        return Status(Status::NotOK, std::string("read sst file data, err: ")+strerror(errno));
-      }
-    }
-  }
-  if (crc != tmp_crc) {
-    evbuffer_free(evbuf);
-    return Status(Status::NotOK, "CRC mismatch");
-  }
-  evbuffer_free(evbuf);
-  // File is OK, rename to formal name
-  return Engine::Storage::BackupManager::SwapTmpFile(storage_, path);
-}
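-
-// fetchFile above speaks a simple blocking exchange with the master (the serving
-// side of _fetch_file lives outside this file, so this is a sketch of what the
-// code here expects):
-//   slave  -> master: *2\r\n$11\r\n_fetch_file\r\n$<n>\r\n<path>\r\n
-//   master -> slave : "<file size>\r\n" followed by exactly <file size> raw bytes
-// The bytes are appended to a tmp file, CRC32C-checked against the value from the
-// backup meta, and the tmp file is swapped into place only if the checksums match.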
-
-// Check if stop_flag_ is set; if so, tear down the replication
-void ReplicationThread::EventTimerCB(int, int16_t, void *ctx) {
-  // DLOG(INFO) << "[replication] timer";
-  auto self = static_cast<ReplicationThread *>(ctx);
-  if (self->stop_flag_) {
-    LOG(INFO) << "[replication] Stop ev loop";
-    event_base_loopbreak(self->base_);
-    self->psync_steps_.Stop();
-    self->fullsync_steps_.Stop();
-  }
-}
-
-rocksdb::Status ReplicationThread::ParseWriteBatch(const std::string &batch_string) {
-  rocksdb::WriteBatch write_batch(batch_string);
-  WriteBatchHandler write_batch_handler;
-  rocksdb::Status status;
-
-  status = write_batch.Iterate(&write_batch_handler);
-  if (!status.ok()) return status;
-  if (write_batch_handler.IsPublish()) {
-    srv_->PublishMessage(write_batch_handler.GetPublishChannel().ToString(),
-                         write_batch_handler.GetPublishValue().ToString());
-  }
-
-  return rocksdb::Status::OK();
-}
-
-bool ReplicationThread::isRestoringError(const char *err) {
-  return std::string(err) == "-ERR restoring the db from backup";
-}
-
-rocksdb::Status WriteBatchHandler::PutCF(uint32_t column_family_id, const rocksdb::Slice &key,
-                                         const rocksdb::Slice &value) {
-  if (column_family_id != kColumnFamilyIDPubSub) {
-    return rocksdb::Status::OK();
-  }
-
-  publish_message_ = std::make_pair(key.ToString(), value.ToString());
-  is_publish_ = true;
-  return rocksdb::Status::OK();
-}
diff --git a/src/replication.h b/src/replication.h
deleted file mode 100644
index 52953e6..0000000
--- a/src/replication.h
+++ /dev/null
@@ -1,188 +0,0 @@
-#pragma once
-
-#include <event2/bufferevent.h>
-#include <thread>
-#include <vector>
-#include <utility>
-#include <memory>
-#include <tuple>
-#include <string>
-#include <deque>
-
-#include "status.h"
-#include "storage.h"
-#include "redis_connection.h"
-
-class Server;
-
-enum ReplState {
-  kReplConnecting = 1,
-  kReplSendAuth,
-  kReplCheckDBName,
-  kReplReplConf,
-  kReplSendPSync,
-  kReplFetchMeta,
-  kReplFetchSST,
-  kReplConnected,
-  kReplError,
-};
-
-
-class FeedSlaveThread {
- public:
-  explicit FeedSlaveThread(Server *srv, Redis::Connection *conn, rocksdb::SequenceNumber next_repl_seq)
-      : srv_(srv), conn_(conn), next_repl_seq_(next_repl_seq) {}
-  ~FeedSlaveThread();
-
-  Status Start();
-  void Stop();
-  void Join();
-  bool IsStopped() { return stop_; }
-  Redis::Connection *GetConn() { return conn_; }
-  rocksdb::SequenceNumber GetCurrentReplSeq() { return next_repl_seq_ == 0 ? 0 : next_repl_seq_-1; }
-
- private:
-  uint64_t interval = 0;
-  bool stop_ = false;
-  Server *srv_ = nullptr;
-  Redis::Connection *conn_ = nullptr;
-  rocksdb::SequenceNumber next_repl_seq_ = 0;
-  std::thread t_;
-  std::unique_ptr<rocksdb::TransactionLogIterator> iter_ = nullptr;
-
-  void loop();
-  void checkLivenessIfNeed();
-};
-
-class ReplicationThread {
- public:
-  explicit ReplicationThread(std::string host, uint32_t port,
-                             Server *srv, std::string auth = "");
-  Status Start(std::function<void()> &&pre_fullsync_cb,
-               std::function<void()> &&post_fullsync_cb);
-  void Stop();
-  ReplState State() { return repl_state_; }
-  time_t LastIOTime() { return last_io_time_; }
-
- protected:
-  event_base *base_ = nullptr;
-
-  // The state machine to manage the asynchronous steps used in replication
-  class CallbacksStateMachine {
-   public:
-    enum class State {
-      NEXT,
-      AGAIN,
-      QUIT,
-      RESTART,
-    };
-    enum EventType {
-      READ,
-      WRITE,
-    };
-    using CallbackType = std::tuple<EventType, std::string, std::function<State(bufferevent *, void *)>>;
-    using CallbackList = std::deque<CallbackType>;
-    CallbacksStateMachine(ReplicationThread *repl, CallbackList &&handlers);
-
-    void Start();
-    void Stop();
-    static void EvCallback(bufferevent *bev, void *ctx);
-    static void ConnEventCB(bufferevent *bev, int16_t events,
-                            void *state_machine_ptr);
-    static void SetReadCB(bufferevent *bev, bufferevent_data_cb cb,
-                          void *state_machine_ptr);
-    static void SetWriteCB(bufferevent *bev, bufferevent_data_cb cb,
-                           void *state_machine_ptr);
-
-   private:
-    bufferevent *bev_ = nullptr;
-    ReplicationThread *repl_;
-    CallbackList handlers_;
-    CallbackList::size_type handler_idx_ = 0;
-
-    EventType getHandlerEventType(CallbackList::size_type idx) {
-      return std::get<0>(handlers_[idx]);
-    }
-    std::string getHandlerName(CallbackList::size_type idx) {
-      return std::get<1>(handlers_[idx]);
-    }
-    std::function<State(bufferevent *, void *)> getHandlerFunc(CallbackList::size_type idx) {
-      return std::get<2>(handlers_[idx]);
-    }
-  };
-
- private:
-  std::thread t_;
-  bool stop_flag_ = false;
-  std::string host_;
-  uint32_t port_;
-  std::string auth_;
-  Server *srv_ = nullptr;
-  Engine::Storage *storage_ = nullptr;
-  ReplState repl_state_;
-  time_t last_io_time_ = 0;
-
-  std::function<void()> pre_fullsync_cb_;
-  std::function<void()> post_fullsync_cb_;
-
-  // Internal states managed by FullSync procedure
-  enum FullSyncState {
-    kFetchMetaID,
-    kFetchMetaSize,
-    kFetchMetaContent,
-  } fullsync_state_ = kFetchMetaID;
-  rocksdb::BackupID fullsync_meta_id_ = 0;
-  size_t fullsync_filesize_ = 0;
-
-  // Internal states managed by IncrementBatchLoop procedure
-  enum IncrementBatchLoopState {
-    Incr_batch_size,
-    Incr_batch_data,
-  } incr_state_ = Incr_batch_size;
-
-  size_t incr_bulk_len_ = 0;
-
-  using CBState = CallbacksStateMachine::State;
-  CallbacksStateMachine psync_steps_;
-  CallbacksStateMachine fullsync_steps_;
-
-  void run();
-
-  static CBState authWriteCB(bufferevent *bev, void *ctx);
-  static CBState authReadCB(bufferevent *bev, void *ctx);
-  static CBState checkDBNameWriteCB(bufferevent *bev, void *ctx);
-  static CBState checkDBNameReadCB(bufferevent *bev, void *ctx);
-  static CBState replConfWriteCB(bufferevent *bev, void *ctx);
-  static CBState replConfReadCB(bufferevent *bev, void *ctx);
-  static CBState tryPSyncWriteCB(bufferevent *bev, void *ctx);
-  static CBState tryPSyncReadCB(bufferevent *bev, void *ctx);
-  static CBState incrementBatchLoopCB(bufferevent *bev, void *ctx);
-  static CBState fullSyncWriteCB(bufferevent *bev, void *ctx);
-  static CBState fullSyncReadCB(bufferevent *bev, void *ctx);
-
-  // Synchronized-Blocking ops
-  Status sendAuth(int sock_fd);
-  Status fetchFile(int sock_fd, std::string path, uint32_t crc);
-  Status parallelFetchFile(const std::vector<std::pair<std::string, uint32_t>> &files);
-  static bool isRestoringError(const char *err);
-
-  static void EventTimerCB(int, int16_t, void *ctx);
-
-  rocksdb::Status ParseWriteBatch(const std::string &batch_string);
-};
-
-/*
- * An extractor that extracts updates from a raw write batch
- */
-class WriteBatchHandler : public rocksdb::WriteBatch::Handler {
- public:
-  rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice &key,
-                        const rocksdb::Slice &value) override;
-
-  rocksdb::Slice GetPublishChannel() { return publish_message_.first; }
-  rocksdb::Slice GetPublishValue() { return publish_message_.second; }
-  bool IsPublish() { return is_publish_; }
- private:
-  std::pair<std::string, std::string> publish_message_;
-  bool is_publish_ = false;
-};
diff --git a/src/rocksdb_crc32c.h b/src/rocksdb_crc32c.h
deleted file mode 100644
index b3f6e6f..0000000
--- a/src/rocksdb_crc32c.h
+++ /dev/null
@@ -1,51 +0,0 @@
-//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
-//  This source code is licensed under both the GPLv2 (found in the
-//  COPYING file in the root directory) and Apache 2.0 License
-//  (found in the LICENSE.Apache file in the root directory).
-//
-// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file. See the AUTHORS file for names of contributors.
-
-// NOTE: this file is copied from rocksdb's source: `util/crc32c.h`
-
-#pragma once
-#include <stddef.h>
-#include <stdint.h>
-#include <string>
-
-namespace rocksdb {
-namespace crc32c {
-
-extern std::string IsFastCrc32Supported();
-
-// Return the crc32c of concat(A, data[0,n-1]) where init_crc is the
-// crc32c of some string A.  Extend() is often used to maintain the
-// crc32c of a stream of data.
-extern uint32_t Extend(uint32_t init_crc, const char* data, size_t n);
-
-// Return the crc32c of data[0,n-1]
-inline uint32_t Value(const char* data, size_t n) {
-  return Extend(0, data, n);
-}
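-
-// Example of maintaining a streaming checksum with Extend(), as the replication
-// fetchFile path does (illustrative only):
-//   uint32_t crc = 0;
-//   for (const auto &chunk : chunks) {
-//     crc = Extend(crc, chunk.data(), chunk.size());
-//   }
-//   // crc now equals Value() of all chunks concatenated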
-
-static const uint32_t kMaskDelta = 0xa282ead8ul;
-
-// Return a masked representation of crc.
-//
-// Motivation: it is problematic to compute the CRC of a string that
-// contains embedded CRCs.  Therefore we recommend that CRCs stored
-// somewhere (e.g., in files) should be masked before being stored.
-inline uint32_t Mask(uint32_t crc) {
-  // Rotate right by 15 bits and add a constant.
-  return ((crc >> 15) | (crc << 17)) + kMaskDelta;
-}
-
-// Return the crc whose masked representation is masked_crc.
-inline uint32_t Unmask(uint32_t masked_crc) {
-  uint32_t rot = masked_crc - kMaskDelta;
-  return ((rot >> 17) | (rot << 15));
-}
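-
-// Mask() and Unmask() are inverses of each other, e.g. (illustrative):
-//   uint32_t crc = Value("kvrocks", 7);
-//   uint32_t stored = Mask(crc);   // safe to embed in data that is itself CRC'd
-//   assert(Unmask(stored) == crc);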
-
-}  // namespace crc32c
-}  // namespace rocksdb
diff --git a/src/server.cc b/src/server.cc
deleted file mode 100644
index 83d95d1..0000000
--- a/src/server.cc
+++ /dev/null
@@ -1,925 +0,0 @@
-#include "server.h"
-
-#include <fcntl.h>
-#include <sys/utsname.h>
-#include <sys/resource.h>
-#include <glog/logging.h>
-#include <utility>
-#include <memory>
-
-#include "util.h"
-#include "worker.h"
-#include "version.h"
-#include "redis_db.h"
-#include "redis_request.h"
-#include "redis_connection.h"
-
-size_t PerfLog::Len() {
-  mu_.lock();
-  size_t len = entries_.size();
-  mu_.unlock();
-  return len;
-}
-
-void PerfLog::Reset() {
-  mu_.lock();
-  entries_.clear();
-  mu_.unlock();
-}
-
-void PerfLog::PushEntry(PerfEntry entry) {
-  mu_.lock();
-  entry.id = id_++;
-  while (entries_.size() >= static_cast<size_t>(max_entries_)) {
-    entries_.pop_back();
-  }
-  entries_.push_front(std::move(entry));
-  mu_.unlock();
-}
-
-std::string PerfLog::ToString(int count) {
-  int n;
-  std::string output;
-
-  mu_.lock();
-  if (count > 0) {
-    n = entries_.size() > static_cast<size_t>(count) ? count : static_cast<int>(entries_.size());
-  } else {
-    n = static_cast<int>(entries_.size());
-  }
-  output.append(Redis::MultiLen(n));
-  for (const auto &entry : entries_) {
-    output.append(Redis::MultiLen(5));
-    output.append(Redis::Integer(entry.id));
-    output.append(Redis::BulkString(entry.cmd_name));
-    output.append(Redis::Integer(entry.duration));
-    output.append(Redis::BulkString(entry.perf_context));
-    output.append(Redis::BulkString(entry.iostats_context));
-    if (--n == 0) break;
-  }
-  mu_.unlock();
-  return output;
-}
-
-Server::Server(Engine::Storage *storage, Config *config) :
-  storage_(storage), config_(config) {
-  // init command stats here to prevent concurrent inserts, which could cause a core dump
-  std::vector<std::string> commands;
-  Redis::GetCommandList(&commands);
-  for (const auto &cmd : commands) {
-    stats_.commands_stats[cmd].calls = 0;
-    stats_.commands_stats[cmd].latency = 0;
-  }
-
-  for (int i = 0; i < config->workers; i++) {
-    auto worker = new Worker(this, config);
-    worker_threads_.emplace_back(new WorkerThread(worker));
-  }
-  uint64_t max_replication_bytes =
-      config_->max_replication_mb > 0 ? config_->max_replication_mb * 1024 * 1024 / config_->repl_workers : 0;
-  for (int i = 0; i < config->repl_workers; i++) {
-    auto repl_worker = new Worker(this, config, true);
-    repl_worker->SetReplicationRateLimit(max_replication_bytes);
-    worker_threads_.emplace_back(new WorkerThread(repl_worker));
-  }
-  perf_log_.SetMaxEntries(config->profiling_sample_record_max_len);
-  task_runner_ = new TaskRunner(2, 1024);
-  time(&start_time_);
-}
-
-Server::~Server() {
-  for (const auto &worker_thread : worker_threads_) {
-    delete worker_thread;
-  }
-  for (const auto &iter : conn_ctxs_) {
-    delete iter.first;
-  }
-  delete task_runner_;
-}
-
-Status Server::Start() {
-  if (!config_->master_host.empty()) {
-    Status s = AddMaster(config_->master_host, static_cast<uint32_t>(config_->master_port));
-    if (!s.IsOK()) return s;
-  }
-  for (const auto worker : worker_threads_) {
-    worker->Start();
-  }
-  task_runner_->Start();
-  // setup server cron thread
-  cron_thread_ = std::thread([this]() {
-    Util::ThreadSetName("server-cron");
-    this->cron();
-  });
-  return Status::OK();
-}
-
-void Server::Stop() {
-  stop_ = true;
-  if (replication_thread_) replication_thread_->Stop();
-  for (const auto worker : worker_threads_) {
-    worker->Stop();
-  }
-  slave_threads_mu_.lock();
-  for (const auto slave_thread : slave_threads_) slave_thread->Stop();
-  slave_threads_mu_.unlock();
-  cleanupExitedSlaves();
-  task_runner_->Stop();
-}
-
-void Server::Join() {
-  for (const auto worker : worker_threads_) {
-    worker->Join();
-  }
-  task_runner_->Join();
-  if (cron_thread_.joinable()) cron_thread_.join();
-}
-
-Status Server::AddMaster(std::string host, uint32_t port) {
-  slaveof_mu_.lock();
-  if (!master_host_.empty() && master_host_ == host && master_port_ == port) {
-    slaveof_mu_.unlock();
-    return Status::OK();
-  }
-
-  if (!master_host_.empty()) {
-    if (replication_thread_) replication_thread_->Stop();
-    replication_thread_ = nullptr;
-  }
-  // we use port + 1 as the replication port, so increment the slaveof port here
-  replication_thread_ = std::unique_ptr<ReplicationThread>(
-      new ReplicationThread(host, port+1, this, config_->masterauth));
-  auto s = replication_thread_->Start(
-      [this]() {
-        this->is_loading_ = true;
-        ReclaimOldDBPtr();
-      },
-      [this]() { this->is_loading_ = false; });
-  if (s.IsOK()) {
-    master_host_ = host;
-    master_port_ = port;
-    config_->master_host = host;
-    config_->master_port = port;
-  } else {
-    replication_thread_ = nullptr;
-  }
-  slaveof_mu_.unlock();
-  return s;
-}
-
-Status Server::RemoveMaster() {
-  slaveof_mu_.lock();
-  if (!master_host_.empty()) {
-    master_host_.clear();
-    master_port_ = 0;
-    config_->master_host.clear();
-    config_->master_port = 0;
-    if (replication_thread_) replication_thread_->Stop();
-    replication_thread_ = nullptr;
-  }
-  slaveof_mu_.unlock();
-  return Status::OK();
-}
-
-void Server::ResetMaster() {
-  slaveof_mu_.lock();
-  master_host_.clear();
-  master_port_ = 0;
-  slaveof_mu_.unlock();
-}
-
-Status Server::AddSlave(Redis::Connection *conn, rocksdb::SequenceNumber next_repl_seq) {
-  auto t = new FeedSlaveThread(this, conn, next_repl_seq);
-  auto s = t->Start();
-  if (!s.IsOK()) {
-    delete t;
-    return s;
-  }
-  int flags;
-  if ((flags = fcntl(conn->GetFD(), F_GETFL)) == -1) {
-    return Status(Status::NotOK, std::string("fcntl(F_GETFL): ") + strerror(errno));
-  }
-  flags &= ~O_NONBLOCK;
-  if (fcntl(conn->GetFD(), F_SETFL, flags) == -1) {
-    return Status(Status::NotOK, std::string("fcntl(F_SETFL,O_BLOCK): ") + strerror(errno));
-  }
-
-  slave_threads_mu_.lock();
-  slave_threads_.emplace_back(t);
-  slave_threads_mu_.unlock();
-  return Status::OK();
-}
-
-void Server::DisconnectSlaves() {
-  slave_threads_mu_.lock();
-  for (const auto &slave_thread : slave_threads_) {
-    if (!slave_thread->IsStopped()) slave_thread->Stop();
-  }
-  while (!slave_threads_.empty()) {
-    auto slave_thread = slave_threads_.front();
-    slave_threads_.pop_front();
-    slave_thread->Join();
-    delete slave_thread;
-  }
-  slave_threads_mu_.unlock();
-}
-
-void Server::cleanupExitedSlaves() {
-  std::list<FeedSlaveThread *> exited_slave_threads;
-  slave_threads_mu_.lock();
-  for (const auto &slave_thread : slave_threads_) {
-    if (slave_thread->IsStopped())
-      exited_slave_threads.emplace_back(slave_thread);
-  }
-  while (!exited_slave_threads.empty()) {
-    auto t = exited_slave_threads.front();
-    exited_slave_threads.pop_front();
-    slave_threads_.remove(t);
-    t->Join();
-    delete t;
-  }
-  slave_threads_mu_.unlock();
-}
-
-void Server::FeedMonitorConns(Redis::Connection *conn, const std::vector<std::string> &tokens) {
-  if (monitor_clients_ <= 0) return;
-  for (const auto &worker_thread : worker_threads_) {
-    auto worker = worker_thread->GetWorker();
-    worker->FeedMonitorConns(conn, tokens);
-  }
-}
-
-int Server::PublishMessage(const std::string &channel, const std::string &msg) {
-  int cnt = 0;
-  std::string reply;
-  reply.append(Redis::MultiLen(3));
-  reply.append(Redis::BulkString("message"));
-  reply.append(Redis::BulkString(channel));
-  reply.append(Redis::BulkString(msg));
-
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  auto iter = pubsub_channels_.find(channel);
-  if (iter != pubsub_channels_.end()) {
-    for (const auto &conn_ctx : iter->second) {
-      auto s = conn_ctx->owner->Reply(conn_ctx->fd, reply);
-      if (s.IsOK()) {
-        cnt++;
-      }
-    }
-  }
-  for (const auto &iter : pubsub_patterns_) {
-    if (Util::StringMatch(iter.first, channel, 0)) {
-      for (const auto &conn_ctx : iter.second) {
-        auto s = conn_ctx->owner->Reply(conn_ctx->fd, reply);
-        if (s.IsOK()) {
-          cnt++;
-        }
-      }
-    }
-  }
-  return cnt;
-}
-
-void Server::SubscribeChannel(const std::string &channel, Redis::Connection *conn) {
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  auto conn_ctx = new ConnContext(conn->Owner(), conn->GetFD());
-  conn_ctxs_[conn_ctx] = true;
-  auto iter = pubsub_channels_.find(channel);
-  if (iter == pubsub_channels_.end()) {
-    std::list<ConnContext *> conn_ctxs;
-    conn_ctxs.emplace_back(conn_ctx);
-    pubsub_channels_.insert(std::pair<std::string, std::list<ConnContext *>>(channel, conn_ctxs));
-  } else {
-    iter->second.emplace_back(conn_ctx);
-  }
-}
-
-void Server::UnSubscribeChannel(const std::string &channel, Redis::Connection *conn) {
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  auto iter = pubsub_channels_.find(channel);
-  if (iter == pubsub_channels_.end()) {
-    return;
-  }
-  for (const auto &conn_ctx : iter->second) {
-    if (conn->GetFD() == conn_ctx->fd && conn->Owner() == conn_ctx->owner) {
-      delConnContext(conn_ctx);
-      iter->second.remove(conn_ctx);
-      if (iter->second.empty()) {
-        pubsub_channels_.erase(iter);
-      }
-      break;
-    }
-  }
-}
-
-void Server::GetChannelsByPattern(const std::string &pattern, std::vector<std::string> *channels) {
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  for (const auto &iter : pubsub_channels_) {
-    if (pattern.empty() || Util::StringMatch(pattern, iter.first, 0)) {
-      channels->emplace_back(iter.first);
-    }
-  }
-}
-
-void Server::ListChannelSubscribeNum(std::vector<std::string> channels,
-                                     std::vector<ChannelSubscribeNum> *channel_subscribe_nums) {
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  for (const auto &chan : channels) {
-    auto iter = pubsub_channels_.find(chan);
-    if (iter != pubsub_channels_.end()) {
-      channel_subscribe_nums->emplace_back(ChannelSubscribeNum{iter->first, iter->second.size()});
-    } else {
-      channel_subscribe_nums->emplace_back(ChannelSubscribeNum{chan, 0});
-    }
-  }
-}
-
-void Server::PSubscribeChannel(const std::string &pattern, Redis::Connection *conn) {
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  auto conn_ctx = new ConnContext(conn->Owner(), conn->GetFD());
-  conn_ctxs_[conn_ctx] = true;
-  auto iter = pubsub_patterns_.find(pattern);
-  if (iter == pubsub_patterns_.end()) {
-    std::list<ConnContext *> conn_ctxs;
-    conn_ctxs.emplace_back(conn_ctx);
-    pubsub_patterns_.insert(std::pair<std::string, std::list<ConnContext *>>(pattern, conn_ctxs));
-  } else {
-    iter->second.emplace_back(conn_ctx);
-  }
-}
-
-void Server::PUnSubscribeChannel(const std::string &pattern, Redis::Connection *conn) {
-  std::lock_guard<std::mutex> guard(pubsub_channels_mu_);
-  auto iter = pubsub_patterns_.find(pattern);
-  if (iter == pubsub_patterns_.end()) {
-    return;
-  }
-  for (const auto &conn_ctx : iter->second) {
-    if (conn->GetFD() == conn_ctx->fd && conn->Owner() == conn_ctx->owner) {
-      delConnContext(conn_ctx);
-      iter->second.remove(conn_ctx);
-      if (iter->second.empty()) {
-        pubsub_patterns_.erase(iter);
-      }
-      break;
-    }
-  }
-}
-
-void Server::AddBlockingKey(const std::string &key, Redis::Connection *conn) {
-  std::lock_guard<std::mutex> guard(blocking_keys_mu_);
-  auto iter = blocking_keys_.find(key);
-  auto conn_ctx = new ConnContext(conn->Owner(), conn->GetFD());
-  conn_ctxs_[conn_ctx] = true;
-  if (iter == blocking_keys_.end()) {
-    std::list<ConnContext *> conn_ctxs;
-    conn_ctxs.emplace_back(conn_ctx);
-    blocking_keys_.insert(std::pair<std::string, std::list<ConnContext *>>(key, conn_ctxs));
-  } else {
-    iter->second.emplace_back(conn_ctx);
-  }
-}
-
-void Server::UnBlockingKey(const std::string &key, Redis::Connection *conn) {
-  std::lock_guard<std::mutex> guard(blocking_keys_mu_);
-  auto iter = blocking_keys_.find(key);
-  if (iter == blocking_keys_.end()) {
-    return;
-  }
-  for (const auto &conn_ctx : iter->second) {
-    if (conn->GetFD() == conn_ctx->fd && conn->Owner() == conn_ctx->owner) {
-      delConnContext(conn_ctx);
-      iter->second.remove(conn_ctx);
-      if (iter->second.empty()) {
-        blocking_keys_.erase(iter);
-      }
-      break;
-    }
-  }
-}
-
-Status Server::WakeupBlockingConns(const std::string &key, size_t n_conns) {
-  std::lock_guard<std::mutex> guard(blocking_keys_mu_);
-  auto iter = blocking_keys_.find(key);
-  if (iter == blocking_keys_.end() || iter->second.empty()) {
-    return Status(Status::NotOK);
-  }
-  while (n_conns-- && !iter->second.empty()) {
-    auto conn_ctx = iter->second.front();
-    conn_ctx->owner->EnableWriteEvent(conn_ctx->fd);
-    delConnContext(conn_ctx);
-    iter->second.pop_front();
-  }
-  return Status::OK();
-}
-
-void Server::delConnContext(ConnContext *c) {
-  auto conn_ctx_iter = conn_ctxs_.find(c);
-  if (conn_ctx_iter != conn_ctxs_.end()) {
-    delete conn_ctx_iter->first;
-    conn_ctxs_.erase(conn_ctx_iter);
-  }
-}
-
-int Server::IncrClientNum() {
-  total_clients_.fetch_add(1, std::memory_order::memory_order_relaxed);
-  return connected_clients_.fetch_add(1, std::memory_order_relaxed);
-}
-
-int Server::DecrClientNum() {
-  return connected_clients_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int Server::IncrMonitorClientNum() {
-  return monitor_clients_.fetch_add(1, std::memory_order_relaxed);
-}
-
-int Server::DecrMonitorClientNum() {
-  return monitor_clients_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int Server::IncrExecutingCommandNum() {
-  return excuting_command_num_.fetch_add(1, std::memory_order_relaxed);
-}
-
-int Server::DecrExecutingCommandNum() {
-  return excuting_command_num_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-std::atomic<uint64_t> *Server::GetClientID() {
-  return &client_id_;
-}
-
-void Server::cron() {
-  uint64_t counter = 0;
-  while (!stop_) {
-    // check every 20s (200 iterations * 100ms; use 20s instead of 60s so that the cron can still execute under critical conditions)
-    if (counter != 0 && counter % 200 == 0) {
-      auto t = std::time(nullptr);
-      auto now = std::localtime(&t);
-      if (config_->compact_cron.IsEnabled() && config_->compact_cron.IsTimeMatch(now)) {
-        Status s = AsyncCompactDB();
-        LOG(INFO) << "[server] Schedule to compact the db, result: " << s.Msg();
-      }
-      if (config_->bgsave_cron.IsEnabled() && config_->bgsave_cron.IsTimeMatch(now)) {
-        Status s = AsyncBgsaveDB();
-        LOG(INFO) << "[server] Schedule to bgsave the db, result: " << s.Msg();
-      }
-    }
-    // check every minute (600 ticks of 100ms)
-    if (counter != 0 && counter % 600 == 0) {
-      storage_->PurgeOldBackups(config_->max_backup_to_keep, config_->max_backup_keep_hours);
-    }
-    cleanupExitedSlaves();
-    counter++;
-    std::this_thread::sleep_for(std::chrono::milliseconds(100));
-  }
-}
-
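-// Collect aggregated RocksDB properties and per-column-family cache usage for
-// the "rocksdb" section of INFO.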
-void Server::GetRocksDBInfo(std::string *info) {
-  std::ostringstream string_stream;
-  rocksdb::DB *db = storage_->GetDB();
-
-  uint64_t memtable_sizes, cur_memtable_sizes, num_snapshots, num_running_flushes;
-  uint64_t num_immutable_tables, memtable_flush_pending, compaction_pending;
-  uint64_t num_running_compaction, num_live_versions, num_superversion, num_background_errors;
-
-  db->GetAggregatedIntProperty("rocksdb.num-snapshots", &num_snapshots);
-  db->GetAggregatedIntProperty("rocksdb.size-all-mem-tables", &memtable_sizes);
-  db->GetAggregatedIntProperty("rocksdb.cur-size-all-mem-tables", &cur_memtable_sizes);
-  db->GetAggregatedIntProperty("rocksdb.num-running-flushes", &num_running_flushes);
-  db->GetAggregatedIntProperty("rocksdb.num-immutable-mem-table", &num_immutable_tables);
-  db->GetAggregatedIntProperty("rocksdb.mem-table-flush-pending", &memtable_flush_pending);
-  db->GetAggregatedIntProperty("rocksdb.num-running-compactions", &num_running_compaction);
-  db->GetAggregatedIntProperty("rocksdb.current-super-version-number", &num_superversion);
-  db->GetAggregatedIntProperty("rocksdb.background-errors", &num_backgroud_errors);
-  db->GetAggregatedIntProperty("rocksdb.compaction-pending", &compaction_pending);
-  db->GetAggregatedIntProperty("rocksdb.num-live-versions", &num_live_versions);
-
-  string_stream << "# RocksDB\r\n";
-  for (const auto &cf_handle : storage_->GetCFHandles()) {
-    uint64_t estimate_keys, block_cache_usage, block_cache_pinned_usage, index_and_filter_cache_usage;
-    db->GetIntProperty(cf_handle, "rocksdb.estimate-num-keys", &estimate_keys);
-    string_stream << "estimate_keys[" << cf_handle->GetName() << "]:" << estimate_keys << "\r\n";
-    db->GetIntProperty(cf_handle, "rocksdb.block-cache-usage", &block_cache_usage);
-    string_stream << "block_cache_usage[" << cf_handle->GetName() << "]:" << block_cache_usage << "\r\n";
-    db->GetIntProperty(cf_handle, "rocksdb.block-cache-pinned-usage", &block_cache_pinned_usage);
-    string_stream << "block_cache_pinned_usage[" << cf_handle->GetName() << "]:" << block_cache_pinned_usage << "\r\n";
-    db->GetIntProperty(cf_handle, "rocksdb.estimate-table-readers-mem", &index_and_filter_cache_usage);
-    string_stream << "index_and_filter_cache_usage:[" << cf_handle->GetName() << "]:" << index_and_filter_cache_usage
-                  << "\r\n";
-  }
-  string_stream << "all_mem_tables:" << memtable_sizes << "\r\n";
-  string_stream << "cur_mem_tables:" << cur_memtable_sizes << "\r\n";
-  string_stream << "snapshots:" << num_snapshots << "\r\n";
-  string_stream << "num_immutable_tables:" << num_immutable_tables << "\r\n";
-  string_stream << "num_running_flushes:" << num_running_flushes << "\r\n";
-  string_stream << "memtable_flush_pending:" << memtable_flush_pending << "\r\n";
-  string_stream << "compaction_pending:" << compaction_pending << "\r\n";
-  string_stream << "num_running_compactions:" << num_running_compaction << "\r\n";
-  string_stream << "num_live_versions:" << num_live_versions << "\r\n";
-  string_stream << "num_superversion:" << num_superversion << "\r\n";
-  string_stream << "num_background_errors:" << num_backgroud_errors << "\r\n";
-  string_stream << "flush_count:" << storage_->GetFlushCount()<< "\r\n";
-  string_stream << "compaction_count:" << storage_->GetCompactionCount()<< "\r\n";
-  string_stream << "is_bgsaving:" << (db_bgsave_ ? "yes" : "no") << "\r\n";
-  string_stream << "is_compacting:" << (db_compacting_ ? "yes" : "no") << "\r\n";
-  *info = string_stream.str();
-}
-
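-// Build the "server" section of INFO: version, git sha, OS, compiler, pid,
-// port and uptime; the uname output is cached as it never changes.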
-void Server::GetServerInfo(std::string *info) {
-  time_t now;
-  std::ostringstream string_stream;
-  static int call_uname = 1;
-  static utsname name;
-  if (call_uname) {
-    /* Uname can be slow and is always the same output. Cache it. */
-    uname(&name);
-    call_uname = 0;
-  }
-  time(&now);
-  string_stream << "# Server\r\n";
-  string_stream << "version:" << VERSION << "\r\n";
-  string_stream << "git_sha1:" << GIT_COMMIT << "\r\n";
-  string_stream << "os:" << name.sysname << " " << name.release << " " << name.machine << "\r\n";
-#ifdef __GNUC__
-  string_stream << "gcc_version:" << __GNUC__ << "." << __GNUC_MINOR__ << "." << __GNUC_PATCHLEVEL__ << "\r\n";
-#else
-  string_stream << "gcc_version:0,0,0\r\n";
-#endif
-  string_stream << "arch_bits:" << sizeof(void *) * 8 << "\r\n";
-  string_stream << "process_id:" << getpid() << "\r\n";
-  string_stream << "tcp_port:" << config_->port << "\r\n";
-  string_stream << "uptime_in_seconds:" << now-start_time_ << "\r\n";
-  string_stream << "uptime_in_days:" << (now-start_time_)/86400 << "\r\n";
-  *info = string_stream.str();
-}
-
-void Server::GetClientsInfo(std::string *info) {
-  std::ostringstream string_stream;
-  string_stream << "# Clients\r\n";
-  string_stream << "connected_clients:" << connected_clients_ << "\r\n";
-  string_stream << "monitor_clients:" << monitor_clients_ << "\r\n";
-  *info = string_stream.str();
-}
-
-void Server::GetMemoryInfo(std::string *info) {
-  std::ostringstream string_stream;
-  char buf[16];
-  int64_t rss = Stats::GetMemoryRSS();
-  Util::BytesToHuman(buf, 16, static_cast<uint64_t>(rss));
-  string_stream << "# Memory\r\n";
-  string_stream << "used_memory_rss:" << rss <<"\r\n";
-  string_stream << "used_memory_human:" << buf <<"\r\n";
-  *info = string_stream.str();
-}
-
-void Server::GetReplicationInfo(std::string *info) {
-  time_t now;
-  std::ostringstream string_stream;
-  string_stream << "# Replication\r\n";
-  string_stream << "role:" << (IsSlave() ? "slave":"master") << "\r\n";
-  if (IsSlave()) {
-    time(&now);
-    string_stream << "master_host:" << master_host_ << "\r\n";
-    string_stream << "master_port:" << master_port_ << "\r\n";
-    ReplState state = replication_thread_->State();
-    string_stream << "master_link_status:" << (state == kReplConnected? "up":"down") << "\r\n";
-    string_stream << "master_sync_unrecoverable_error:" << (state == kReplError ? "yes" : "no") << "\r\n";
-    string_stream << "master_sync_in_progress:" << (state == kReplFetchMeta || state == kReplFetchSST) << "\r\n";
-    string_stream << "master_last_io_seconds_ago:" << now-replication_thread_->LastIOTime() << "\r\n";
-    string_stream << "slave_repl_offset:" << storage_->LatestSeq() << "\r\n";
-    string_stream << "slave_priority:" << config_->slave_priority << "\r\n";
-  }
-
-  int idx = 0;
-  rocksdb::SequenceNumber latest_seq = storage_->LatestSeq();
-  slave_threads_mu_.lock();
-  string_stream << "connected_slaves:" << slave_threads_.size() << "\r\n";
-  for (const auto &slave : slave_threads_) {
-    if (slave->IsStopped()) continue;
-    string_stream << "slave" << std::to_string(idx) << ":";
-    string_stream << "ip=" << slave->GetConn()->GetIP()
-                  << ",port=" << slave->GetConn()->GetListeningPort()
-                  << ",offset=" << slave->GetCurrentReplSeq()
-                  << ",lag=" << latest_seq - slave->GetCurrentReplSeq() << "\r\n";
-    ++idx;
-  }
-  slave_threads_mu_.unlock();
-
-  *info = string_stream.str();
-}
-
-void Server::GetStatsInfo(std::string *info) {
-  std::ostringstream string_stream;
-  string_stream << "# Stats\r\n";
-  string_stream << "total_connections_received:" << total_clients_ <<"\r\n";
-  string_stream << "total_commands_processed:" << stats_.total_calls <<"\r\n";
-  string_stream << "total_net_input_bytes:" << stats_.in_bytes <<"\r\n";
-  string_stream << "total_net_output_bytes:" << stats_.out_bytes <<"\r\n";
-  string_stream << "sync_full:" << stats_.fullsync_counter <<"\r\n";
-  string_stream << "sync_partial_ok:" << stats_.psync_ok_counter <<"\r\n";
-  string_stream << "sync_partial_err:" << stats_.psync_err_counter <<"\r\n";
-  string_stream << "pubsub_channels:" << pubsub_channels_.size() <<"\r\n";
-  *info = string_stream.str();
-}
-
-void Server::GetCommandsStatsInfo(std::string *info) {
-  std::ostringstream string_stream;
-  string_stream << "# Commandstats\r\n";
-
-  for (const auto &cmd_stat : stats_.commands_stats) {
-    auto calls = cmd_stat.second.calls.load();
-    auto latency = cmd_stat.second.latency.load();
-    if (calls == 0) continue;
-    string_stream << "cmdstat_" << cmd_stat.first << ":calls=" << calls
-                  << ",usec=" << latency << ",usec_per_call="
-                  << (static_cast<float>(latency) / static_cast<float>(calls))
-                  << "\r\n";
-  }
-  *info = string_stream.str();
-}
-
-void Server::GetInfo(const std::string &ns, const std::string &section, std::string *info) {
-  info->clear();
-  std::ostringstream string_stream;
-  bool all = section == "all";
-
-  if (all || section == "server") {
-    std::string server_info;
-    GetServerInfo(&server_info);
-    string_stream << server_info;
-  }
-  if (all || section == "clients") {
-    std::string clients_info;
-    GetClientsInfo(&clients_info);
-    string_stream << clients_info;
-  }
-  if (all || section == "memory") {
-    std::string memory_info;
-    GetMemoryInfo(&memory_info);
-    string_stream << memory_info;
-  }
-  if (all || section == "persistence") {
-    string_stream << "# Persistence\r\n";
-    string_stream << "loading:" << is_loading_ <<"\r\n";
-  }
-  if (all || section == "stats") {
-    std::string stats_info;
-    GetStatsInfo(&stats_info);
-    string_stream << stats_info;
-  }
-  if (all || section == "replication") {
-    std::string replication_info;
-    GetReplicationInfo(&replication_info);
-    string_stream << replication_info;
-  }
-  if (all || section == "cpu") {
-    struct rusage self_ru;
-    getrusage(RUSAGE_SELF, &self_ru);
-    string_stream << "# CPU\r\n";
-    string_stream << "used_cpu_sys:"
-                  << static_cast<float>(self_ru.ru_stime.tv_sec) + static_cast<float>(self_ru.ru_stime.tv_usec) / 1000000
-                  << "\r\n";
-    string_stream << "used_cpu_user:"
-                  << static_cast<float>(self_ru.ru_utime.tv_sec) + static_cast<float>(self_ru.ru_utime.tv_usec) / 1000000
-                  << "\r\n";
-  }
-  if (all || section == "commandstats") {
-    std::string commands_stats_info;
-    GetCommandsStatsInfo(&commands_stats_info);
-    string_stream << commands_stats_info;
-  }
-  if (all || section == "keyspace") {
-    KeyNumStats stats;
-    GetLastestKeyNumStats(ns, &stats);
-    time_t last_scan_time = GetLastScanTime(ns);
-    string_stream << "# Keyspace\r\n";
-    string_stream << "# Last scan db time: " << std::asctime(std::localtime(&last_scan_time));
-    string_stream << "db0:keys=" << stats.n_key << ",expires=" << stats.n_expires
-                  << ",avg_ttl=" << stats.avg_ttl << ",expired=" << stats.n_expired << "\r\n";
-    string_stream << "sequence:" << storage_->GetDB()->GetLatestSequenceNumber() << "\r\n";
-    string_stream << "used_db_size:" << storage_->GetTotalSize() << "\r\n";
-    string_stream << "max_db_size:" << config_->max_db_size * GiB << "\r\n";
-    double used_percent = config_->max_db_size ?
-                          storage_->GetTotalSize() * 100 / (config_->max_db_size * GiB) : 0;
-    string_stream << "used_percent: " << used_percent << "%\r\n";
-  }
-  if (all || section == "rocksdb") {
-    std::string rocksdb_info;
-    GetRocksDBInfo(&rocksdb_info);
-    string_stream << rocksdb_info;
-  }
-  *info = string_stream.str();
-}
-
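-// Dump every RocksDB ticker and histogram (P50/P95/P99/max, count, sum) into a
-// single JSON object string.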
-std::string Server::GetRocksDBStatsJson() {
-  char buf[256];
-  std::string output;
-
-  output.reserve(8*1024);
-  output.append("{");
-  auto stats = storage_->GetDB()->GetDBOptions().statistics;
-  for (const auto &iter : rocksdb::TickersNameMap) {
-    snprintf(buf, sizeof(buf), "\"%s\":%" PRIu64 ",",
-             iter.second.c_str(), stats->getTickerCount(iter.first));
-    output.append(buf);
-  }
-  for (const auto &iter : rocksdb::HistogramsNameMap) {
-    rocksdb::HistogramData hist_data;
-    stats->histogramData(iter.first, &hist_data);
-    /* P50 P95 P99 P100 COUNT SUM */
-    snprintf(buf, sizeof(buf), "\"%s\":[%f,%f,%f,%f,%" PRIu64 ",%" PRIu64 "],",
-             iter.second.c_str(),
-             hist_data.median, hist_data.percentile95, hist_data.percentile99,
-             hist_data.max, hist_data.count, hist_data.sum);
-    output.append(buf);
-  }
-  output.pop_back();
-  output.append("}");
-  output.shrink_to_fit();
-  return output;
-}
-
-/*
- * Reclaim the old db pointer before restoring the db from a backup,
- * as the restore deletes the db and its column families.
- */
-void Server::ReclaimOldDBPtr() {
-  LOG(INFO) << "Disconnecting slaves...";
-  DisconnectSlaves();
-  LOG(INFO) << "Restarting the task runner...";
-  task_runner_->Restart();
-  LOG(INFO) << "Waiting for excuting command...";
-  while (excuting_command_num_ != 0) {
-    usleep(200000);
-  }
-}
-
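-// Run the full compaction on the task runner so it doesn't block a worker
-// thread; only one compaction is allowed to be in flight at a time.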
-Status Server::AsyncCompactDB() {
-  db_mu_.lock();
-  if (db_compacting_) {
-    db_mu_.unlock();
-    return Status(Status::NotOK, "compact in-progress");
-  }
-  db_compacting_ = true;
-  db_mu_.unlock();
-
-  Task task;
-  task.arg = this;
-  task.callback = [](void *arg) {
-    auto svr = static_cast<Server*>(arg);
-    svr->storage_->Compact(nullptr, nullptr);
-    svr->db_mu_.lock();
-    svr->db_compacting_ = false;
-    svr->db_mu_.unlock();
-  };
-  return task_runner_->Publish(task);
-}
-
-Status Server::AsyncBgsaveDB() {
-  db_mu_.lock();
-  if (db_bgsave_) {
-    db_mu_.unlock();
-    return Status(Status::NotOK, "bgsave in-progress");
-  }
-  db_bgsave_ = true;
-  db_mu_.unlock();
-
-  Task task;
-  task.arg = this;
-  task.callback = [](void *arg) {
-    auto svr = static_cast<Server*>(arg);
-    svr->storage_->CreateBackup();
-    svr->db_mu_.lock();
-    svr->db_bgsave_ = false;
-    svr->db_mu_.unlock();
-  };
-  return task_runner_->Publish(task);
-}
-
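-// Scan the namespace in the background to refresh the key/expire statistics
-// reported in the "keyspace" section of INFO.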
-Status Server::AsyncScanDBSize(const std::string &ns) {
-  db_mu_.lock();
-  auto iter = db_scan_infos_.find(ns);
-  if (iter == db_scan_infos_.end()) {
-    db_scan_infos_[ns] = DBScanInfo{};
-  }
-  if (db_scan_infos_[ns].is_scanning) {
-    db_mu_.unlock();
-    return Status(Status::NotOK, "scanning the db now");
-  }
-  db_scan_infos_[ns].is_scanning = true;
-  db_mu_.unlock();
-
-  Task task;
-  task.arg = this;
-  task.callback = [ns](void *arg) {
-    auto svr = static_cast<Server*>(arg);
-    Redis::Database db(svr->storage_, ns);
-    KeyNumStats stats;
-    db.GetKeyNumStats("", &stats);
-
-    svr->db_mu_.lock();
-    svr->db_scan_infos_[ns].key_num_stats = stats;
-    time(&svr->db_scan_infos_[ns].last_scan_time);
-    svr->db_scan_infos_[ns].is_scanning = false;
-    svr->db_mu_.unlock();
-  };
-  return task_runner_->Publish(task);
-}
-
-void Server::GetLastestKeyNumStats(const std::string &ns, KeyNumStats *stats) {
-  auto iter = db_scan_infos_.find(ns);
-  if (iter != db_scan_infos_.end()) {
-    *stats = iter->second.key_num_stats;
-  }
-}
-
-time_t Server::GetLastScanTime(const std::string &ns) {
-  auto iter = db_scan_infos_.find(ns);
-  if (iter != db_scan_infos_.end()) {
-    return iter->second.last_scan_time;
-  }
-  return 0;
-}
-
-void Server::SlowlogReset() {
-  slowlog_.mu.lock();
-  slowlog_.entry_list.clear();
-  slowlog_.mu.unlock();
-}
-
-uint32_t Server::SlowlogLen() {
-  std::unique_lock<std::mutex> lock(slowlog_.mu);
-  return slowlog_.entry_list.size();
-}
-
-void Server::CreateSlowlogReply(std::string *output, uint32_t count) {
-  uint32_t sent = 0;
-  slowlog_.mu.lock();
-  for (auto iter = slowlog_.entry_list.begin(); iter != slowlog_.entry_list.end() && sent < count; ++iter) {
-    sent++;
-    output->append(Redis::MultiLen(4));
-    output->append(Redis::Integer(iter->id));
-    output->append(Redis::Integer(iter->time));
-    output->append(Redis::Integer(iter->duration));
-    output->append(Redis::MultiBulkString(iter->args));
-  }
-  output->insert(0, Redis::MultiLen(sent));
-  slowlog_.mu.unlock();
-}
-
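-// Record the command in the slowlog when its duration exceeds
-// slowlog-log-slower-than, trimming the list to slowlog-max-len entries.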
-void Server::SlowlogPushEntryIfNeeded(const std::vector<std::string>* args, uint64_t duration) {
-  if (config_->slowlog_log_slower_than < 0) return;
-  if (static_cast<int64_t>(duration) < config_->slowlog_log_slower_than) return;
-  slowlog_.mu.lock();
-  slowlog_.entry_list.emplace_front(SlowlogEntry{*args, ++slowlog_.id, duration, time(nullptr)});
-
-  while (slowlog_.entry_list.size() > config_->slowlog_max_len) {
-    slowlog_.entry_list.pop_back();
-  }
-  slowlog_.mu.unlock();
-}
-
-std::string Server::GetClientsStr() {
-  std::string clients;
-  for (const auto &t : worker_threads_) {
-    clients.append(t->GetWorker()->GetClientsStr());
-  }
-  slave_threads_mu_.lock();
-  for (const auto &st : slave_threads_) {
-    clients.append(st->GetConn()->ToString());
-  }
-  slave_threads_mu_.unlock();
-  return clients;
-}
-
-void Server::KillClient(int64_t *killed, std::string addr, uint64_t id, bool skipme, Redis::Connection *conn) {
-  *killed = 0;
-  for (const auto &t : worker_threads_) {
-    int64_t killed_in_worker = 0;
-    t->GetWorker()->KillClient(conn, id, addr, skipme, &killed_in_worker);
-    *killed += killed_in_worker;
-  }
-  slave_threads_mu_.lock();
-  for (const auto &st : slave_threads_) {
-    if ((!addr.empty() && st->GetConn()->GetAddr() == addr)
-        || (id != 0 && st->GetConn()->GetID() == id)) {
-      st->Stop();
-      (*killed)++;
-    }
-  }
-  slave_threads_mu_.unlock();
-}
-
-void Server::SetReplicationRateLimit(uint64_t max_replication_mb) {
-  uint64_t max_rate_per_repl_worker = 0;
-  if (max_replication_mb > 0) {
-    max_rate_per_repl_worker = (max_replication_mb*MiB)/config_->repl_workers;
-  }
-  for (const auto &t : worker_threads_) {
-    if (t->GetWorker()->IsRepl()) {
-      t->GetWorker()->SetReplicationRateLimit(max_rate_per_repl_worker);
-    }
-  }
-}
diff --git a/src/server.h b/src/server.h
deleted file mode 100644
index 182e5aa..0000000
--- a/src/server.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#pragma once
-
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-
-#include <map>
-#include <list>
-#include <string>
-#include <vector>
-#include <memory>
-
-#include "stats.h"
-#include "storage.h"
-#include "task_runner.h"
-#include "replication.h"
-#include "redis_metadata.h"
-#include "worker.h"
-
-struct DBScanInfo {
-  time_t last_scan_time = 0;
-  KeyNumStats key_num_stats;
-  bool is_scanning = false;
-};
-
-struct SlowlogEntry {
-  std::vector<std::string> args;
-  uint64_t id;
-  uint64_t duration;
-  time_t time;
-};
-
-struct SlowLog {
-  std::list<SlowlogEntry> entry_list;
-  uint64_t id = 0;
-  std::mutex mu;
-};
-
-struct PerfEntry {
- public:
-  std::string cmd_name;
-  std::string perf_context;
-  std::string iostats_context;
-  uint64_t duration;
-  uint64_t id;
-};
-
-struct PerfLog {
- public:
-  size_t Len();
-  void Reset();
-  void PushEntry(PerfEntry entry);
-  std::string ToString(int count);
-  void SetMaxEntries(int max_entries) { max_entries_ = max_entries; }
-
- private:
-  int max_entries_ = 128;
-  std::list<PerfEntry> entries_;
-  uint64_t id_ = 0;
-  std::mutex mu_;
-};
-
-struct ConnContext {
-  Worker *owner;
-  int fd;
-  ConnContext(Worker *w, int fd) : owner(w), fd(fd) {}
-};
-
-typedef struct {
-  std::string channel;
-  size_t subscribe_num;
-} ChannelSubscribeNum;
-
-
-class Server {
- public:
-  explicit Server(Engine::Storage *storage, Config *config);
-  ~Server();
-
-  Status Start();
-  void Stop();
-  void Join();
-  bool IsStopped() { return stop_; }
-  bool IsLoading() { return is_loading_; }
-  Config *GetConfig() { return config_; }
-
-  Status AddMaster(std::string host, uint32_t port);
-  Status RemoveMaster();
-  void ResetMaster();
-  Status AddSlave(Redis::Connection *conn, rocksdb::SequenceNumber next_repl_seq);
-  void DisconnectSlaves();
-  void cleanupExitedSlaves();
-  bool IsSlave() { return !master_host_.empty(); }
-  void FeedMonitorConns(Redis::Connection *conn, const std::vector<std::string> &tokens);
-
-  int PublishMessage(const std::string &channel, const std::string &msg);
-  void SubscribeChannel(const std::string &channel, Redis::Connection *conn);
-  void UnSubscribeChannel(const std::string &channel, Redis::Connection *conn);
-  void GetChannelsByPattern(const std::string &pattern, std::vector<std::string> *channels);
-  void ListChannelSubscribeNum(std::vector<std::string> channels,
-                               std::vector<ChannelSubscribeNum> *channel_subscribe_nums);
-  void PSubscribeChannel(const std::string &pattern, Redis::Connection *conn);
-  void PUnSubscribeChannel(const std::string &pattern, Redis::Connection *conn);
-  int GetPubSubPatternSize() { return pubsub_patterns_.size(); }
-
-  void AddBlockingKey(const std::string &key, Redis::Connection *conn);
-  void UnBlockingKey(const std::string &key, Redis::Connection *conn);
-  Status WakeupBlockingConns(const std::string &key, size_t n_conns);
-
-  std::string GetLastRandomKeyCursor() { return last_random_key_cursor_; }
-  void SetLastRandomKeyCursor(const std::string &cursor) { last_random_key_cursor_ = cursor; }
-
-  void GetStatsInfo(std::string *info);
-  void GetServerInfo(std::string *info);
-  void GetMemoryInfo(std::string *info);
-  void GetRocksDBInfo(std::string *info);
-  void GetClientsInfo(std::string *info);
-  void GetReplicationInfo(std::string *info);
-  void GetCommandsStatsInfo(std::string *info);
-  void GetInfo(const std::string &ns, const std::string &section, std::string *info);
-  std::string GetRocksDBStatsJson();
-
-  void ReclaimOldDBPtr();
-  Status AsyncCompactDB();
-  Status AsyncBgsaveDB();
-  Status AsyncScanDBSize(const std::string &ns);
-  void GetLastestKeyNumStats(const std::string &ns, KeyNumStats *stats);
-  time_t GetLastScanTime(const std::string &ns);
-
-  void SlowlogReset();
-  uint32_t SlowlogLen();
-  void CreateSlowlogReply(std::string *output, uint32_t count);
-  void SlowlogPushEntryIfNeeded(const std::vector<std::string>* args, uint64_t duration);
-
-  int DecrClientNum();
-  int IncrClientNum();
-  int IncrMonitorClientNum();
-  int DecrMonitorClientNum();
-  int IncrExecutingCommandNum();
-  int DecrExecutingCommandNum();
-  std::string GetClientsStr();
-  std::atomic<uint64_t> *GetClientID();
-  void KillClient(int64_t *killed, std::string addr, uint64_t id, bool skipme, Redis::Connection *conn);
-  void SetReplicationRateLimit(uint64_t max_replication_mb);
-
-  PerfLog *GetPerfLog() { return &perf_log_; }
-
-  Stats stats_;
-  Engine::Storage *storage_;
-
- private:
-  void cron();
-  void delConnContext(ConnContext *c);
-
-  bool stop_ = false;
-  bool is_loading_ = false;
-  time_t start_time_ = 0;
-  std::mutex slaveof_mu_;
-  std::string master_host_;
-  uint32_t master_port_ = 0;
-  Config *config_ = nullptr;
-  std::string last_random_key_cursor_;
-
-  // client counters
-  std::atomic<uint64_t> client_id_{1};
-  std::atomic<int> connected_clients_{0};
-  std::atomic<int> monitor_clients_{0};
-  std::atomic<uint64_t> total_clients_{0};
-  std::atomic<int> excuting_command_num_{0};
-
-  // slave
-  std::mutex slave_threads_mu_;
-  std::list<FeedSlaveThread *> slave_threads_;
-
-  std::mutex db_mu_;
-  bool db_compacting_ = false;
-  bool db_bgsave_ = false;
-  std::map<std::string, DBScanInfo> db_scan_infos_;
-
-  SlowLog slowlog_;
-  PerfLog perf_log_;
-  std::map<ConnContext *, bool> conn_ctxs_;
-  std::map<std::string, std::list<ConnContext *>> pubsub_channels_;
-  std::map<std::string, std::list<ConnContext *>> pubsub_patterns_;
-  std::mutex pubsub_channels_mu_;
-  std::map<std::string, std::list<ConnContext *>> blocking_keys_;
-  std::mutex blocking_keys_mu_;
-
-  // threads
-  std::thread cron_thread_;
-  TaskRunner *task_runner_ = nullptr;
-  std::vector<WorkerThread *> worker_threads_;
-  std::unique_ptr<ReplicationThread> replication_thread_;
-};
diff --git a/src/stats.cc b/src/stats.cc
deleted file mode 100644
index 3651fab..0000000
--- a/src/stats.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-#include "stats.h"
-
-#if defined(__APPLE__)
-#include <mach/task.h>
-#include <mach/mach_init.h>
-
-int64_t Stats::GetMemoryRSS() {
-  task_t task = MACH_PORT_NULL;
-  struct task_basic_info t_info;
-  mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
-  if (task_for_pid(current_task(), getpid(), &task) != KERN_SUCCESS) return 0;
-  task_info(task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
-  return static_cast<int64_t>(t_info.resident_size);
-}
-#else
-#include <fcntl.h>
-
-#include <string>
-#include <cstdio>
-#include <cstring>
-
-int64_t Stats::GetMemoryRSS() {
-  int fd, count;
-  char buf[4096], filename[256];
-  snprintf(filename, sizeof(filename), "/proc/%d/stat", getpid());
-  if ((fd = open(filename, O_RDONLY)) == -1) return 0;
-  if (read(fd, buf, sizeof(buf)) <= 0) {
-    close(fd);
-    return 0;
-  }
-  close(fd);
-
-  char *start = buf;
-  count = 23; /* RSS is the 24th field in /proc/<pid>/stat */
-  while (start && count--) {
-    start = strchr(start, ' ');
-    if (start) start++;
-  }
-  if (!start) return 0;
-  char *stop = strchr(start, ' ');
-  if (!stop) return 0;
-  *stop = '\0';
-  int rss = std::stoi(start);
-  return static_cast<int64_t>(rss * sysconf(_SC_PAGESIZE));
-}
-#endif
-
-void Stats::IncrCalls(const std::string &command_name) {
-  total_calls.fetch_add(1, std::memory_order_relaxed);
-  commands_stats[command_name].calls.fetch_add(1, std::memory_order_relaxed);
-}
-
-void Stats::IncrLatency(uint64_t latency, const std::string &command_name) {
-  commands_stats[command_name].latency.fetch_add(latency, std::memory_order_relaxed);
-}
diff --git a/src/stats.h b/src/stats.h
deleted file mode 100644
index 890e795..0000000
--- a/src/stats.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#pragma once
-
-#include <unistd.h>
-#include <map>
-#include <atomic>
-#include <string>
-
-struct command_stat {
-  std::atomic<uint64_t> calls;
-  std::atomic<uint64_t> latency;
-};
-
-class Stats {
- public:
-  std::atomic<uint64_t> total_calls = {0};
-  std::atomic<uint64_t> in_bytes = {0};
-  std::atomic<uint64_t> out_bytes = {0};
-
-  std::atomic<uint64_t> fullsync_counter = {0};
-  std::atomic<uint64_t> psync_err_counter = {0};
-  std::atomic<uint64_t> psync_ok_counter = {0};
-  std::map<std::string, command_stat> commands_stats;
-
- public:
-  void IncrCalls(const std::string &command_name);
-  void IncrLatency(uint64_t latency, const std::string &command_name);
-  void IncrInbondBytes(uint64_t bytes) { in_bytes.fetch_add(bytes, std::memory_order_relaxed); }
-  void IncrOutbondBytes(uint64_t bytes) { out_bytes.fetch_add(bytes, std::memory_order_relaxed); }
-  void IncrFullSyncCounter() { fullsync_counter.fetch_add(1, std::memory_order_relaxed); }
-  void IncrPSyncErrCounter() { psync_err_counter.fetch_add(1, std::memory_order_relaxed); }
-  void IncrPSyncOKCounter() { psync_ok_counter.fetch_add(1, std::memory_order_relaxed); }
-  static int64_t GetMemoryRSS();
-};
diff --git a/src/status.h b/src/status.h
deleted file mode 100644
index 248e1dc..0000000
--- a/src/status.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#pragma once
-
-#include <string>
-#include <utility>
-
-class Status {
- public:
-  enum Code {
-    cOK,
-    NotOK,
-
-    // DB
-    DBOpenErr,
-    DBBackupErr,
-    DBGetWALErr,
-    DBBackupFileErr,
-
-    // Replication
-    DBMismatched,
-
-    // Redis
-    RedisUnknownCmd,
-    RedisInvalidCmd,
-    RedisParseErr,
-    RedisExecErr,
-    RedisReplicationConflict,
-
-    // Network
-    NetSendErr,
-  };
-
-  Status() : Status(cOK, "ok") {}
-  explicit Status(Code code, std::string msg = "") : code_(code), msg_(std::move(msg)) {}
-  bool IsOK() { return code_ == cOK; }
-  std::string Msg() { return msg_; }
-  static Status OK() { return Status(cOK, "ok"); }
-
- private:
-  Code code_;
-  std::string msg_;
-};
diff --git a/src/storage.cc b/src/storage.cc
deleted file mode 100644
index 6203f70..0000000
--- a/src/storage.cc
+++ /dev/null
@@ -1,582 +0,0 @@
-#include "storage.h"
-
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <event2/buffer.h>
-#include <glog/logging.h>
-#include <rocksdb/filter_policy.h>
-#include <rocksdb/table.h>
-#include <rocksdb/sst_file_manager.h>
-#include <rocksdb/utilities/table_properties_collectors.h>
-#include <rocksdb/rate_limiter.h>
-#include <iostream>
-#include <memory>
-
-#include "config.h"
-#include "redis_metadata.h"
-#include "event_listener.h"
-#include "compact_filter.h"
-
-namespace Engine {
-
-const char *kPubSubColumnFamilyName = "pubsub";
-const char *kZSetScoreColumnFamilyName = "zset_score";
-const char *kMetadataColumnFamilyName = "metadata";
-const uint64_t kIORateLimitMaxMb = 1024000;
-using rocksdb::Slice;
-
-Storage::~Storage() {
-  DestroyBackup();
-  CloseDB();
-}
-
-void Storage::CloseDB() {
-  db_->SyncWAL();
-  // Prevent destroying the column families while the compaction filter is still using them
-  db_mu_.lock();
-  db_closing_ = true;
-  while (db_refs_ != 0) {
-    db_mu_.unlock();
-    usleep(10000);
-    db_mu_.lock();
-  }
-  db_mu_.unlock();
-  for (auto handle : cf_handles_) db_->DestroyColumnFamilyHandle(handle);
-  delete db_;
-}
-
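-// Map the kvrocks configuration onto RocksDB options; the SST file manager and
-// rate limiter are kept as members so total size and IO rate can be queried or
-// adjusted at runtime.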
-void Storage::InitOptions(rocksdb::Options *options) {
-  options->create_if_missing = true;
-  options->create_missing_column_families = true;
-  // options.IncreaseParallelism(2);
-  // NOTE: the overhead of statistics is 5%-10%, so it should be configurable in prod env
-  // See: https://github.com/facebook/rocksdb/wiki/Statistics
-  options->statistics = rocksdb::CreateDBStatistics();
-  options->stats_dump_period_sec = 0;
-  options->OptimizeLevelStyleCompaction();
-  options->max_open_files = config_->rocksdb_options.max_open_files;
-  options->max_subcompactions = config_->rocksdb_options.max_sub_compactions;
-  options->max_background_flushes = config_->rocksdb_options.max_background_flushes;
-  options->max_background_compactions = config_->rocksdb_options.max_background_compactions;
-  options->max_write_buffer_number = config_->rocksdb_options.max_write_buffer_number;
-  options->write_buffer_size =  config_->rocksdb_options.write_buffer_size;
-  options->compression = config_->rocksdb_options.compression;
-  options->enable_pipelined_write = config_->rocksdb_options.enable_pipelined_write;
-  options->target_file_size_base = config_->rocksdb_options.target_file_size_base;
-  options->max_manifest_file_size = 64 * MiB;
-  options->max_log_file_size = 256 * MiB;
-  options->keep_log_file_num = 12;
-  options->WAL_ttl_seconds = config_->rocksdb_options.WAL_ttl_seconds;
-  options->WAL_size_limit_MB = config_->rocksdb_options.WAL_size_limit_MB;
-  options->listeners.emplace_back(new EventListener(this));
-  options->dump_malloc_stats = true;
-  sst_file_manager_ = std::shared_ptr<rocksdb::SstFileManager>(rocksdb::NewSstFileManager(rocksdb::Env::Default()));
-  options->sst_file_manager = sst_file_manager_;
-  options->table_properties_collector_factories.emplace_back(
-      rocksdb::NewCompactOnDeletionCollectorFactory(128000, 64000));
-  uint64_t max_io_mb = kIORateLimitMaxMb;
-  if (config_->max_io_mb > 0) {
-    max_io_mb = config_->max_io_mb;
-  }
-  rate_limiter_ = std::shared_ptr<rocksdb::RateLimiter>(rocksdb::NewGenericRateLimiter(max_io_mb * MiB));
-  options->rate_limiter = rate_limiter_;
-  options->delayed_write_rate = config_->rocksdb_options.delayed_write_rate;
-  options->compaction_readahead_size = config_->rocksdb_options.compaction_readahead_size;
-  options->level0_slowdown_writes_trigger = config_->rocksdb_options.level0_slowdown_writes_trigger;
-  options->level0_stop_writes_trigger = config_->rocksdb_options.level0_stop_writes_trigger;
-}
-
-Status Storage::CreateColumnFamiles(const rocksdb::Options &options) {
-  rocksdb::DB *tmp_db;
-  rocksdb::ColumnFamilyOptions cf_options(options);
-  rocksdb::Status s = rocksdb::DB::Open(options, config_->db_dir, &tmp_db);
-  if (s.ok()) {
-    std::vector<std::string> cf_names = {kMetadataColumnFamilyName,
-                                         kZSetScoreColumnFamilyName,
-                                         kPubSubColumnFamilyName};
-    std::vector<rocksdb::ColumnFamilyHandle *> cf_handles;
-    s = tmp_db->CreateColumnFamilies(cf_options, cf_names, &cf_handles);
-    if (!s.ok()) {
-      delete tmp_db;
-      return Status(Status::DBOpenErr, s.ToString());
-    }
-    for (auto handle : cf_handles) tmp_db->DestroyColumnFamilyHandle(handle);
-    tmp_db->Close();
-    delete tmp_db;
-  }
-  // Opening the db fails if the column families already exist, which is
-  // expected here, so we always return ok.
-  return Status::OK();
-}
-
-Status Storage::Open(bool read_only) {
-  db_mu_.lock();
-  db_closing_ = false;
-  db_refs_ = 0;
-  db_mu_.unlock();
-
-  rocksdb::Options options;
-  InitOptions(&options);
-  CreateColumnFamiles(options);
-  rocksdb::BlockBasedTableOptions metadata_table_opts;
-  metadata_table_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
-  metadata_table_opts.block_cache =
-      rocksdb::NewLRUCache(config_->rocksdb_options.metadata_block_cache_size, -1, false, 0.75);
-  metadata_table_opts.cache_index_and_filter_blocks = true;
-  metadata_table_opts.cache_index_and_filter_blocks_with_high_priority = true;
-  rocksdb::ColumnFamilyOptions metadata_opts(options);
-  metadata_opts.table_factory.reset(rocksdb::NewBlockBasedTableFactory(metadata_table_opts));
-  metadata_opts.compaction_filter_factory = std::make_shared<MetadataFilterFactory>();
-
-  rocksdb::BlockBasedTableOptions subkey_table_opts;
-  subkey_table_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
-  subkey_table_opts.block_cache =
-      rocksdb::NewLRUCache(config_->rocksdb_options.subkey_block_cache_size, -1, false, 0.75);
-  subkey_table_opts.cache_index_and_filter_blocks = true;
-  subkey_table_opts.cache_index_and_filter_blocks_with_high_priority = true;
-  rocksdb::ColumnFamilyOptions subkey_opts(options);
-  subkey_opts.table_factory.reset(rocksdb::NewBlockBasedTableFactory(subkey_table_opts));
-  subkey_opts.compaction_filter_factory = std::make_shared<SubKeyFilterFactory>(this);
-
-  rocksdb::BlockBasedTableOptions pubsub_table_opts;
-  pubsub_table_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
-  rocksdb::ColumnFamilyOptions pubsub_opts(options);
-  pubsub_opts.table_factory.reset(rocksdb::NewBlockBasedTableFactory(pubsub_table_opts));
-  pubsub_opts.compaction_filter_factory = std::make_shared<PubSubFilterFactory>();
-
-  std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
-  // Caution: don't change the order of the column families, or the handles will be mismatched
-  column_families.emplace_back(rocksdb::ColumnFamilyDescriptor(rocksdb::kDefaultColumnFamilyName, subkey_opts));
-  column_families.emplace_back(rocksdb::ColumnFamilyDescriptor(kMetadataColumnFamilyName, metadata_opts));
-  column_families.emplace_back(rocksdb::ColumnFamilyDescriptor(kZSetScoreColumnFamilyName, subkey_opts));
-  column_families.emplace_back(rocksdb::ColumnFamilyDescriptor(kPubSubColumnFamilyName, pubsub_opts));
-
-  auto start = std::chrono::high_resolution_clock::now();
-  rocksdb::Status s;
-  if (read_only) {
-    s = rocksdb::DB::OpenForReadOnly(options, config_->db_dir, column_families, &cf_handles_, &db_);
-  } else {
-    s = rocksdb::DB::Open(options, config_->db_dir, column_families, &cf_handles_, &db_);
-  }
-  auto end = std::chrono::high_resolution_clock::now();
-  int64_t duration = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
-  if (!s.ok()) {
-    LOG(INFO) << "[storage] Failed to load the data from disk: " << duration << " ms";
-    return Status(Status::DBOpenErr, s.ToString());
-  }
-  LOG(INFO) << "[storage] Success to load the data from disk: " << duration << " ms";
-  if (!read_only) {
-    // open backup engine
-    rocksdb::BackupableDBOptions bk_option(config_->backup_dir);
-    s = rocksdb::BackupEngine::Open(db_->GetEnv(), bk_option, &backup_);
-    if (!s.ok()) return Status(Status::DBBackupErr, s.ToString());
-  }
-  return Status::OK();
-}
-
-Status Storage::Open() {
-  return Open(false);
-}
-
-Status Storage::OpenForReadOnly() {
-  return Open(true);
-}
-
-Status Storage::CreateBackup() {
-  LOG(INFO) << "[storage] Start to create new backup";
-  auto tm = std::time(nullptr);
-  char time_str[25];
-  if (!std::strftime(time_str, sizeof(time_str), "%c", std::localtime(&tm))) {
-    return Status(Status::DBBackupErr, "Fail to format local time_str");
-  }
-  auto s = backup_->CreateNewBackupWithMetadata(db_, time_str);
-  if (!s.ok()) return Status(Status::DBBackupErr, s.ToString());
-  LOG(INFO) << "[storage] Success to create new backup";
-  return Status::OK();
-}
-
-Status Storage::DestroyBackup() {
-  backup_->StopBackup();
-  delete backup_;
-  return Status();
-}
-
-Status Storage::RestoreFromBackup() {
-  // TODO(@ruoshan): assert role to be slave
-  // We must reopen the backup engine every time, as the backup files may have changed
-  rocksdb::BackupableDBOptions bk_option(config_->backup_dir);
-  auto s = rocksdb::BackupEngine::Open(db_->GetEnv(), bk_option, &backup_);
-  if (!s.ok()) return Status(Status::DBBackupErr, s.ToString());
-  CloseDB();
-
-  s = backup_->RestoreDBFromLatestBackup(config_->db_dir, config_->db_dir);
-  if (!s.ok()) {
-    LOG(ERROR) << "[storage] Failed to restore: " << s.ToString();
-    return Status(Status::DBBackupErr, s.ToString());
-  }
-  LOG(INFO) << "[storage] Restore from backup";
-
-  // Reopen DB
-  auto s2 = Open();
-  if (!s2.IsOK()) {
-    LOG(ERROR) << "[storage] Failed to reopen db: " << s2.Msg();
-    return Status(Status::DBOpenErr);
-  }
-  return Status::OK();
-}
-
-void Storage::PurgeOldBackups(uint32_t num_backups_to_keep, uint32_t backup_max_keep_hours) {
-  std::vector<rocksdb::BackupInfo> backup_infos;
-  backup_->GetBackupInfo(&backup_infos);
-  if (backup_infos.size() > num_backups_to_keep) {
-    uint32_t num_backups_to_purge = static_cast<uint32_t>(backup_infos.size()) - num_backups_to_keep;
-    LOG(INFO) << "[storage] Going to purge " << num_backups_to_purge << " old backups";
-    for (uint32_t i = 0; i < num_backups_to_purge; i++) {
-      LOG(INFO) << "[storage] The old backup(id: "
-                << backup_infos[i].backup_id << ") would be purged, "
-                << " created at: " << backup_infos[i].timestamp
-                << ", size: " << backup_infos[i].size
-                << ", num files: " << backup_infos[i].number_files;
-    }
-    auto s = backup_->PurgeOldBackups(num_backups_to_keep);
-    LOG(INFO) << "[storage] Purge old backups, result: " << s.ToString();
-  }
-
-  if (backup_max_keep_hours == 0) return;
-  backup_infos.clear();
-  backup_->GetBackupInfo(&backup_infos);
-  time_t now = time(nullptr);
-  for (uint32_t i = 0; i < backup_infos.size(); i++) {
-    if (backup_infos[i].timestamp + backup_max_keep_hours*3600 >= now) break;
-    LOG(INFO) << "[storage] The old backup(id:"
-              << backup_infos[i].backup_id << ") would be purged because expired"
-              << ", created at: " << backup_infos[i].timestamp
-              << ", size: " << backup_infos[i].size
-              << ", num files: " << backup_infos[i].number_files;
-    backup_->DeleteBackup(backup_infos[i].backup_id);
-  }
-}
-
-Status Storage::GetWALIter(
-    rocksdb::SequenceNumber seq,
-    std::unique_ptr<rocksdb::TransactionLogIterator> *iter) {
-  auto s = db_->GetUpdatesSince(seq, iter);
-  if (!s.ok()) return Status(Status::DBGetWALErr, s.ToString());
-  if (!(*iter)->Valid()) return Status(Status::DBGetWALErr, "iterator not valid");
-  return Status::OK();
-}
-
-rocksdb::SequenceNumber Storage::LatestSeq() {
-  return db_->GetLatestSequenceNumber();
-}
-
-rocksdb::Status Storage::Write(const rocksdb::WriteOptions &options, rocksdb::WriteBatch *updates) {
-  if (reach_db_size_limit_) {
-    return rocksdb::Status::SpaceLimit();
-  }
-  return db_->Write(options, updates);
-}
-
-Status Storage::WriteBatch(std::string &&raw_batch) {
-  if (reach_db_size_limit_) {
-    return Status(Status::NotOK, "reach space limit");
-  }
-  auto bat = rocksdb::WriteBatch(std::move(raw_batch));
-  auto s = db_->Write(rocksdb::WriteOptions(), &bat);
-  if (!s.ok()) {
-    return Status(Status::NotOK, s.ToString());
-  }
-  return Status::OK();
-}
-
-rocksdb::ColumnFamilyHandle *Storage::GetCFHandle(const std::string &name) {
-  if (name == kMetadataColumnFamilyName) {
-    return cf_handles_[1];
-  } else if (name == kZSetScoreColumnFamilyName) {
-    return cf_handles_[2];
-  } else if (name == kPubSubColumnFamilyName) {
-    return cf_handles_[3];
-  }
-  return cf_handles_[0];
-}
-
-rocksdb::Status Storage::Compact(const Slice *begin, const Slice *end) {
-  rocksdb::CompactRangeOptions compact_opts;
-  compact_opts.change_level = true;
-  for (auto cf_handle : cf_handles_) {
-    rocksdb::Status s =
-        db_->CompactRange(compact_opts, cf_handle, begin, end);
-    if (!s.ok()) return s;
-  }
-  return rocksdb::Status::OK();
-}
-
-uint64_t Storage::GetTotalSize() {
-  return sst_file_manager_->GetTotalSize();
-}
-
-Status Storage::CheckDBSizeLimit() {
-  bool reach_db_size_limit;
-  if (config_->max_db_size == 0) {
-    reach_db_size_limit = false;
-  } else {
-    reach_db_size_limit = GetTotalSize() >= config_->max_db_size * GiB;
-  }
-  if (reach_db_size_limit_ == reach_db_size_limit) {
-    return Status::OK();
-  }
-  reach_db_size_limit_ = reach_db_size_limit;
-  if (reach_db_size_limit_) {
-    LOG(WARNING) << "[storage] ENABLE db_size limit " << config_->max_db_size << " GB"
-                 << "set kvrocks to read-only mode";
-  } else {
-    LOG(WARNING) << "[storage] DISABLE db_size limit, set kvrocks to read-write mode ";
-  }
-  return Status::OK();
-}
-
-void Storage::SetIORateLimit(uint64_t max_io_mb) {
-  if (max_io_mb == 0) {
-    max_io_mb = kIORateLimitMaxMb;
-  }
-  rate_limiter_->SetBytesPerSecond(max_io_mb * MiB);
-}
-
-rocksdb::DB *Storage::GetDB() { return db_; }
-
-Status Storage::IncrDBRefs() {
-  db_mu_.lock();
-  if (db_closing_) {
-    db_mu_.unlock();
-    return Status(Status::NotOK, "db is closing");
-  }
-  db_refs_++;
-  db_mu_.unlock();
-  return Status::OK();
-}
-
-Status Storage::DecrDBRefs() {
-  db_mu_.lock();
-  if (db_refs_ == 0) {
-    db_mu_.unlock();
-    return Status(Status::NotOK, "db refs was zero");
-  }
-  db_refs_--;
-  db_mu_.unlock();
-  return Status::OK();
-}
-
-Status Storage::BackupManager::OpenLatestMeta(Storage *storage,
-                                              int *fd,
-                                              rocksdb::BackupID *meta_id,
-                                              uint64_t *file_size) {
-  Status status = storage->CreateBackup();
-  if (!status.IsOK())  return status;
-  std::vector<rocksdb::BackupInfo> backup_infos;
-  storage->backup_->GetBackupInfo(&backup_infos);
-  auto latest_backup = backup_infos.back();
-  rocksdb::Status r_status = storage->backup_->VerifyBackup(latest_backup.backup_id);
-  if (!r_status.ok()) {
-    return Status(Status::NotOK, r_status.ToString());
-  }
-  *meta_id = latest_backup.backup_id;
-  std::string meta_file =
-      storage->config_->backup_dir + "/meta/" + std::to_string(*meta_id);
-  auto s = storage->backup_env_->FileExists(meta_file);
-  storage->backup_env_->GetFileSize(meta_file, file_size);
-  // NOTE: here we use the system's open instead of using rocksdb::Env to open
-  // a sequential file, because we want to use the sendfile syscall.
-  *fd = open(meta_file.c_str(), O_RDONLY);
-  if (*fd < 0) {
-    return Status(Status::NotOK, strerror(errno));
-  }
-  return Status::OK();
-}
-
-int Storage::BackupManager::OpenDataFile(Storage *storage, const std::string &rel_path,
-                                         uint64_t *file_size) {
-  std::string abs_path = storage->config_->backup_dir + "/" + rel_path;
-  auto s = storage->backup_env_->FileExists(abs_path);
-  if (!s.ok()) {
-    LOG(ERROR) << "[storage] Data file [" << abs_path << "] not found";
-    return -1;
-  }
-  storage->backup_env_->GetFileSize(abs_path, file_size);
-  auto rv = open(abs_path.c_str(), O_RDONLY);
-  if (rv < 0) {
-    LOG(ERROR) << "[storage] Failed to open file: " << strerror(errno);
-  }
-  return rv;
-}
-
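-// Parse the backup meta file received from the master (timestamp, sequence,
-// optional metadata and the file/crc32 list) and persist it under the backup dir.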
-Storage::BackupManager::MetaInfo Storage::BackupManager::ParseMetaAndSave(
-    Storage *storage, rocksdb::BackupID meta_id, evbuffer *evbuf) {
-  char *line;
-  size_t len;
-  Storage::BackupManager::MetaInfo meta;
-  auto meta_file = "meta/" + std::to_string(meta_id);
-  DLOG(INFO) << "[meta] id: " << meta_id;
-
-  // Save the meta to tmp file
-  auto wf = NewTmpFile(storage, meta_file);
-  auto data = evbuffer_pullup(evbuf, -1);
-  wf->Append(rocksdb::Slice(reinterpret_cast<char *>(data),
-                            evbuffer_get_length(evbuf)));
-  wf->Close();
-
-  // timestamp;
-  line = evbuffer_readln(evbuf, &len, EVBUFFER_EOL_LF);
-  DLOG(INFO) << "[meta] timestamp: " << line;
-  meta.timestamp = std::strtoll(line, nullptr, 10);
-  free(line);
-  // sequence
-  line = evbuffer_readln(evbuf, &len, EVBUFFER_EOL_LF);
-  DLOG(INFO) << "[meta] seq:" << line;
-  meta.seq = std::strtoull(line, nullptr, 10);
-  free(line);
-  // optional metadata
-  line = evbuffer_readln(evbuf, &len, EVBUFFER_EOL_LF);
-  if (strncmp(line, "metadata", 8) == 0) {
-    DLOG(INFO) << "[meta] meta: " << line;
-    meta.meta_data = std::string(line, len);
-    free(line);
-    line = evbuffer_readln(evbuf, &len, EVBUFFER_EOL_LF);
-  }
-  DLOG(INFO) << "[meta] file count: " << line;
-  free(line);
-  // file list
-  while (true) {
-    line = evbuffer_readln(evbuf, &len, EVBUFFER_EOL_LF);
-    if (!line) {
-      break;
-    }
-    DLOG(INFO) << "[meta] file info: " << line;
-    auto cptr = line;
-    while (*(cptr++) != ' ') {}
-    auto filename = std::string(line, cptr - line - 1);
-    while (*(cptr++) != ' ') {}
-    auto crc32 = std::strtoul(cptr, nullptr, 10);
-    meta.files.emplace_back(filename, crc32);
-    free(line);
-  }
-  SwapTmpFile(storage, meta_file);
-  return meta;
-}
-
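-// Create the directory and any missing parent directories, like `mkdir -p`.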
-Status MkdirRecursively(rocksdb::Env *env, const std::string &dir) {
-  if (env->CreateDirIfMissing(dir).ok()) return Status::OK();
-
-  std::string parent;
-  for (auto pos = dir.find('/', 1); pos != std::string::npos;
-       pos = dir.find('/', pos + 1)) {
-    parent = dir.substr(0, pos);
-    if (!env->CreateDirIfMissing(parent).ok()) {
-      LOG(ERROR) << "[storage] Failed to create directory recursively";
-      return Status(Status::NotOK);
-    }
-  }
-  if (env->CreateDirIfMissing(dir).ok()) return Status::OK();
-  return Status(Status::NotOK);
-}
-
-std::unique_ptr<rocksdb::WritableFile> Storage::BackupManager::NewTmpFile(
-    Storage *storage, const std::string &rel_path) {
-  std::string tmp_path = storage->config_->backup_dir + "/" + rel_path + ".tmp";
-  auto s = storage->backup_env_->FileExists(tmp_path);
-  if (s.ok()) {
-    LOG(ERROR) << "[storage] Data file exists, override";
-    storage->backup_env_->DeleteFile(tmp_path);
-  }
-  // Create directory if missing
-  auto abs_dir = tmp_path.substr(0, tmp_path.rfind('/'));
-  if (!MkdirRecursively(storage->backup_env_, abs_dir).IsOK()) {
-    return nullptr;
-  }
-  std::unique_ptr<rocksdb::WritableFile> wf;
-  s = storage->backup_env_->NewWritableFile(tmp_path, &wf, rocksdb::EnvOptions());
-  if (!s.ok()) {
-    LOG(ERROR) << "[storage] Failed to create data file: " << s.ToString();
-    return nullptr;
-  }
-  return wf;
-}
-
-Status Storage::BackupManager::SwapTmpFile(Storage *storage,
-                                           const std::string &rel_path) {
-  std::string tmp_path = storage->config_->backup_dir + "/" + rel_path + ".tmp";
-  std::string orig_path = storage->config_->backup_dir + "/" + rel_path;
-  if (!storage->backup_env_->RenameFile(tmp_path, orig_path).ok()) {
-    return Status(Status::NotOK, "unable to rename: "+tmp_path);
-  }
-  return Status::OK();
-}
-
-bool Storage::BackupManager::FileExists(Storage *storage, const std::string &rel_path) {
-  auto s = storage->backup_env_->FileExists(storage->config_->backup_dir + "/" + rel_path);
-  return s.ok();
-}
-
-bool isDir(const char* name) {
-  struct stat st{};
-  if (stat(name, &st) != 0) {
-    return false;
-  }
-  return (st.st_mode & S_IFDIR) != 0;
-}
-
-bool PathExists(const char* name) {
-  struct stat st{};
-  if (stat(name, &st) != 0) {
-    if (errno == ENOENT) {
-      return false;
-    }
-    // Other types of error are treated as the path exists (might be a bad idea)
-  }
-  return true;
-}
-
-Status RmdirRecursively(rocksdb::Env *env, const std::string &dir) {
-  if (!PathExists(dir.c_str())) {
-    return Status::OK();
-  }
-
-  std::vector<std::string> children;
-  env->GetChildren(dir, &children);
-  rocksdb::Status s;
-  for (const auto &c : children) {
-    if (c == "." || c == "..") continue;
-    auto abs_path = dir + "/" + c;
-    if (isDir(abs_path.c_str())) {
-      if (!RmdirRecursively(env, abs_path).IsOK()) {
-        return Status(Status::NotOK);
-      }
-    } else {
-      s = env->DeleteFile(abs_path);
-      if (!s.ok()) {
-        LOG(ERROR) << "[storage] Failed to delete file: " << s.ToString();
-        return Status(Status::NotOK);
-      }
-    }
-  }
-  s = env->DeleteDir(dir);
-  if (s.ok()) {
-    return Status::OK();
-  }
-  LOG(ERROR) << "[storage] Failed to delete dir: " << s.ToString();
-  return Status(Status::NotOK);
-}
-
-Status Storage::BackupManager::PurgeBackup(Storage *storage) {
-  return RmdirRecursively(storage->backup_env_, storage->config_->backup_dir);
-}
-
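-// Wipe the local backup dir when the incoming backup id is not contiguous with
-// the last local one, so stale backup files don't get mixed with the new backup.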
-void Storage::PurgeBackupIfNeed(uint32_t next_backup_id) {
-  std::vector<rocksdb::BackupInfo> backup_infos;
-  backup_->GetBackupInfo(&backup_infos);
-  size_t num_backup = backup_infos.size();
-  if (num_backup > 0 && backup_infos[num_backup-1].backup_id != next_backup_id-1)  {
-    RmdirRecursively(backup_env_, config_->backup_dir);
-    rocksdb::Env::Default()->CreateDirIfMissing(config_->backup_dir);
-  }
-}
-}  // namespace Engine
diff --git a/src/storage.h b/src/storage.h
deleted file mode 100644
index 14e6ca1..0000000
--- a/src/storage.h
+++ /dev/null
@@ -1,120 +0,0 @@
-#pragma once
-
-#include <inttypes.h>
-#include <rocksdb/db.h>
-#include <rocksdb/options.h>
-#include <rocksdb/utilities/backupable_db.h>
-#include <event2/bufferevent.h>
-#include <utility>
-#include <memory>
-#include <string>
-#include <vector>
-#include <atomic>
-
-#include "status.h"
-#include "lock_manager.h"
-#include "config.h"
-
-enum ColumnFamilyID{
-  kColumnFamilyIDDefault,
-  kColumnFamilyIDMetadata,
-  kColumnFamilyIDZSetScore,
-  kColumnFamilyIDPubSub,
-};
-
-namespace Engine {
-
-class Storage {
- public:
-  explicit Storage(Config *config)
-      :backup_env_(rocksdb::Env::Default()),
-       config_(config),
-       lock_mgr_(16) {}
-  ~Storage();
-
-  Status Open(bool read_only);
-  Status Open();
-  Status OpenForReadOnly();
-  void CloseDB();
-  void InitOptions(rocksdb::Options *options);
-  Status CreateColumnFamiles(const rocksdb::Options &options);
-  Status CreateBackup();
-  Status DestroyBackup();
-  Status RestoreFromBackup();
-  Status GetWALIter(rocksdb::SequenceNumber seq,
-                    std::unique_ptr<rocksdb::TransactionLogIterator> *iter);
-  Status WriteBatch(std::string &&raw_batch);
-  rocksdb::SequenceNumber LatestSeq();
-  rocksdb::Status Write(const rocksdb::WriteOptions& options, rocksdb::WriteBatch* updates);
-  bool WALHasNewData(rocksdb::SequenceNumber seq) { return seq <= LatestSeq(); }
-  void PurgeBackupIfNeed(uint32_t next_backup_id);
-
-  rocksdb::Status Compact(const rocksdb::Slice *begin, const rocksdb::Slice *end);
-  rocksdb::DB *GetDB();
-  bool IsClosing();
-  Status IncrDBRefs();
-  Status DecrDBRefs();
-  const std::string GetName() {return config_->db_name; }
-  rocksdb::ColumnFamilyHandle *GetCFHandle(const std::string &name);
-  std::vector<rocksdb::ColumnFamilyHandle *> GetCFHandles() { return cf_handles_; }
-  LockManager *GetLockManager() { return &lock_mgr_; }
-  void PurgeOldBackups(uint32_t num_backups_to_keep, uint32_t backup_max_keep_hours);
-  uint64_t GetTotalSize();
-  Status CheckDBSizeLimit();
-  void SetIORateLimit(uint64_t max_io_mb);
-
-  uint64_t GetFlushCount() { return flush_count_; }
-  void IncrFlushCount(uint64_t n) { flush_count_.fetch_add(n); }
-  uint64_t GetCompactionCount() { return compaction_count_; }
-  void IncrCompactionCount(uint64_t n) { compaction_count_.fetch_add(n); }
-
-  Storage(const Storage &) = delete;
-  Storage &operator=(const Storage &) = delete;
-
-  class BackupManager {
-   public:
-    // Master side
-    static Status OpenLatestMeta(Storage *storage,
-                                 int *fd,
-                                 rocksdb::BackupID *meta_id,
-                                 uint64_t *file_size);
-    static int OpenDataFile(Storage *storage, const std::string &rel_path,
-                            uint64_t *file_size);
-
-    // Slave side
-    struct MetaInfo {
-      int64_t timestamp;
-      rocksdb::SequenceNumber seq;
-      std::string meta_data;
-      // [[filename, checksum]...]
-      std::vector<std::pair<std::string, uint32_t>> files;
-    };
-    static MetaInfo ParseMetaAndSave(Storage *storage,
-                                     rocksdb::BackupID meta_id,
-                                     evbuffer *evbuf);
-    static std::unique_ptr<rocksdb::WritableFile> NewTmpFile(
-        Storage *storage, const std::string &rel_path);
-    static Status SwapTmpFile(Storage *storage, const std::string &rel_path);
-    static bool FileExists(Storage *storage, const std::string &rel_path);
-    static Status PurgeBackup(Storage *storage);
-  };
-
- private:
-  rocksdb::DB *db_ = nullptr;
-  rocksdb::BackupEngine *backup_ = nullptr;
-  rocksdb::Env *backup_env_;
-  std::shared_ptr<rocksdb::SstFileManager> sst_file_manager_;
-  std::shared_ptr<rocksdb::RateLimiter> rate_limiter_;
-  Config *config_ = nullptr;
-  std::vector<rocksdb::ColumnFamilyHandle *> cf_handles_;
-  LockManager lock_mgr_;
-  bool reach_db_size_limit_ = false;
-  std::atomic<uint64_t> flush_count_{0};
-  std::atomic<uint64_t> compaction_count_{0};
-
-  std::mutex db_mu_;
-  int db_refs_ = 0;
-  bool db_closing_ = true;
-};
-
-}  // namespace Engine
diff --git a/src/task_runner.cc b/src/task_runner.cc
deleted file mode 100644
index 62c4ba7..0000000
--- a/src/task_runner.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-#include "task_runner.h"
-
-#include <thread>
-#include "util.h"
-
-Status TaskRunner::Publish(Task task) {
-  mu_.lock();
-  if (stop_) {
-    mu_.unlock();
-    return Status(Status::NotOK, "the runner was stopped");
-  }
-  if (task_queue_.size() >= max_queue_size_) {
-    mu_.unlock();
-    return Status(Status::NotOK, "the task queue was reached max length");
-  }
-  task_queue_.emplace_back(task);
-  cond_.notify_all();
-  mu_.unlock();
-  return Status::OK();
-}
-
-void TaskRunner::Start() {
-  for (int i = 0; i < n_thread_; i++) {
-    threads_.emplace_back(std::thread([this]() {
-      Util::ThreadSetName("task-runner");
-      this->run();
-    }));
-  }
-}
-
-void TaskRunner::Restart() {
-  Stop();
-  Join();
-  mu_.lock();
-  threads_.clear();
-  task_queue_.clear();
-  stop_ = false;
-  mu_.unlock();
-  Start();
-}
-
-void TaskRunner::Stop() {
-  mu_.lock();
-  stop_ = true;
-  cond_.notify_all();
-  mu_.unlock();
-}
-
-void TaskRunner::Join() {
-  for (size_t i = 0; i < threads_.size(); i++) {
-    if (threads_[i].joinable()) threads_[i].join();
-  }
-}
-
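-// Worker loop: wait on the condition variable and run queued tasks until
-// Stop() is called; the lock is released while a task callback executes.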
-void TaskRunner::run() {
-  Task task;
-  std::unique_lock<std::mutex> lock(mu_);
-  while (!stop_) {
-    cond_.wait(lock, [this]() -> bool { return stop_ || !task_queue_.empty();});
-    while (!stop_ && !task_queue_.empty()) {
-      task = task_queue_.front();
-      task_queue_.pop_front();
-      lock.unlock();
-      if (task.callback) task.callback(task.arg);
-      lock.lock();
-    }
-  }
-  task_queue_.clear();
-  lock.unlock();
-  // CAUTION: the remaining tasks are dropped; don't use the task runner for tasks that must not be dropped
-}
diff --git a/src/task_runner.h b/src/task_runner.h
deleted file mode 100644
index 396f4f5..0000000
--- a/src/task_runner.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#pragma once
-
-#include <cstdint>
-#include <vector>
-#include <list>
-#include <thread>
-#include <mutex>
-#include <condition_variable>
-#include <functional>
-
-#include "status.h"
-
-struct Task {
-  std::function<void(void*)> callback;
-  void *arg;
-};
-
-class TaskRunner {
- public:
-  explicit TaskRunner(int n_thread = 2, uint32_t max_queue_size = 10240)
-  :max_queue_size_(max_queue_size), n_thread_(n_thread) {}
-  ~TaskRunner() = default;
-  Status Publish(Task task);
-  size_t QueueSize() { return task_queue_.size(); }
-  void Start();
-  void Restart();
-  void Stop();
-  void Join();
- private:
-  void run();
-  bool stop_ = false;
-  uint32_t max_queue_size_;
-  std::list<Task> task_queue_;
-  std::mutex mu_;
-  std::condition_variable cond_;
-  int n_thread_;
-  std::vector<std::thread> threads_;
-};
diff --git a/src/util.cc b/src/util.cc
deleted file mode 100644
index 12a4677..0000000
--- a/src/util.cc
+++ /dev/null
@@ -1,291 +0,0 @@
-#define __STDC_FORMAT_MACROS
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/uio.h>
-#include <event2/util.h>
-#include <glog/logging.h>
-#include <netinet/tcp.h>
-#include <sys/socket.h>
-#include <poll.h>
-#include <errno.h>
-#include <pthread.h>
-
-#include <string>
-#include <algorithm>
-
-#include "util.h"
-#include "status.h"
-
-#ifndef POLLIN
-# define POLLIN      0x0001    /* There is data to read */
-# define POLLPRI     0x0002    /* There is urgent data to read */
-# define POLLOUT     0x0004    /* Writing now will not block */
-# define POLLERR     0x0008    /* Error condition */
-# define POLLHUP     0x0010    /* Hung up */
-# define POLLNVAL    0x0020    /* Invalid request: fd not open */
-#endif
-
-namespace Util {
-sockaddr_in NewSockaddrInet(const std::string &host, uint32_t port) {
-  sockaddr_in sin{};
-  sin.sin_family = AF_INET;
-  sin.sin_addr.s_addr = inet_addr(host.c_str());
-  sin.sin_port = htons(port);
-  return sin;
-}
-
-Status SockConnect(std::string host, uint32_t port, int *fd) {
-  sockaddr_in sin{};
-  sin.sin_family = AF_INET;
-  sin.sin_addr.s_addr = inet_addr(host.c_str());
-  sin.sin_port = htons(port);
-  *fd = socket(AF_INET, SOCK_STREAM, 0);
-  auto rv = connect(*fd, reinterpret_cast<sockaddr *>(&sin), sizeof(sin));
-  if (rv < 0) {
-    close(*fd);
-    *fd = -1;
-    return Status(Status::NotOK, strerror(errno));
-  }
-  setsockopt(*fd, SOL_SOCKET, SO_KEEPALIVE, nullptr, 0);
-  setsockopt(*fd, IPPROTO_TCP, TCP_NODELAY, nullptr, 0);
-  return Status::OK();
-}
-
-// NOTE: fd should be blocking here
-Status SockSend(int fd, const std::string &data) {
-  ssize_t n = 0;
-  while (n < static_cast<ssize_t>(data.size())) {
-    ssize_t nwritten = write(fd, data.c_str()+n, data.size()-n);
-    if (nwritten == -1) {
-      return Status(Status::NotOK, strerror(errno));
-    }
-    n += nwritten;
-  }
-  return Status::OK();
-}
-
-int GetPeerAddr(int fd, std::string *addr, uint32_t *port) {
-  sockaddr_storage sa{};
-  socklen_t sa_len = sizeof(sa);
-  if (getpeername(fd, reinterpret_cast<sockaddr *>(&sa), &sa_len) < 0) {
-    return -1;
-  }
-  if (sa.ss_family == AF_INET) {
-    auto sa4 = reinterpret_cast<sockaddr_in *>(&sa);
-    char buf[INET_ADDRSTRLEN];
-    inet_ntop(AF_INET, reinterpret_cast<void *>(&sa4->sin_addr), buf, INET_ADDRSTRLEN);
-    addr->clear();
-    addr->append(buf);
-    *port = ntohs(sa4->sin_port);
-    return 0;
-  }
-  return -2;  // only support AF_INET currently
-}
-
-Status StringToNum(const std::string &str, int64_t *n, int64_t min, int64_t max) {
-  try {
-    *n = static_cast<int64_t>(std::stoll(str));
-    if (max > min && (*n < min || *n > max)) {
-      return Status(Status::NotOK, "value shoud between "+std::to_string(min)+" and "+std::to_string(max));
-    }
-  } catch (std::exception &e) {
-    return Status(Status::NotOK, "value is not an integer or out of range");
-  }
-  return Status::OK();
-}
-
-std::string ToLower(std::string in) {
-  std::transform(in.begin(), in.end(), in.begin(),
-                 [](char c) -> char { return static_cast<char>(std::tolower(c)); });
-  return in;
-}
-
-void Trim(const std::string &in, const std::string &chars, std::string *out) {
-  out->clear();
-  if (in.empty()) return;
-  out->assign(in);
-  out->erase(0, out->find_first_not_of(chars));
-  out->erase(out->find_last_not_of(chars)+1);
-}
-
-void Split(std::string in, std::string delim, std::vector<std::string> *out) {
-  if (in.empty() || !out) return;
-  out->clear();
-
-  std::string::size_type pos = 0;
-  std::string elem, trimed_elem;
-  do {
-    pos = in.find_first_of(delim);
-    elem = in.substr(0, pos);
-    Trim(elem, delim, &trimed_elem);
-    if (!trimed_elem.empty()) out->push_back(trimed_elem);
-    in = in.substr(pos+1);
-  } while (pos != std::string::npos);
-}
-
-int StringMatch(const std::string &pattern, const std::string &in, int nocase) {
-  return StringMatchLen(pattern.c_str(), pattern.length(), in.c_str(), in.length(), nocase);
-}
-
-// Glob-style pattern matching.
-int StringMatchLen(const char *pattern, int patternLen,
-                   const char *string, int stringLen, int nocase) {
-  while (patternLen && stringLen) {
-    switch (pattern[0]) {
-      case '*':
-        while (pattern[1] == '*') {
-          pattern++;
-          patternLen--;
-        }
-        if (patternLen == 1)
-          return 1; /* match */
-        while (stringLen) {
-          if (StringMatchLen(pattern + 1, patternLen - 1,
-                             string, stringLen, nocase))
-            return 1; /* match */
-          string++;
-          stringLen--;
-        }
-        return 0; /* no match */
-        break;
-      case '?':
-        if (stringLen == 0)
-          return 0; /* no match */
-        string++;
-        stringLen--;
-        break;
-      case '[': {
-        int not_symbol, match;
-
-        pattern++;
-        patternLen--;
-        not_symbol = pattern[0] == '^';
-        if (not_symbol) {
-          pattern++;
-          patternLen--;
-        }
-        match = 0;
-        while (1) {
-          if (pattern[0] == '\\' && patternLen >= 2) {
-            pattern++;
-            patternLen--;
-            if (pattern[0] == string[0])
-              match = 1;
-          } else if (pattern[0] == ']') {
-            break;
-          } else if (patternLen == 0) {
-            pattern--;
-            patternLen++;
-            break;
-          } else if (pattern[1] == '-' && patternLen >= 3) {
-            int start = pattern[0];
-            int end = pattern[2];
-            int c = string[0];
-            if (start > end) {
-              int t = start;
-              start = end;
-              end = t;
-            }
-            if (nocase) {
-              start = tolower(start);
-              end = tolower(end);
-              c = tolower(c);
-            }
-            pattern += 2;
-            patternLen -= 2;
-            if (c >= start && c <= end)
-              match = 1;
-          } else {
-            if (!nocase) {
-              if (pattern[0] == string[0])
-                match = 1;
-            } else {
-              if (tolower(static_cast<int>(pattern[0])) == tolower(static_cast<int>(string[0])))
-                match = 1;
-            }
-          }
-          pattern++;
-          patternLen--;
-        }
-        if (not_symbol)
-          match = !match;
-        if (!match)
-          return 0; /* no match */
-        string++;
-        stringLen--;
-        break;
-      }
-      case '\\':
-        if (patternLen >= 2) {
-          pattern++;
-          patternLen--;
-        }
-        /* fall through */
-      default:
-        if (!nocase) {
-          if (pattern[0] != string[0])
-            return 0; /* no match */
-        } else {
-          if (tolower(static_cast<int>(pattern[0])) != tolower(static_cast<int>(string[0])))
-            return 0; /* no match */
-        }
-        string++;
-        stringLen--;
-        break;
-    }
-    pattern++;
-    patternLen--;
-    if (stringLen == 0) {
-      while (*pattern == '*') {
-        pattern++;
-        patternLen--;
-      }
-      break;
-    }
-  }
-  if (patternLen == 0 && stringLen == 0)
-    return 1;
-  return 0;
-}
-
-void BytesToHuman(char *buf, size_t size, uint64_t n) {
-  double d;
-
-  if (n < 1024) {
-    snprintf(buf, size, "%" PRIu64 "B", n);
-  } else if (n < (1024*1024)) {
-    d = static_cast<double>(n)/(1024);
-    snprintf(buf, size, "%.2fK", d);
-  } else if (n < (1024LL*1024*1024)) {
-    d = static_cast<double>(n)/(1024*1024);
-    snprintf(buf, size, "%.2fM", d);
-  } else if (n < (1024LL*1024*1024*1024)) {
-    d = static_cast<double>(n)/(1024LL*1024*1024);
-    snprintf(buf, size, "%.2fG", d);
-  } else if (n < (1024LL*1024*1024*1024*1024)) {
-    d = static_cast<double>(n)/(1024LL*1024*1024*1024);
-    snprintf(buf, size, "%.2fT", d);
-  } else if (n < (1024LL*1024*1024*1024*1024*1024)) {
-    d = static_cast<double>(n)/(1024LL*1024*1024*1024*1024);
-    snprintf(buf, size, "%.2fP", d);
-  } else {
-    snprintf(buf, size, "%" PRIu64 "B", n);
-  }
-}
-
-bool IsPortInUse(int port) {
-  int fd;
-  Status s = SockConnect("0.0.0.0", static_cast<uint32_t>(port), &fd);
-  if (fd > 0) close(fd);
-  return s.IsOK();
-}
-
-void ThreadSetName(const char *name) {
-#ifdef __APPLE__
-  pthread_setname_np(name);
-#else
-  pthread_setname_np(pthread_self(), name);
-#endif
-}
-}  // namespace Util
diff --git a/src/util.h b/src/util.h
deleted file mode 100644
index 53c9a5a..0000000
--- a/src/util.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#pragma once
-
-#define __STDC_FORMAT_MACROS
-#include <inttypes.h>
-#include <arpa/inet.h>
-
-#include <cctype>
-#include <string>
-#include <vector>
-
-#include "status.h"
-
-namespace Util {
-// sock util
-sockaddr_in NewSockaddrInet(const std::string &host, uint32_t port);
-Status SockConnect(std::string host, uint32_t port, int *fd);
-Status SockSend(int fd, const std::string &data);
-int GetPeerAddr(int fd, std::string *addr, uint32_t *port);
-bool IsPortInUse(int port);
-
-// string util
-Status StringToNum(const std::string &str, int64_t *n, int64_t min = INT64_MIN, int64_t max = INT64_MAX);
-std::string ToLower(std::string in);
-void BytesToHuman(char *buf, size_t size, uint64_t n);
-void Trim(const std::string &in, const std::string &chars, std::string *out);
-void Split(std::string in, std::string delim, std::vector<std::string> *out);
-int StringMatch(const std::string &pattern, const std::string &in, int nocase);
-int StringMatchLen(const char *p, int plen, const char *s, int slen, int nocase);
-
-void ThreadSetName(const char *name);
-}  // namespace Util
diff --git a/src/valgrind.sup b/src/valgrind.sup
deleted file mode 100644
index ab4dd7e..0000000
--- a/src/valgrind.sup
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-        rocksdb thread local singletons
-        Memcheck:Leak
-        ...
-        fun:_ZN7rocksdb3Env7DefaultEv
-        fun:_ZN6Config4LoadESs
-        ...
-}
-
-{
-        rocksdb column thread local leaks
-        Memcheck:Leak
-        ...
-        fun:_ZN7rocksdb14ThreadLocalPtr10StaticMeta10SetHandlerEjPFvPvE
-        fun:_ZN7rocksdb16ColumnFamilyDataC1EjRKSsPNS_7VersionEPNS_5CacheEPNS_18WriteBufferManagerERKNS_1
-        ...
-}
diff --git a/src/version.h.in b/src/version.h.in
deleted file mode 100644
index 2a46b8d..0000000
--- a/src/version.h.in
+++ /dev/null
@@ -1,4 +0,0 @@
-#pragma once
-
-#define VERSION "@PROJECT_VERSION@"
-#define GIT_COMMIT "@GIT_SHA@"
diff --git a/src/worker.cc b/src/worker.cc
deleted file mode 100644
index 91fdef6..0000000
--- a/src/worker.cc
+++ /dev/null
@@ -1,372 +0,0 @@
-#include "worker.h"
-
-#include <glog/logging.h>
-#include <list>
-#include <cctype>
-#include <utility>
-#include <algorithm>
-
-#include "redis_request.h"
-#include "redis_connection.h"
-#include "server.h"
-#include "util.h"
-
-Worker::Worker(Server *svr, Config *config, bool repl) : svr_(svr), repl_(repl) {
-  base_ = event_base_new();
-  if (!base_) throw std::exception();
-
-  timer_ = event_new(base_, -1, EV_PERSIST, TimerCB, this);
-  timeval tm = {10, 0};
-  evtimer_add(timer_, &tm);
-
-  int port = repl ? config->repl_port : config->port;
-  auto binds = repl ? config->repl_binds : config->binds;
-  for (const auto &bind : binds) {
-    Status s = listen(bind, port, config->backlog);
-    if (!s.IsOK()) {
-      LOG(ERROR) << "[worker] Failed to listen on: "<< bind << ":" << port
-                 << ", encounter error: " << s.Msg();
-      exit(1);
-    }
-  }
-}
-
-Worker::~Worker() {
-  std::list<Redis::Connection*> conns;
-  for (const auto &iter : conns_) {
-    conns.emplace_back(iter.second);
-  }
-  for (const auto &iter : conns) {
-    iter->Close();
-  }
-  event_free(timer_);
-  if (rate_limit_group_ != nullptr) {
-    bufferevent_rate_limit_group_free(rate_limit_group_);
-  }
-  if (rate_limit_group_cfg_ != nullptr) {
-    ev_token_bucket_cfg_free(rate_limit_group_cfg_);
-  }
-  event_base_free(base_);
-}
-
-void Worker::TimerCB(int, int16_t events, void *ctx) {
-  auto worker = static_cast<Worker*>(ctx);
-  auto config = worker->svr_->GetConfig();
-  if (config->timeout == 0) return;
-  worker->KickoutIdleClients(config->timeout);
-}
-
-void Worker::newConnection(evconnlistener *listener, evutil_socket_t fd,
-                           sockaddr *address, int socklen, void *ctx) {
-  auto worker = static_cast<Worker *>(ctx);
-  if (worker->IsRepl()) {
-    DLOG(INFO) << "[worker] New connection: fd=" << fd
-               << " from port: " << worker->svr_->GetConfig()->repl_port << " thread #"
-               << worker->tid_;
-  } else {
-    DLOG(INFO) << "[worker] New connection: fd=" << fd
-               << " from port: " << worker->svr_->GetConfig()->port << " thread #"
-               << worker->tid_;
-  }
-  int enable = 1;
-  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast<void*>(&enable), sizeof(enable)) < 0) {
-    LOG(ERROR) << "[worker] Failed to set tcp-keepalive, err:" << evutil_socket_geterror(fd);
-    evutil_closesocket(fd);
-    return;
-  }
-  event_base *base = evconnlistener_get_base(listener);
-  bufferevent *bev = bufferevent_socket_new(base, fd, BEV_OPT_CLOSE_ON_FREE);
-  auto conn = new Redis::Connection(bev, worker);
-  bufferevent_setcb(bev, Redis::Connection::OnRead, Redis::Connection::OnWrite,
-                    Redis::Connection::OnEvent, conn);
-  bufferevent_enable(bev, EV_READ);
-  Status status = worker->AddConnection(conn);
-  std::string ip;
-  uint32_t port;
-  if (Util::GetPeerAddr(fd, &ip, &port) == 0) {
-    conn->SetAddr(ip, port);
-  }
-  if (!status.IsOK()) {
-    std::string err_msg = Redis::Error("ERR " + status.Msg());
-    write(fd, err_msg.data(), err_msg.size());
-    conn->Close();
-  }
-
-  if (worker->rate_limit_group_ != nullptr) {
-    bufferevent_add_to_rate_limit_group(bev, worker->rate_limit_group_);
-  }
-}
-
-Status Worker::listen(const std::string &host, int port, int backlog) {
-  sockaddr_in sin{};
-  sin.sin_family = AF_INET;
-  evutil_inet_pton(AF_INET, host.data(), &(sin.sin_addr));
-  sin.sin_port = htons(port);
-  int fd = socket(AF_INET, SOCK_STREAM, 0);
-  int sock_opt = 1;
-  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &sock_opt, sizeof(sock_opt)) < 0) {
-    return Status(Status::NotOK, evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
-  }
-  // to support multi-thread binding on macOS
-  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &sock_opt, sizeof(sock_opt)) < 0) {
-    return Status(Status::NotOK, evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
-  }
-  if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
-    return Status(Status::NotOK, evutil_socket_error_to_string(EVUTIL_SOCKET_ERROR()));
-  }
-  evutil_make_socket_nonblocking(fd);
-  auto lev = evconnlistener_new(base_, newConnection, this,
-                                LEV_OPT_CLOSE_ON_FREE, backlog, fd);
-  listen_events_.emplace_back(lev);
-  return Status::OK();
-}
-
-void Worker::Run(std::thread::id tid) {
-  tid_ = tid;
-  if (event_base_dispatch(base_) != 0) {
-    LOG(ERROR) << "[worker] Failed to run server, err: " << strerror(errno);
-  }
-}
-
-void Worker::Stop() {
-  event_base_loopbreak(base_);
-  for (const auto &lev : listen_events_) {
-    evutil_socket_t fd = evconnlistener_get_fd(lev);
-    if (fd > 0) close(fd);
-    evconnlistener_free(lev);
-  }
-}
-
-Status Worker::AddConnection(Redis::Connection *c) {
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  auto iter = conns_.find(c->GetFD());
-  if (iter != conns_.end()) {
-    return Status(Status::NotOK, "connection was exists");
-  }
-  int max_clients = svr_->GetConfig()->maxclients;
-  if (svr_->IncrClientNum() >= max_clients) {
-    svr_->DecrClientNum();
-    return Status(Status::NotOK, "max number of clients reached");
-  }
-  conns_.insert(std::pair<int, Redis::Connection*>(c->GetFD(), c));
-  uint64_t id = svr_->GetClientID()->fetch_add(1, std::memory_order_relaxed);
-  c->SetID(id);
-  return Status::OK();
-}
-
-
-Redis::Connection *Worker::removeConnection(int fd) {
-  Redis::Connection *conn = nullptr;
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  auto iter = conns_.find(fd);
-  if (iter != conns_.end()) {
-    conn = iter->second;
-    conns_.erase(iter);
-    svr_->DecrClientNum();
-  }
-  iter = monitor_conns_.find(fd);
-  if (iter != monitor_conns_.end()) {
-    conn = iter->second;
-    monitor_conns_.erase(iter);
-    svr_->DecrClientNum();
-    svr_->DecrMonitorClientNum();
-  }
-  return conn;
-}
-
-void Worker::DetachConnection(Redis::Connection *conn) {
-  if (!conn) return;
-  removeConnection(conn->GetFD());
-  if (rate_limit_group_ != nullptr) {
-    bufferevent_remove_from_rate_limit_group(conn->GetBufferEvent());
-  }
-  auto bev = conn->GetBufferEvent();
-  bufferevent_disable(bev, EV_READ|EV_WRITE);
-  bufferevent_setcb(bev, nullptr, nullptr, nullptr, nullptr);
-}
-
-void Worker::FreeConnection(Redis::Connection *conn) {
-  if (!conn) return;
-  removeConnection(conn->GetFD());
-  if (rate_limit_group_ != nullptr) {
-    bufferevent_remove_from_rate_limit_group(conn->GetBufferEvent());
-  }
-  delete conn;
-}
-
-void Worker::FreeConnectionByID(int fd, uint64_t id) {
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  auto conn_iter = conns_.find(fd);
-  if (conn_iter != conns_.end() && conn_iter->second->GetID() == id) {
-    if (rate_limit_group_ != nullptr) {
-      bufferevent_remove_from_rate_limit_group(conn_iter->second->GetBufferEvent());
-    }
-    delete conn_iter->second;
-    conns_.erase(conn_iter);
-    svr_->DecrClientNum();
-  }
-  auto monitor_conn_iter = monitor_conns_.find(fd);
-  if (monitor_conn_iter != monitor_conns_.end() && monitor_conn_iter->second->GetID() == id) {
-    delete monitor_conn_iter->second;
-    monitor_conns_.erase(monitor_conn_iter);
-    svr_->DecrClientNum();
-    svr_->DecrMonitorClientNum();
-  }
-}
-
-Status Worker::EnableWriteEvent(int fd) {
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  auto iter = conns_.find(fd);
-  if (iter != conns_.end()) {
-    auto bev = iter->second->GetBufferEvent();
-    bufferevent_enable(bev, EV_WRITE);
-    return Status::OK();
-  }
-  return Status(Status::NotOK);
-}
-
-Status Worker::Reply(int fd, const std::string &reply) {
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  auto iter = conns_.find(fd);
-  if (iter != conns_.end()) {
-    Redis::Reply(iter->second->Output(), reply);
-    return Status::OK();
-  }
-  return Status(Status::NotOK, "connection doesn't exist");
-}
-
-int Worker::SetReplicationRateLimit(uint64_t max_replication_bytes) {
-  auto write_limit = EV_RATE_LIMIT_MAX;
-  if (max_replication_bytes > 0) {
-    write_limit = max_replication_bytes;
-  }
-  struct timeval cfg_tick = {1, 0};
-  auto old_cfg = rate_limit_group_cfg_;
-  rate_limit_group_cfg_ = ev_token_bucket_cfg_new(
-      EV_RATE_LIMIT_MAX, EV_RATE_LIMIT_MAX,
-      write_limit, write_limit,
-      &cfg_tick);
-  if (rate_limit_group_cfg_ == nullptr) {
-    LOG(ERROR) << "[server] ev_token_bucket_cfg_new error";
-    rate_limit_group_cfg_ = old_cfg;
-    return -1;
-  }
-
-  if (rate_limit_group_ != nullptr) {
-    bufferevent_rate_limit_group_set_cfg(rate_limit_group_, rate_limit_group_cfg_);
-  } else {
-    rate_limit_group_ = bufferevent_rate_limit_group_new(base_, rate_limit_group_cfg_);
-  }
-
-  if (old_cfg != nullptr) {
-    ev_token_bucket_cfg_free(old_cfg);
-  }
-
-  return 0;
-}
-
-void Worker::BecomeMonitorConn(Redis::Connection *conn) {
-  conns_mu_.lock();
-  conns_.erase(conn->GetFD());
-  monitor_conns_[conn->GetFD()] = conn;
-  conns_mu_.unlock();
-  svr_->IncrMonitorClientNum();
-  conn->EnableFlag(Redis::Connection::kMonitor);
-}
-
-void Worker::FeedMonitorConns(Redis::Connection *conn, const std::vector<std::string> &tokens) {
-  struct timeval tv;
-  gettimeofday(&tv, nullptr);
-  std::string output;
-  output += std::to_string(tv.tv_sec) + "." + std::to_string(tv.tv_usec);
-  output += " [" + conn->GetNamespace() + " " + conn->GetAddr() + "]";
-  for (const auto &token : tokens) {
-    output += " \"" + token + "\"";
-  }
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  for (const auto &iter : monitor_conns_) {
-    if (conn == iter.second) continue;  // skip the monitor command
-    if (conn->GetNamespace() == iter.second->GetNamespace()
-        || iter.second->GetNamespace() == kDefaultNamespace) {
-      iter.second->Reply(Redis::SimpleString(output));
-    }
-  }
-}
-
-std::string Worker::GetClientsStr() {
-  std::unique_lock<std::mutex> lock(conns_mu_);
-  std::string output;
-  for (const auto iter : conns_) {
-    Redis::Connection *conn = iter.second;
-    output.append(conn->ToString());
-  }
-  return output;
-}
-
-void Worker::KillClient(Redis::Connection *self, uint64_t id, std::string addr, bool skipme, int64_t *killed) {
-  conns_mu_.lock();
-  for (const auto iter : conns_) {
-    Redis::Connection* conn = iter.second;
-    if (skipme && self == conn) continue;
-    if ((!addr.empty() && conn->GetAddr() == addr) || (id != 0 && conn->GetID() == id)) {
-      conn->EnableFlag(Redis::Connection::kCloseAfterReply);
-      // enable write event to notify worker wake up ASAP, and remove the connection
-      if (!conn->IsFlagEnabled(Redis::Connection::kSlave)) {  // don't enable any event in slave connection
-        auto bev = conn->GetBufferEvent();
-        bufferevent_enable(bev, EV_WRITE);
-      }
-      (*killed)++;
-    }
-  }
-  conns_mu_.unlock();
-}
-
-void Worker::KickoutIdleClients(int timeout) {
-  conns_mu_.lock();
-  std::list<std::pair<int, uint64_t>> to_be_killed_conns;
-  if (conns_.empty()) {
-    conns_mu_.unlock();
-    return;
-  }
-  int iterations = std::min(static_cast<int>(conns_.size()), 50);
-  auto iter = conns_.upper_bound(last_iter_conn_fd);
-  while (iterations--) {
-    if (iter == conns_.end()) iter = conns_.begin();
-    if (static_cast<int>(iter->second->GetIdleTime()) >= timeout) {
-      to_be_killed_conns.emplace_back(std::make_pair(iter->first, iter->second->GetID()));
-    }
-    iter++;
-  }
-  iter--;
-  last_iter_conn_fd = iter->first;
-  conns_mu_.unlock();
-
-  for (const auto conn : to_be_killed_conns) {
-    FreeConnectionByID(conn.first, conn.second);
-  }
-}
-
-void WorkerThread::Start() {
-  try {
-    t_ = std::thread([this]() {
-      if (this->worker_->IsRepl()) {
-        Util::ThreadSetName("repl-worker");
-      } else {
-        Util::ThreadSetName("worker");
-      }
-      this->worker_->Run(t_.get_id());
-    });
-  } catch (const std::system_error &e) {
-    LOG(ERROR) << "[worker] Failed to start worker thread, err: " << e.what();
-    return;
-  }
-  LOG(INFO) << "[worker] Thread #" << t_.get_id() << " started";
-}
-
-void WorkerThread::Stop() {
-  worker_->Stop();
-}
-
-void WorkerThread::Join() {
-  if (t_.joinable()) t_.join();
-}
diff --git a/src/worker.h b/src/worker.h
deleted file mode 100644
index 9b0ba88..0000000
--- a/src/worker.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#pragma once
-
-#include <event2/buffer.h>
-#include <event2/bufferevent.h>
-#include <event2/listener.h>
-#include <event2/util.h>
-#include <cstring>
-#include <iostream>
-#include <map>
-#include <memory>
-#include <thread>
-#include <string>
-#include <vector>
-
-#include "storage.h"
-#include "redis_connection.h"
-
-class Server;
-
-class Worker {
- public:
-  Worker(Server *svr, Config *config, bool repl = false);
-  ~Worker();
-  Worker(const Worker &) = delete;
-  Worker(Worker &&) = delete;
-  void Stop();
-  void Run(std::thread::id tid);
-
-  void DetachConnection(Redis::Connection *conn);
-  void FreeConnection(Redis::Connection *conn);
-  void FreeConnectionByID(int fd, uint64_t id);
-  Status AddConnection(Redis::Connection *c);
-  Status EnableWriteEvent(int fd);
-  Status Reply(int fd, const std::string &reply);
-  bool IsRepl() { return repl_; }
-  int SetReplicationRateLimit(uint64_t max_replication_bytes);
-  void BecomeMonitorConn(Redis::Connection *conn);
-  void FeedMonitorConns(Redis::Connection *conn, const std::vector<std::string> &tokens);
-
-  std::string GetClientsStr();
-  void KillClient(Redis::Connection *self, uint64_t id, std::string addr, bool skipme, int64_t *killed);
-  void KickoutIdleClients(int timeout);
-
-  Server *svr_;
-
- private:
-  Status listen(const std::string &host, int port, int backlog);
-  static void newConnection(evconnlistener *listener, evutil_socket_t fd,
-                            sockaddr *address, int socklen, void *ctx);
-  static void TimerCB(int, int16_t events, void *ctx);
-  Redis::Connection *removeConnection(int fd);
-
-
-  event_base *base_;
-  event *timer_;
-  std::thread::id tid_;
-  std::vector<evconnlistener*> listen_events_;
-  std::mutex conns_mu_;
-  std::map<int, Redis::Connection*> conns_;
-  std::map<int, Redis::Connection*> monitor_conns_;
-  int last_iter_conn_fd = 0;   // fd of last processed connection in previous cron
-
-  bool repl_;
-  struct bufferevent_rate_limit_group *rate_limit_group_ = nullptr;
-  struct ev_token_bucket_cfg *rate_limit_group_cfg_ = nullptr;
-};
-
-class WorkerThread {
- public:
-  explicit WorkerThread(Worker *worker) : worker_(worker) {}
-  ~WorkerThread() { delete worker_; }
-  WorkerThread(const WorkerThread&) = delete;
-  WorkerThread(WorkerThread&&) = delete;
-  Worker *GetWorker() { return worker_; }
-  void Start();
-  void Stop();
-  void Join();
-
- private:
-  std::thread t_;
-  Worker *worker_;
-};
diff --git a/tests/compact_test.cc b/tests/compact_test.cc
deleted file mode 100644
index c5d1ee0..0000000
--- a/tests/compact_test.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "config.h"
-#include "storage.h"
-#include "redis_metadata.h"
-#include "redis_hash.h"
-#include "redis_zset.h"
-
-TEST(Compact, Filter) {
-  Config config;
-  config.db_dir = "compactdb";
-  config.backup_dir = "compactdb/backup";
-
-  auto storage_ = new Engine::Storage(&config);
-  Status s = storage_->Open();
-  assert(s.IsOK());
-
-  int ret;
-  std::string ns = "test_compact";
-  auto hash = new Redis::Hash(storage_, ns);
-  std::string expired_hash_key = "expire_hash_key";
-  std::string live_hash_key = "live_hash_key";
-  hash->Set(expired_hash_key, "f1", "v1", &ret);
-  hash->Set(expired_hash_key, "f2", "v2", &ret);
-  hash->Expire(expired_hash_key, 1); // expired
-  usleep(10000);
-  hash->Set(live_hash_key, "f1", "v1", &ret);
-  hash->Set(live_hash_key, "f2", "v2", &ret);
-  auto status = storage_->Compact(nullptr, nullptr);
-  assert(status.ok());
-
-  rocksdb::DB *db = storage_->GetDB();
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = db->GetSnapshot();
-  read_options.fill_cache = false;
-  auto iter = db->NewIterator(read_options, storage_->GetCFHandle("metadata"));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    std::string user_key, user_ns;
-    ExtractNamespaceKey(iter->key(), &user_ns, &user_key);
-    EXPECT_EQ(user_key, live_hash_key);
-  }
-  delete iter;
-
-  iter = db->NewIterator(read_options, storage_->GetCFHandle("subkey"));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    InternalKey ikey(iter->key());
-    EXPECT_EQ(ikey.GetKey().ToString(), live_hash_key);
-  }
-  delete iter;
-  delete hash;
-
-  auto zset = new Redis::ZSet(storage_, ns);
-  std::string expired_zset_key = "expire_zset_key";
-  std::vector<MemberScore> member_scores =  {MemberScore{"z1", 1.1}, MemberScore{"z2", 0.4}};
-  zset->Add(expired_zset_key, 0, &member_scores, &ret);
-  zset->Expire(expired_zset_key, 1); // expired
-  usleep(10000);
-
-  status = storage_->Compact(nullptr, nullptr);
-  assert(status.ok());
-
-  iter = db->NewIterator(read_options, storage_->GetCFHandle("default"));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    InternalKey ikey(iter->key());
-    EXPECT_EQ(ikey.GetKey().ToString(), live_hash_key);
-  }
-  delete iter;
-
-  iter = db->NewIterator(read_options, storage_->GetCFHandle("zset_score"));
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    EXPECT_TRUE(false);  // never reach here
-  }
-  delete iter;
-
-  delete zset;
-}
diff --git a/tests/config_test.cc b/tests/config_test.cc
deleted file mode 100644
index 041649f..0000000
--- a/tests/config_test.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-#include "config.h"
-#include "server.h"
-#include <map>
-#include <vector>
-#include <gtest/gtest.h>
-
-TEST(Config, Profiling) {
-  const char *path = "test.conf";
-  Config config;
-  Server srv(nullptr, &config);
-
-  config.Load(path);
-  std::map<std::string, std::string> cases = {
-      {"profiling-sample-ratio" , "50"},
-      {"profiling-sample-record-max-len" , "1"},
-      {"profiling-sample-record-threshold-ms" , "50"},
-      {"profiling-sample-commands" , "get,set"},
-  };
-  std::vector<std::string> values;
-  for (const auto &iter : cases) {
-    config.Set(iter.first, iter.second, &srv);
-    config.Get(iter.first, &values);
-    ASSERT_EQ(values.size(), 2);
-    EXPECT_EQ(values[0], iter.first);
-    EXPECT_EQ(values[1], iter.second);
-  }
-  ASSERT_TRUE(config.Rewrite().IsOK());
-  config.Load(path);
-  for (const auto &iter : cases) {
-    config.Set(iter.first, iter.second, &srv);
-    config.Get(iter.first, &values);
-    ASSERT_EQ(values.size(), 2);
-    EXPECT_EQ(values[0], iter.first);
-    EXPECT_EQ(values[1], iter.second);
-  }
-  unlink(path);
-}
-
-TEST(Config, ProfilingMaxRecordLen) {
-  Config config;
-  config.profiling_sample_record_max_len = 1;
-  Server srv(nullptr, &config);
-  srv.GetPerfLog()->PushEntry(PerfEntry{});
-  srv.GetPerfLog()->PushEntry(PerfEntry{});
-  EXPECT_EQ(srv.GetPerfLog()->Len(), 1);
-  config.Set("profiling-sample-record-max-len", "2", &srv);
-  srv.GetPerfLog()->PushEntry(PerfEntry{});
-  srv.GetPerfLog()->PushEntry(PerfEntry{});
-  EXPECT_EQ(srv.GetPerfLog()->Len(), 2);
-}
-
-TEST(Namespace, Add) {
-  Config config;
-  EXPECT_TRUE(!config.AddNamespace("ns", "t0").IsOK());
-  config.requirepass = "foobared";
-  std::vector<std::string> namespaces= {"n1", "n2", "n3", "n4"};
-  std::vector<std::string> tokens = {"t1", "t2", "t3", "t4"};
-  for(int i = 0; i < namespaces.size(); i++) {
-    EXPECT_TRUE(config.AddNamespace(namespaces[i], tokens[i]).IsOK());
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    std::string token;
-    config.GetNamespace(namespaces[i], &token);
-    EXPECT_EQ(token, tokens[i]);
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    auto s = config.AddNamespace(namespaces[i], tokens[i]);
-    EXPECT_FALSE(s.IsOK());
-    EXPECT_EQ(s.Msg(), "the token has already exists");
-  }
-  auto s = config.AddNamespace("n1", "t0");
-  EXPECT_FALSE(s.IsOK());
-  EXPECT_EQ(s.Msg(), "the namespace has already exists");
-}
-
-TEST(Namespace, Set) {
-  Config config;
-  config.requirepass = "foobared";
-  std::vector<std::string> namespaces= {"n1", "n2", "n3", "n4"};
-  std::vector<std::string> tokens = {"t1", "t2", "t3", "t4"};
-  std::vector<std::string> new_tokens = {"nt1", "nt2'", "nt3", "nt4"};
-  for(int i = 0; i < namespaces.size(); i++) {
-    auto s = config.SetNamespace(namespaces[i], tokens[i]);
-    EXPECT_FALSE(s.IsOK());
-    EXPECT_EQ(s.Msg(), "the namespace was not found");
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    EXPECT_TRUE(config.AddNamespace(namespaces[i], tokens[i]).IsOK());
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    std::string token;
-    config.GetNamespace(namespaces[i], &token);
-    EXPECT_EQ(token, tokens[i]);
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    EXPECT_TRUE(config.SetNamespace(namespaces[i], new_tokens[i]).IsOK());
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    std::string token;
-    config.GetNamespace(namespaces[i], &token);
-    EXPECT_EQ(token, new_tokens[i]);
-  }
-}
-
-TEST(Namespace, Delete) {
-  Config config;
-  config.requirepass = "foobared";
-  std::vector<std::string> namespaces= {"n1", "n2", "n3", "n4"};
-  std::vector<std::string> tokens = {"t1", "t2", "t3", "t4"};
-  for(int i = 0; i < namespaces.size(); i++) {
-    EXPECT_TRUE(config.AddNamespace(namespaces[i], tokens[i]).IsOK());
-  }
-  for(int i = 0; i < namespaces.size(); i++) {
-    std::string token;
-    config.GetNamespace(namespaces[i], &token);
-    EXPECT_EQ(token, tokens[i]);
-  }
-  for (const auto &ns : namespaces) {
-    config.DelNamespace(ns);
-    std::string token;
-    config.GetNamespace(ns, &token);
-    EXPECT_TRUE(token.empty());
-  }
-}
-
-TEST(Namespace, RewriteNamespaces) {
-  const char *path = "test.conf";
-  unlink(path);
-  Config config;
-  config.requirepass = "test";
-  config.backup_dir = "test";
-  config.Load(path) ;
-  std::vector<std::string> namespaces= {"n1", "n2", "n3", "n4"};
-  std::vector<std::string> tokens = {"t1", "t2", "t3", "t4"};
-  for(int i = 0; i < namespaces.size(); i++) {
-    EXPECT_TRUE(config.AddNamespace(namespaces[i], tokens[i]).IsOK());
-  }
-  EXPECT_TRUE(config.Rewrite().IsOK());
-  Config new_config;
-  auto s = new_config.Load(path) ;
-  std::cout << s.Msg() << std::endl;
-  for(int i = 0; i < namespaces.size(); i++) {
-    std::string token;
-    new_config.GetNamespace(namespaces[i], &token);
-    EXPECT_EQ(token, tokens[i]);
-  }
-  unlink(path);
-}
diff --git a/tests/cron_test.cc b/tests/cron_test.cc
deleted file mode 100644
index 2f747e7..0000000
--- a/tests/cron_test.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-#include "cron.h"
-#include <gtest/gtest.h>
-
-class CronTest : public testing::Test {
- protected:
-  explicit CronTest() {
-    cron = new Cron();
-    std::vector<std::string> schedule{"*", "3", "*", "*", "*"};
-    cron->SetScheduleTime(schedule);
-  }
-  ~CronTest() {
-    delete cron;
-  }
-
- protected:
-  Cron *cron;
-};
-
-TEST_F(CronTest, IsTimeMatch) {
-  std::time_t t = std::time(0);
-  std::tm *now = std::localtime(&t);
-  now->tm_hour = 3;
-  ASSERT_TRUE(cron->IsTimeMatch(now));
-  now->tm_hour = 4;
-  ASSERT_FALSE(cron->IsTimeMatch(now));
-}
-
-TEST_F(CronTest, ToString) {
-  std::string got = cron->ToString();
-  ASSERT_EQ("* 3 * * *", got);
-}
diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tests/functional/__init__.py
+++ /dev/null
diff --git a/tests/functional/assert_helper.py b/tests/functional/assert_helper.py
deleted file mode 100644
index 757e14a..0000000
--- a/tests/functional/assert_helper.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import os
-import sys
-
-PWD = os.path.dirname(os.path.realpath(__file__))
-WORKDIR = os.path.join(PWD,'../')
-
-def assert_raise(exception_cls, callable, *args, **kwargs):
-    try:
-        callable(*args, **kwargs)
-    except exception_cls as e:
-        return e
-    except Exception as e:
-        assert False, 'assert_raises %s but raised: %s' % (exception_cls, e)
-    assert False, 'assert_raises %s but nothing was raised' % (exception_cls)
diff --git a/tests/functional/bitmap_test.py b/tests/functional/bitmap_test.py
deleted file mode 100644
index cbb1604..0000000
--- a/tests/functional/bitmap_test.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import redis
-from assert_helper import *
-from conn import *
-
-def test_getbit_and_setbit():
-    key = "test_getbit_and_setbit"
-    conn = get_redis_conn()
-    bits = [0, 1, 2, 3, 1024, 1024*8, 1024*8+1, 1024*8+2, 1024*8+3, 4*1024*8, 4*1024*8+1]
-    for pos in bits:
-        ret = conn.getbit(key, pos)
-        assert(ret == 0)
-        ret = conn.setbit(key, pos, 1)
-        assert(ret == 0)
-        ret = conn.getbit(key, pos)
-        assert(ret == 1)
-        ret = conn.setbit(key, pos, 0)
-        assert(ret == 1)
-        ret = conn.getbit(key, pos)
-        assert(ret == 0)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_bitcount():
-    key = "test_bitcount"
-    conn = get_redis_conn()
-    bits = [0, 1, 2, 3, 1024, 1024*8, 1024*8+1, 1024*8+2, 1024*8+3, 4*1024*8, 4*1024*8+1]
-    for pos in bits:
-        ret = conn.getbit(key, pos)
-        assert(ret == 0)
-        ret = conn.setbit(key, pos, 1)
-        assert(ret == 0)
-    ret = conn.bitcount(key)
-    assert(ret == len(bits))
-    for pos in bits:
-        ret = conn.setbit(key, pos, 0)
-        assert(ret == 1)
-    ret = conn.bitcount(key)
-    assert(ret == 0)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_bitpos():
-    key = "test_bitpos"
-    conn = get_redis_conn()
-    bits = [0, 1, 2, 3, 1024, 1024*8, 1024*8+1, 1024*8+2, 1024*8+3, 4*1024*8, 4*1024*8+1]
-    for pos in bits:
-        ret = conn.getbit(key, pos)
-        assert(ret == 0)
-        ret = conn.setbit(key, pos, 1)
-        assert(ret == 0)
-    ret = conn.bitpos(key, 0, 0, 3)
-    assert(ret == 4)
-    ret = conn.bitpos(key, 1, 0, 3)
-    assert(ret == 0)
-    ret = conn.bitpos(key, 0, 1024)
-    assert(ret == 1024*8+4)
-    ret = conn.bitpos(key, 1, 1024)
-    assert(ret == 1024*8)
-    ret = conn.delete(key)
-    assert(ret == 1)
diff --git a/tests/functional/conn.py b/tests/functional/conn.py
deleted file mode 100644
index 5645164..0000000
--- a/tests/functional/conn.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import os
-import sys
-import redis
-
-PWD = os.path.dirname(os.path.realpath(__file__))
-WORKDIR = os.path.join(PWD,'../')
-
-def get_redis_conn(master=True):
-    if master:
-        r = redis.Redis("127.0.0.1", 6666, 0, "foobared")
-    else:
-        r = redis.Redis("127.0.0.1", 6668, 0, "foobared")
-    return r
-
diff --git a/tests/functional/hash_test.py b/tests/functional/hash_test.py
deleted file mode 100644
index 0c1c911..0000000
--- a/tests/functional/hash_test.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import redis
-from assert_helper import *
-from conn import *
-
-def test_hget_and_hset():
-    key = "test_hget_and_hset"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    for i, k in enumerate(keys):
-        ret = conn.hset(key, k, kvs[k])
-        assert(ret == 1)
-        ret = conn.hget(key, k)
-        assert(ret == kvs[k])
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-    for i, k in enumerate(keys):
-        ret = conn.hget(key, k)
-        assert(ret == None)
-
-def test_hincrby():
-    key = "test_hincrby"
-    conn = get_redis_conn()
-    for i in range(1, 10):
-        ret = conn.hincrby(key, "f1", 1)
-        assert(ret == i)
-    ret = conn.delete(key)
-    assert(ret == 1)
-    # TODO(linty): cover the not-a-number and overflow cases
-    assert_raise(redis.RedisError, conn.hincrby, key, "f1", "invalid")
-
-def test_hincrbyfloat():
-    key = "test_hincrbyfloat"
-    conn = get_redis_conn()
-    for i in range(1, 10):
-        ret = conn.hincrbyfloat(key, "f1", 1.234)
-        assert(ret == i*1.234)
-    ret = conn.delete(key)
-    assert(ret == 1)
-    # TODO(linty): cover the not-a-number and overflow cases
-    assert_raise(redis.RedisError, conn.hincrbyfloat, key, "f1", "invalid")
-
-def test_hsetnx():
-    key = "test_hsetnx"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    for i, k in enumerate(keys):
-        ret = conn.hsetnx(key, k, kvs[k])
-        assert(ret == 1)
-        ret = conn.hget(key, k)
-        assert(ret == kvs[k])
-    for i, k in enumerate(keys):
-        ret = conn.hsetnx(key, k, kvs[k])
-        assert(ret == 0)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_hstrlen():
-    key = "test_hstrlen"
-    conn = get_redis_conn()
-    ret = conn.hset(key, "f1", "hello")
-    assert(ret == True)
-    ret = conn.hstrlen(key, "f1")
-    assert(ret == 5)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_hdel():
-    key = "test_hdel"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    conn.hmset(key, kvs)
-    ret = conn.hdel(key, *keys)
-    assert(ret == len(kvs))
-
-def test_hexists():
-    key = "test_hexists"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    for i, k in enumerate(keys):
-        ret = conn.hset(key, k, kvs[k])
-        assert(ret == True)
-        ret = conn.hexists(key, k)
-        assert(ret == True)
-    ret = conn.delete(key)
-    assert(ret == 1)
-    for i, k in enumerate(keys):
-        ret = conn.hexists(key, k)
-        assert(ret == False)
-
-def test_hlen():
-    key = "test_hlen"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    ret = conn.hmset(key, kvs)
-    assert(ret == True)
-    ret = conn.hlen(key)
-    assert(ret == len(kvs))
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_mget_and_mset():
-    key = "test_mget_and_mset"
-    conn = get_redis_conn()
-    conn.delete(key)
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    ret = conn.hmset(key, kvs)
-    assert(ret == True)
-    ret = conn.hmget(key, kvs.keys())
-    assert(ret == kvs.values())
-    ret = conn.hmget(key, ['kkk-1', 'f-no-exist'])
-    assert(ret == ['vvv-1', None])
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_hkeys():
-    key = "test_hkeys"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    ret = conn.hmset(key, kvs)
-    assert(ret == True)
-    ret = conn.hkeys(key)
-    assert(sorted(keys) == ret)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_hvals():
-    key = "test_hvals"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    ret = conn.hmset(key, kvs)
-    assert(ret == True)
-    ret = conn.hvals(key)
-    assert(sorted(kvs.values()) == ret)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_hgetall():
-    key = "test_hgetall"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    ret = conn.hmset(key, kvs)
-    assert(ret == True)
-    ret = conn.hgetall(key)
-    assert(ret == kvs)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_hscan():
-    conn = get_redis_conn()
-    key = "test_hscan"
-    ret = conn.hset(key, 'a', 1.3)
-    assert (ret == 1)
-    ret = conn.execute_command("HSCAN " + key + " 0")
-    assert (ret == ['a', ['a']])
-
-    ret = conn.delete(key)
-    assert (ret == 1)
diff --git a/tests/functional/key_test.py b/tests/functional/key_test.py
deleted file mode 100644
index 9871d7a..0000000
--- a/tests/functional/key_test.py
+++ /dev/null
@@ -1,291 +0,0 @@
-import redis
-import time
-from assert_helper import *
-from conn import *
-
-def test_type():
-    conn = get_redis_conn()
-    string_key = "test_string_type"
-    ret = conn.set(string_key, "bar")
-    assert(ret == True)
-    ret = conn.type(string_key)
-    assert(ret == "string")
-    hash_key = "test_hash_type"
-    ret = conn.hset(hash_key, "f1", "v1")
-    assert(ret == 1)
-    ret = conn.type(hash_key)
-    assert(ret == "hash")
-    list_key = "test_list_type"
-    ret = conn.lpush(list_key, "v1")
-    assert(ret == 1)
-    ret = conn.type(list_key)
-    assert(ret == "list")
-    set_key = "test_set_type"
-    ret = conn.sadd(set_key, "s1")
-    assert(ret == 1)
-    ret = conn.type(set_key)
-    assert(ret == "set")
-    zset_key = "test_zset_type"
-    ret = conn.zadd(zset_key, "s1", 0.1)
-    assert(ret == 1)
-    ret = conn.type(zset_key)
-    assert(ret == "zset")
-    ret = conn.delete(string_key, hash_key, list_key, set_key, zset_key)
-    assert(ret == 5)
-
-def test_expire():
-    key = "test_expire"
-    conn = get_redis_conn()
-    ret = conn.lpush(key, "v1")
-    assert(ret == 1)
-    ret = conn.expire(key, 2)
-    assert(ret == 1)
-    ret = conn.ttl(key)
-    assert(ret >= 1 and ret <= 2)
-    time.sleep(3)
-    ret = conn.exists(key)
-    assert(ret == False)
-    
-def test_exists():
-    key = "test_exists"
-    conn = get_redis_conn()
-    ret = conn.set(key, "bar")
-    assert(ret)
-    ret = conn.exists(key)
-    assert(ret)
-    ret = conn.delete(key)
-    assert(ret == 1)
-    ret = conn.exists(key)
-    assert(not ret)
-
-def test_ttl():
-    key = "test_ttl"
-    conn = get_redis_conn()
-    ret = conn.set(key, "bar")
-    assert(ret)
-    ret = conn.ttl(key)
-    assert(ret == None)
-    ret = conn.ttl("notexistskey")
-    assert(ret == None)
-    ret = conn.expire(key, 2)
-    assert(ret == 1)
-    ret = conn.ttl(key)
-    assert(ret >= 1 and ret <= 2)
-
-
-def test_object_dump():
-    default_namespace = "__namespace"
-    conn = get_redis_conn()
-
-    string_key = "test_string_dump"
-    ret = conn.set(string_key, "bar")
-    assert(ret == True)
-    ret = conn.object("dump", string_key)
-    assert(ret[1] == default_namespace)
-    assert(ret[3] == "string")
-    assert(ret[7] == "0")
-    assert(ret[9] == "0")
-    ret = conn.expire(string_key, 2)
-    assert (ret == True)
-    ret = conn.object("dump", string_key)
-    ttl = int(ret[7]) - int(time.time())
-    assert (1 <= ttl <= 2)
-
-    hash_key = "test_hash_dump"
-    ret = conn.hset(hash_key, "f1", "v1")
-    assert(ret == 1)
-    ret = conn.object("dump", hash_key)
-    assert(ret[1] == default_namespace)
-    assert(ret[3] == "hash")
-    assert(ret[7] == "-1")
-    assert(ret[9] == "1")
-    ret = conn.hset(hash_key, "f2", "v2")
-    assert (ret == 1)
-    ret = conn.object("dump", hash_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "hash")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "2")
-    ret = conn.hdel(hash_key, "f2")
-    assert (ret == 1)
-    ret = conn.object("dump", hash_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "hash")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "1")
-
-    list_key = "test_list_dump"
-    ret = conn.lpush(list_key, "v1")
-    assert(ret == 1)
-    ret = conn.object("dump", list_key)
-    assert(ret[1] == default_namespace)
-    assert(ret[3] == "list")
-    assert(ret[7] == "-1")
-    assert(ret[9] == "1")
-    ret = conn.lpush(list_key, "v2")
-    assert (ret == 2)
-    ret = conn.object("dump", list_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "list")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "2")
-    ret = conn.lpop(list_key)
-    assert (ret == "v2")
-    ret = conn.object("dump", list_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "list")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "1")
-
-    set_key = "test_set_dump"
-    ret = conn.sadd(set_key, "s1")
-    assert(ret == 1)
-    ret = conn.object("dump", set_key)
-    assert(ret[1] == default_namespace)
-    assert(ret[3] == "set")
-    assert(ret[7] == "-1")
-    assert(ret[9] == "1")
-    ret = conn.sadd(set_key, "s2")
-    assert (ret == 1)
-    ret = conn.object("dump", set_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "set")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "2")
-    ret = conn.spop(set_key)
-    assert (ret == ['s1'])
-    ret = conn.object("dump", set_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "set")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "1")
-
-    zset_key = "test_zset_dump"
-    ret = conn.zadd(zset_key, "s1", 0.1)
-    assert(ret == 1)
-    ret = conn.object("dump", zset_key)
-    assert(ret[1] == default_namespace)
-    assert(ret[3] == "zset")
-    assert(ret[7] == "-1")
-    assert(ret[9] == "1")
-    ret = conn.zadd(zset_key, "s2", 0.2)
-    assert (ret == 1)
-    ret = conn.object("dump", zset_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "zset")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "2")
-    ret = conn.zrem(zset_key, "s2")
-    assert (ret == 1)
-    ret = conn.object("dump", zset_key)
-    assert (ret[1] == default_namespace)
-    assert (ret[3] == "zset")
-    assert (ret[7] == "-1")
-    assert (ret[9] == "1")
-    ret = conn.expire(zset_key, 2)
-    assert (ret == True)
-    ret = conn.object("dump", zset_key)
-    ttl = int(ret[7]) - int(time.time())
-    assert (1 <= ttl <= 2)
-
-    ret = conn.delete(string_key, hash_key, list_key, set_key, zset_key)
-    assert(ret == 5)
-
-
-def test_persist():
-    key = "test_persist"
-    conn = get_redis_conn()
-    ret = conn.persist(key)
-    assert(not ret)
-    ret = conn.set(key, "bar")
-    assert(ret)
-    ret = conn.persist(key)
-    assert(not ret)
-    ret = conn.expire(key, 100)
-    assert(ret == 1)
-    ret = conn.persist(key)
-    assert(ret)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_expireat():
-    key = "test_expireat"
-    conn = get_redis_conn()
-    ret = conn.sadd(key, "s1")
-    assert(ret == 1)
-    ret = conn.expireat(key, time.time()+2)
-    assert(ret == 1)
-    ret = conn.ttl(key)
-    assert(ret >= 1 and ret <= 2)
-    time.sleep(3)
-    ret = conn.exists(key)
-    assert(ret == False)
-
-def test_pexpire():
-    key = "test_pexpire"
-    conn = get_redis_conn()
-    ret = conn.hset(key, "f1", "v1")
-    assert(ret == 1)
-    ret = conn.pexpire(key, 2000)
-    assert(ret == 1)
-    ret = conn.pttl(key)
-    assert(ret >= 1000 and ret <= 2000)
-    time.sleep(3)
-    ret = conn.exists(key)
-    assert(ret == False)
-
-def test_pexpireat():
-    key = "test_pexpireat"
-    conn = get_redis_conn()
-    ret = conn.sadd(key, "s1")
-    assert(ret == 1)
-    ret = conn.pexpireat(key, (time.time()+2)*1000)
-    assert(ret == 1)
-    ret = conn.pttl(key)
-    assert(ret >= 1000 and ret <= 2000)
-    time.sleep(3)
-    ret = conn.exists(key)
-    assert(ret == False)
-    
-def test_pttl():
-    key = "test_ttl"
-    conn = get_redis_conn()
-    ret = conn.set(key, "bar")
-    assert(ret)
-    ret = conn.ttl(key)
-    assert(ret == None)
-    ret = conn.ttl("notexistskey")
-    assert(ret == None)
-    ret = conn.pexpire(key, 2000)
-    assert(ret == 1)
-    ret = conn.ttl(key)
-    assert(ret >= 1 and ret <= 2)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_randomkey():
-    keys = ["test_randomkey", "test_randomkey_1", "test_randomkey_2"]
-    conn = get_redis_conn()
-    for key in keys:
-        ret = conn.set(key, "bar")
-        assert(ret)
-
-    ret = conn.execute_command("RANDOMKEY")
-    assert(ret in keys)
-
-    for key in keys:
-        ret = conn.delete(key)
-        assert(ret == 1)
-
-def test_scan():
-    key = "test_scan"
-    conn = get_redis_conn()
-    ret = conn.set(key, "bar")
-    assert(ret)
-
-    ret = conn.execute_command("SCAN" + " 0")
-    assert (ret == [key, [key]])
-
-    ret = conn.delete(key)
-    assert (ret == 1)
diff --git a/tests/functional/list_test.py b/tests/functional/list_test.py
deleted file mode 100644
index 4aaaa3e..0000000
--- a/tests/functional/list_test.py
+++ /dev/null
@@ -1,236 +0,0 @@
-import redis
-import threading
-import time
-from assert_helper import *
-from conn import *
-
-def test_lpush_and_rpop():
-    key = "test_lpush_and_rpop"
-    conn = get_redis_conn()
-    for i in range (10):
-        ret = conn.lpush(key, "val-"+str(i))
-        assert((i+1 == ret))
-    for i in range (10):
-        ret = conn.rpop(key)
-        assert(ret == "val-"+str(i))
-
-def test_lpush_multi_elems():
-    key = "test_lpush_multi_elems"
-    conn = get_redis_conn()
-    elems = ["a", "b", "c"]
-    ret = conn.lpush(key, *elems)
-    assert(ret == len(elems))
-    ret = conn.delete(key)
-    assert(ret == 1)
-    
-def test_rpush_and_lpop():
-    key = "test_rpush_and_lpop"
-    conn = get_redis_conn()
-    for i in range (10):
-        ret = conn.rpush(key, "val-"+str(i))
-        assert((i+1 == ret))
-    for i in range (10):
-        ret = conn.lpop(key)
-        assert(ret == "val-"+str(i))
-
-def test_lpushx():
-    key = "test_lpushx"
-    conn = get_redis_conn()
-    ret = conn.lpushx(key, "noop")
-    assert(ret == 0)
-    ret = conn.lpush(key, "val-0")
-    assert(ret == 1)
-    for i in range (10):
-        ret = conn.lpushx(key, "val-"+str(i))
-        assert(i+2 == ret)
-    ret = conn.rpop(key)
-    assert(ret == "val-0")
-    for i in range (10):
-        ret = conn.rpop(key)
-        assert(ret == "val-"+str(i))
-
-def test_rpushx():
-    key = "test_rpushx"
-    conn = get_redis_conn()
-    ret = conn.rpushx(key, "noop")
-    assert(ret == 0)
-    ret = conn.rpush(key, "val-0")
-    assert(ret == 1)
-    for i in range (10):
-        ret = conn.rpushx(key, "val-"+str(i))
-        assert(i+2 == ret)
-    ret = conn.lpop(key)
-    assert(ret == "val-0")
-    for i in range (10):
-        ret = conn.lpop(key)
-        assert(ret == "val-"+str(i))
-
-def test_lindex():
-    key = "test_lindex"
-    conn = get_redis_conn()
-    elems = ["a", "b", "c", "d", "e"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    for i in range(len(elems)):
-        ret = conn.lindex(key, i)
-        assert(ret == elems[i])
-    for i in range(-1*len(elems), 0):
-        ret = conn.lindex(key, i)
-        assert(ret == elems[i])
-    ret = conn.lindex(key, len(elems))
-    assert(None == ret)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_lset():
-    key = "test_lset"
-    conn = get_redis_conn()
-    elems = ["a", "b", "c", "d", "e"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    for i in range (len(elems)): 
-        assert(conn.lset(key, i, str(i)))
-    for i in range(len(elems)):
-        ret = conn.lindex(key, i)
-        assert(ret == str(i))
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_llen():
-    key = "test_lset"
-    conn = get_redis_conn()
-    ret = conn.llen(key)
-    assert(ret == 0)
-    elems = ["a", "b", "c", "d", "e"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    ret = conn.llen(key)
-    assert(ret == len(elems))
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_lrange():
-    key = "test_lrange"
-    conn = get_redis_conn()
-    elems = ["one", "two", "three"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    ret = conn.lrange(key, 0, 0)
-    assert(ret == [elems[0]])
-    ret = conn.lrange(key, -3, 2)
-    assert(ret == elems)
-    ret = conn.lrange(key, -100, 100)
-    assert(ret == elems)
-    ret = conn.lrange(key, 5, 10)
-    assert(ret == [])
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_ltrim():
-    key = "test_ltrim"
-    conn = get_redis_conn()
-    elems = ["one", "two", "three"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    ret = conn.ltrim(key, 0, 2000)
-    assert(ret)
-    ret = conn.llen(key)
-    assert(ret == len(elems))
-    ret = conn.ltrim(key, 1, -1)
-    assert(ret)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == elems[1:])
-    ret = conn.ltrim(key, -100, 0)
-    assert(ret)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == [elems[1]])
-    ret = conn.ltrim(key, 100, 0)
-    assert(ret)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == [])
-
-def test_lrem():
-    key = "test_lrem"
-    conn = get_redis_conn()
-    elems = ["E1", "E2", "E3", "hello", "E4", "E5", "hello", "E6"]
-    elems_without_hello =  ["E1", "E2", "E3", "E4", "E5", "E6"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    ret = conn.execute_command("LREM", key, 0, "hello")
-    assert (ret == 2)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == elems_without_hello)
-    ret = conn.execute_command("LREM", key, 1, elems_without_hello[0])
-    assert (ret == 1)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == elems_without_hello[1:])
-    ret = conn.execute_command("LREM", key, -1, elems_without_hello[5])
-    assert (ret == 1)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == elems_without_hello[1:5])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_linsert():
-    key = "test_linsert"
-    conn = get_redis_conn()
-    ret = conn.delete(key)
-    elems = ["E1", "E2", "E3", "E3"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-
-    ret = conn.linsert(key, "after", "E3", "E4")
-    assert (ret == 5)
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == ["E1", "E2", "E3", "E4", "E3"])
-    ret = conn.linsert(key, "before", "E3", "E5")
-    assert (ret == 6)
-    ret = conn.lrange(key, 0, -1)
-    print ret
-    assert(ret == ["E1", "E2", "E5", "E3", "E4", "E3"])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_rpoplpush():
-    key = "test_rpoplpush"
-    new_key = "new_test_rpoplpush"
-    conn = get_redis_conn()
-    elems = ["one", "two", "three"]
-    ret = conn.rpush(key, *elems)
-    assert(ret == len(elems))
-    ret = conn.rpoplpush(key, new_key)
-    assert(ret == elems[-1])
-    ret = conn.lrange(key, 0, -1)
-    assert(ret == elems[0:-1])
-    ret = conn.lrange(new_key, 0, -1)
-    assert(ret == elems[-1:])
-    ret = conn.delete(key, new_key)
-    assert(ret == 2)
-
-
-def bpop(key):
-    conn = get_redis_conn()
-    ret = conn.execute_command("brpop", key, 1)
-    assert (ret == None)
-    ret = conn.execute_command("brpop", key, 0)
-    assert (ret == 'a')
-
-    ret = conn.execute_command("blpop", key, 0)
-    assert (ret == 'b')
-
-
-def test_bpop():
-    key = "test_bpop"
-    conn = get_redis_conn()
-    x = threading.Thread(target=bpop, args=(key,))
-    x.start()
-
-    time.sleep(3)
-    ret = conn.rpush(key, "a")
-    assert(ret == 1)
-    time.sleep(3)
-    ret = conn.lpush(key, "b")
-    assert(ret == 1)
-
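The blocking-pop test above is timing-dependent: the consumer thread calls BRPOP with a 1-second timeout before anything has been pushed, so that call must time out, while the later calls with timeout 0 block until the main thread pushes. A rough timeline of the interaction:

    # t=0s  bpop thread: BRPOP key 1  -> list is still empty, times out after 1s, returns nil
    # t=1s  bpop thread: BRPOP key 0  -> blocks indefinitely
    # t=3s  test thread: RPUSH key a  -> BRPOP unblocks and pops "a" from the tail
    # t=3s  bpop thread: BLPOP key 0  -> blocks again
    # t=6s  test thread: LPUSH key b  -> BLPOP unblocks and pops "b" from the head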
diff --git a/tests/functional/namespace_test.py b/tests/functional/namespace_test.py
deleted file mode 100644
index 4fe4032..0000000
--- a/tests/functional/namespace_test.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import redis
-from assert_helper import *
-from conn import *
-
-def test_namespace_get():
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    for i, k in enumerate(keys):
-        ret = conn.execute_command("namespace", "get", k)
-        assert(ret == None)
-        ret = conn.execute_command("namespace", "add", k, kvs[k])
-        assert(ret == "OK")
-        ret = conn.execute_command("namespace", "get", k)
-        assert(ret == kvs[k])
-    for i, k in enumerate(keys):
-        ret = conn.execute_command("namespace", "del", k)
-        assert(ret)
-
-def test_namespace_add():
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    for i, k in enumerate(keys):
-        ret = conn.execute_command("namespace", "get", k)
-        assert(ret == None)
-        ret = conn.execute_command("namespace", "add", k, kvs[k])
-        assert(ret == "OK")
-    for i, k in enumerate(keys):
-        assert_raise(redis.RedisError, conn.execute_command, "namespace", "add", k, kvs[k]+"-new")
-    for i, k in enumerate(keys):
-        ret = conn.execute_command("namespace", "del", k)
-        assert(ret)
-
-def test_namespace_set():
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    keys = kvs.keys()
-    for i, k in enumerate(keys):
-        ret = conn.execute_command("namespace", "get", k)
-        assert(ret == None)
-        assert_raise(redis.RedisError, conn.execute_command, "namespace", "set", k, kvs[k]+"-new")
-        ret = conn.execute_command("namespace", "add", k, kvs[k])
-        assert(ret == "OK")
-        ret = conn.execute_command("namespace", "set", k, kvs[k]+"-new")
-        assert(ret == "OK")
-    for i, k in enumerate(keys):
-        ret = conn.execute_command("namespace", "del", k)
-        assert(ret)
diff --git a/tests/functional/pipeline_test.py b/tests/functional/pipeline_test.py
deleted file mode 100644
index 561cce7..0000000
--- a/tests/functional/pipeline_test.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import redis
-from assert_helper import *
-from conn import *
-
-def test_pipeline_without_transaction():
-    key = "test_pipeline_without_transaction"
-    conn = get_redis_conn()
-    pipe = conn.pipeline(False)
-    pipe.hset(key, "f1", "v1")
-    pipe.hset(key, "f2", "v2")
-    pipe.hset(key, "f3", "v3")
-    pipe.hset(key, "f4", "v4")
-    pipe.hlen(key)
-    ret = pipe.execute()
-    assert(ret == [1, 1, 1, 1, 4])
-    ret = conn.delete(key)
-    assert(ret == 1)
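For reference, conn.pipeline(False) above passes False as redis-py's transaction argument, so the commands are only batched into one round trip rather than wrapped in MULTI/EXEC. A slightly more explicit spelling would be:

    pipe = conn.pipeline(transaction=False)  # batch commands without MULTI/EXEC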
diff --git a/tests/functional/psync_test.py b/tests/functional/psync_test.py
deleted file mode 100644
index b372fae..0000000
--- a/tests/functional/psync_test.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import redis
-import time
-from assert_helper import *
-from conn import *
-
-def test_psync_string_set_and_del():
-    key = "test_psync_string_set_and_del"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    ret = conn.set(key, "bar")
-    assert (ret == True)
-    time.sleep(0.01)
-    value = conn_slave.get(key)
-    assert (value == "bar")
-    ret = conn.delete(key)
-    assert (ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.get(key)
-    assert(ret == None)
-
-def test_psync_string_setex():
-    key = "test_psync_string_setex"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    ret = conn.setex(key, "bar", 1024)
-    assert(ret == True)
-    time.sleep(0.01)
-    ret = conn_slave.ttl(key)
-    assert(ret >= 1023 and ret <= 1025)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_psync_expire():
-    key = "test_psync_expire"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    ret = conn.set(key, "bar")
-    assert(ret == True)
-    ret = conn.expire(key, 1024)
-    time.sleep(0.01)
-    ret = conn_slave.ttl(key)
-    assert(ret >= 1023 and ret <= 1025)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_psync_zset():
-    key = "test_psync_zset"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    rst = conn.zadd(key, 'a', 1.3)
-    assert(rst == 1)
-    time.sleep(0.01)
-    ret = conn_slave.zscore(key, 'a')
-    assert(ret == 1.3)
-    rst = conn.zrem(key, 'a')
-    assert(rst == 1)
-    time.sleep(0.01)
-    ret = conn_slave.zscore(key, 'a')
-    assert(None == ret)
-
-def test_psync_list():
-    key = "test_psync_list"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    ret = conn.lpush(key, 'a')
-    assert (ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.lindex(key, 0)
-    assert (ret == 'a')
-    ret = conn.lset(key, 0, 'b')
-    assert(ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.lindex(key, 0)
-    assert(ret == 'b')
-
-    ret = conn.rpop(key)
-    assert (ret == 'b')
-    time.sleep(0.01)
-    ret = conn_slave.lindex(key, 0)
-    assert (None == ret)
-
-    ret = conn.rpush(key, 'a')
-    assert (ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.lindex(key, 0)
-    assert (ret == 'a')
-
-    ret = conn.lpop(key)
-    assert (ret == 'a')
-    time.sleep(0.01)
-    ret = conn_slave.lrange(key, 0, 1)
-    assert (ret == [])
-
-    elems = ["one", "two", "three"]
-    ret = conn.rpush(key, *elems)
-    assert (ret == len(elems))
-    ret = conn.ltrim(key, 1, -1)
-    assert (ret)
-    time.sleep(0.01)
-    ret = conn_slave.lrange(key, 0, -1)
-    assert (ret == elems[1:])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_psync_set():
-    key = "test_psync_set"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    rst = conn.sadd(key, 'a')
-    assert(rst == 1)
-    time.sleep(0.01)
-    ret = conn_slave.sismember(key, 'a')
-    assert(ret == 1)
-    ret = conn.srem(key, 'a')
-    assert(ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.sismember(key, 'a')
-    assert(ret == 0)
-
-
-def test_psync_hash():
-    key = "test_psync_hash"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    ret = conn.hset(key, 'a', '1.3')
-    assert(ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.hget(key, 'a')
-    assert(ret == '1.3')
-    ret = conn.hdel(key, 'a')
-    assert(ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.hget(key, 'a')
-    assert(None == ret)
-
-def test_psync_bitmap():
-    key = "test_psync_bitmap"
-    conn = get_redis_conn()
-    conn_slave = get_redis_conn(False)
-    ret = conn.setbit(key, 1, 1)
-    assert(ret == 0)
-    time.sleep(0.01)
-    ret = conn_slave.getbit(key, 1)
-    assert(ret == 1)
-    ret = conn.setbit(key, 1, 0)
-    assert(ret == 1)
-    time.sleep(0.01)
-    ret = conn_slave.getbit(key, 1)
-    assert(ret == 0)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
diff --git a/tests/functional/pub_sub_test.py b/tests/functional/pub_sub_test.py
deleted file mode 100644
index ee6bf74..0000000
--- a/tests/functional/pub_sub_test.py
+++ /dev/null
@@ -1,165 +0,0 @@
-import redis
-from assert_helper import *
-import time
-import threading
-from conn import *
-
-
-def subscribe(channel, master=True):
-    conn = get_redis_conn(master)
-    p = conn.pubsub()
-    p.subscribe(channel)
-
-    for item in p.listen():
-        if item['type'] == "message":
-            assert (item['data'] == "a")
-            p.unsubscribe()
-            break
-
-
-def psubscribe(pattern, master=True):
-    conn = get_redis_conn(master)
-    p = conn.pubsub()
-    p.psubscribe(pattern)
-
-    for item in p.listen():
-        if item['type'] == "message":
-            assert (item['data'] == "a")
-            p.punsubscribe()
-            break
-
-
-def test_replication():
-    channel = "test_publish"
-
-    x = threading.Thread(target=subscribe, args=(channel,))
-    x.start()
-
-    y = threading.Thread(target=subscribe, args=(channel, False))
-    y.start()
-
-    time.sleep(1)
-
-    conn = get_redis_conn()
-    ret = conn.publish(channel, "a")
-    assert (ret == 1)
-
-    time.sleep(0.01)
-
-    ret = conn.execute_command("pubsub", "channels")
-    assert (ret == [])
-
-
-def test_pubsub_channels():
-    channel = "test_pubsub_channels"
-    channel_two = "two_test_pubsub_channels"
-    pattern_match_all = "test*"
-    pattern_unmatch_all = "a*"
-    pattern_match_question_mark = "test?pubsub_channels"
-    pattern_unmatch_question_mark = "tes?pubsub_channels"
-    pattern_match_or = "tes[ta]_pubsub_channels"
-    pattern_unmatch_or = "tes[sa]_pubsub_channels"
-
-    x = threading.Thread(target=subscribe, args=(channel,))
-    x.start()
-
-    time.sleep(1)
-
-    conn = get_redis_conn()
-    ret = conn.execute_command("pubsub", "channels")
-    assert (ret == [channel])
-    ret = conn.execute_command("pubsub", "channels", pattern_match_all)
-    assert (ret == [channel])
-    ret = conn.execute_command("pubsub", "channels", pattern_unmatch_all)
-    assert (ret == [])
-    ret = conn.execute_command("pubsub", "channels", pattern_match_question_mark)
-    assert (ret == [channel])
-    ret = conn.execute_command("pubsub", "channels", pattern_unmatch_question_mark)
-    assert (ret == [])
-    ret = conn.execute_command("pubsub", "channels", pattern_match_or)
-    assert (ret == [channel])
-    ret = conn.execute_command("pubsub", "channels", pattern_unmatch_or)
-    assert (ret == [])
-
-    y = threading.Thread(target=subscribe, args=(channel_two,))
-    y.start()
-
-    time.sleep(1)
-
-    ret = conn.execute_command("pubsub", "channels")
-    assert (ret == [channel, channel_two])
-
-    ret = conn.publish(channel, "a")
-    assert (ret == 1)
-    ret = conn.publish(channel_two, "a")
-    assert (ret == 1)
-
-    time.sleep(0.01)
-
-    ret = conn.execute_command("pubsub", "channels")
-    assert (ret == [])
-
-
-def test_pubsub_numsub():
-    channel = "test_pubsub_numsub"
-
-    x = threading.Thread(target=subscribe, args=(channel,))
-    x.start()
-
-    time.sleep(1)
-
-    conn = get_redis_conn()
-
-    ret = conn.execute_command("pubsub", "numsub", channel)
-    assert (ret == [channel, 1L])
-
-    y = threading.Thread(target=subscribe, args=(channel,))
-    y.start()
-
-    time.sleep(1)
-
-    ret = conn.execute_command("pubsub", "numsub", channel)
-    assert (ret == [channel, 2L])
-
-    ret = conn.publish(channel, "a")
-    assert (ret == 2)
-
-    time.sleep(0.01)
-
-    ret = conn.execute_command("pubsub", "numsub", channel)
-    assert (ret == [channel, 0L])
-
-
-def test_pubsub_numpat():
-    channel = "test_publish"
-    channel_two = "2_test_publish"
-    pattern_match_all = "test*"
-    pattern_match_all_two = "2*"
-
-    conn = get_redis_conn()
-
-    x = threading.Thread(target=psubscribe, args=(pattern_match_all,))
-    x.start()
-
-    time.sleep(1)
-
-    ret = conn.execute_command("pubsub", "numpat")
-    assert (ret == 1)
-
-    y = threading.Thread(target=psubscribe, args=(pattern_match_all_two,))
-    y.start()
-
-    time.sleep(1)
-
-    ret = conn.execute_command("pubsub", "numpat")
-    assert (ret == 2)
-
-    ret = conn.publish(channel, "a")
-    assert (ret == 1)
-    ret = conn.publish(channel_two, "a")
-    assert (ret == 1)
-
-    time.sleep(0.01)
-
-    ret = conn.execute_command("pubsub", "numpat")
-    assert (ret == 0)
\ No newline at end of file
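The channel patterns above exercise Redis-style glob matching: * matches any run of characters, ? matches exactly one character, and [..] matches one character from a set. For the patterns used here, Python's fnmatch behaves the same way and can be used to sanity-check the expected matches:

    from fnmatch import fnmatchcase

    channel = "test_pubsub_channels"
    assert fnmatchcase(channel, "test*")                     # '*' matches any run of characters
    assert not fnmatchcase(channel, "a*")
    assert fnmatchcase(channel, "test?pubsub_channels")      # '?' matches exactly one character
    assert not fnmatchcase(channel, "tes?pubsub_channels")
    assert fnmatchcase(channel, "tes[ta]_pubsub_channels")   # '[ta]' matches one char from the set
    assert not fnmatchcase(channel, "tes[sa]_pubsub_channels")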
diff --git a/tests/functional/set_test.py b/tests/functional/set_test.py
deleted file mode 100644
index a81138c..0000000
--- a/tests/functional/set_test.py
+++ /dev/null
@@ -1,197 +0,0 @@
-import redis
-from assert_helper import *
-from conn import *
-
-
-def test_sadd_and_sismember():
-    conn = get_redis_conn()
-    key = "test_sadd_and_srem"
-    ret = conn.sadd(key, 'a')
-    assert(ret == 1)
-    value = conn.sismember(key, 'a')
-    assert(value == 1)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_srem():
-    conn = get_redis_conn()
-    key = "test_srem"
-    ret = conn.sadd(key, 'a')
-    ret = conn.srem(key, 'a')
-    assert(ret == 1)
-
-
-def test_spop():
-    conn = get_redis_conn()
-    key = "test_spop"
-    ret = conn.sadd(key, 'a')
-    ret = conn.spop(key)
-    assert(ret[0] == 'a')
-
-def test_smove():
-    conn = get_redis_conn()
-    key = "test_smove"
-    key_o = 'test_smove_o'
-    ret = conn.sadd(key, 'a', 'b')
-    ret = conn.sadd(key_o, 'c')
-    ret = conn.smove(key, key_o, 'a')
-    assert(ret == 1)
-    ret = conn.smembers(key_o)
-    assert (ret == {'c', 'a'})
-
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_scard():
-    conn = get_redis_conn()
-    key = "test_scard"
-    ret = conn.sadd(key, 'a')
-    ret = conn.scard(key)
-    assert(ret == 1)
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-
-
-def test_srandmember():
-    conn = get_redis_conn()
-    key = "test_srandmember"
-    ret = conn.sadd(key, 'a')
-    ret = conn.srandmember(key)
-    assert(ret[0] == 'a')
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-
-
-def test_smembers():
-    conn = get_redis_conn()
-    key = "test_smembers"
-    ret = conn.sadd(key, 'a')
-    ret = conn.smembers(key)
-    assert(ret == {'a'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-
-
-def test_sdiff():
-    conn = get_redis_conn()
-    key = "test_sdiff"
-    key_o = 'test_sdiff_o'
-    ret = conn.sadd(key, 'a', 'b')
-    ret = conn.sadd(key_o, 'b')
-    ret = conn.sdiff(key, key_o)
-    assert(ret == {'a'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_sdiffstore():
-    conn = get_redis_conn()
-    key = 'test_sdiffstore'
-    key_main = "test_sdiff"
-    key_o = 'test_sdiff_o'
-    ret = conn.sadd(key_main, 'a', 'b')
-    ret = conn.sadd(key_o, 'b')
-    ret = conn.sdiffstore(key, key_main, key_o)
-    assert(ret == 1)
-    ret = conn.smembers(key)
-    assert (ret == {'a'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_main)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_sinter():
-    conn = get_redis_conn()
-    key = "test_sinter"
-    key_o = 'test_sinter_o'
-    ret = conn.sadd(key, 'a', 'b')
-    ret = conn.sadd(key_o, 'a')
-    ret = conn.sinter(key, key_o)
-    assert(ret == {'a'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_sinterstore():
-    conn = get_redis_conn()
-    key = 'test_sinterstore'
-    key_main = "test_sinter"
-    key_o = 'test_sinter_o'
-    ret = conn.sadd(key_main, 'a', 'b')
-    ret = conn.sadd(key_o, 'a')
-    ret = conn.sinterstore(key, key_main, key_o)
-    assert(ret == 1)
-    ret = conn.smembers(key)
-    assert (ret == {'a'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_main)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_sunion():
-    conn = get_redis_conn()
-    key = "test_sunion"
-    key_o = 'test_sunion_o'
-    ret = conn.sadd(key, 'a', 'b')
-    ret = conn.sadd(key_o, 'a')
-    ret = conn.sunion(key, key_o)
-    assert(ret == {'a', 'b'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_sunionstore():
-    conn = get_redis_conn()
-    key = 'test_sunionstore'
-    key_main = "test_sunion"
-    key_o = 'test_sunion_o'
-    ret = conn.sadd(key_main, 'a', 'b')
-    ret = conn.sadd(key_o, 'a')
-    ret = conn.sunionstore(key, key_main, key_o)
-    assert(ret == 2)
-    ret = conn.smembers(key)
-    assert (ret == {'a', 'b'})
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key_main)
-    assert (ret == 1)
-    ret = conn.delete(key_o)
-    assert (ret == 1)
-
-
-def test_sscan():
-    conn = get_redis_conn()
-    key = "test_sscan"
-    ret = conn.sadd(key, 'a')
-    ret = conn.execute_command("SSCAN " + key + " 0")
-    assert (ret == ['a', ['a']])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
diff --git a/tests/functional/string_test.py b/tests/functional/string_test.py
deleted file mode 100644
index 1f230af..0000000
--- a/tests/functional/string_test.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import redis
-from assert_helper import *
-from conn import *
-
-def test_get_and_set():
-    key = "test_get_and_set"
-    conn = get_redis_conn()
-    ret = conn.set(key, "bar")
-    assert(ret == True)
-    value = conn.get(key)
-    assert(value == "bar")
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_append():
-    key = "test_append"
-    conn = get_redis_conn()
-    ret = conn.append(key, "Hello")
-    assert(ret == 5)
-    ret = conn.append(key, " World")
-    assert(ret == 11)
-    ret = conn.get(key)
-    assert(ret == "Hello World")
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_strlen():
-    key = "test_strlen"
-    conn = get_redis_conn()
-    ret = conn.set(key, "Hello World")
-    assert(ret)
-    ret = conn.strlen(key)
-    assert(ret == 11)
-    ret = conn.strlen("noexistskey")
-    assert(ret == 0)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_delete():
-    key = "test_delete"
-    conn = get_redis_conn()
-    ret = conn.delete(key)
-    assert(ret == 0)
-    ret = conn.set(key, "bar")
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_getset():
-    key = "test_getset"
-    conn = get_redis_conn()
-    ret = conn.getset(key, "bar")
-    assert(ret == None)
-    ret = conn.getset(key, "new_bar")
-    assert(ret == "bar")
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_set_with_option():
-    key = "test_set_with_option"
-    conn = get_redis_conn()
-    ret = conn.set(key, "bar", nx=True)
-    assert(ret == True)
-    ret = conn.set(key, "bar", nx=True)
-    assert(ret == None)
-    ret = conn.set(key, "bar", xx=True)
-    assert(ret == True)
-    ret = conn.set(key, "bar", px=1024000, xx=True)
-    assert(ret == True)
-    ret = conn.ttl(key)
-    assert(ret >= 1023 and ret <= 1025)
-    ret = conn.set(key, "bar", ex=1024, xx=True)
-    assert(ret == True)
-    ret = conn.ttl(key)
-    assert(ret >= 1023 and ret <= 1025)
-    ret = conn.set(key, "bar", ex=1024)
-    assert(ret == True)
-    ret = conn.ttl(key)
-    assert(ret >= 1023 and ret <= 1025)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_setex():
-    key = "test_setex"
-    conn = get_redis_conn()
-    ret = conn.setex(key, "bar", 1024)
-    assert(ret == True)
-    ret = conn.ttl(key)
-    assert(ret >= 1023 and ret <= 1025)
-    ret = conn.delete(key)
-    assert(ret == 1)
-    assert_raise(redis.RedisError, conn.setex, "foo", "bar", "invalid")
-
-def test_setnx():
-    key = "test_setnx"
-    conn = get_redis_conn()
-    ret = conn.setnx(key, "bar")
-    assert(ret == 1)
-    ret = conn.setnx(key, "bar")
-    assert(ret == 0)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_getrange():
-    key = "test_getrange"
-    value = "This is a string"
-    conn = get_redis_conn()
-    ret = conn.set(key, value)
-    assert(ret)
-    ret = conn.getrange(key, 0, 3)
-    assert(ret == value[0:4])
-    ret = conn.getrange(key, -3, -1)
-    assert(value[-3:] == ret)
-    ret = conn.getrange(key, 0, -1)
-    assert(value == ret)
-    ret = conn.getrange(key, 10, 100)
-    assert(value[10:] == ret)
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_setrange():
-    key = "test_setrange"
-    conn = get_redis_conn()
-    ret = conn.set(key, "hello world")
-    assert(ret == 1)
-    ret = conn.setrange(key, 6, "redis")
-    assert(ret == 11) 
-    ret = conn.get(key)
-    assert(ret == "hello redis")
-    ret = conn.delete(key)
-    assert(ret == 1)
-    ret = conn.setrange(key, 6, "redis")
-    assert(ret == 11) 
-    ret = conn.get(key)
-    assert(ret == ("\0"*6+"redis"))
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-def test_incrby():
-    key = "test_incrby"
-    conn = get_redis_conn()
-    ret = conn.incrby(key, 100)
-    assert(ret == 100) 
-    ret = conn.incrby(key, -100)
-    assert(ret == 0) 
-    ret = conn.delete(key)
-    assert(ret == 1)
-    # TODO: not number of overflow case
-
-def test_mset_and_mget():
-    key = "test_mset_and_mget"
-    conn = get_redis_conn()
-    kvs = {'kkk-%s' % i :'vvv-%s' % i for i in range(10)}
-    ret = conn.mset(**kvs)
-    assert(ret == True)
-    keys = kvs.keys()
-    vals = conn.mget(keys)
-    for i, k in enumerate(keys):
-        assert(kvs[k] == vals[i])
-    for i, k in enumerate(keys):
-        ret = conn.delete(k)
-        assert(ret == True)
-
-def test_incr_by_float():
-    key = "test_incr_by_float"
-    conn = get_redis_conn()
-    ret = conn.incrbyfloat(key, 1.11)
-    assert(ret == 1.11) 
-    ret = conn.incrbyfloat(key, -1.11)
-    assert(ret == 0) 
-    ret = conn.delete(key)
-    assert(ret == 1)
-
diff --git a/tests/functional/zset_test.py b/tests/functional/zset_test.py
deleted file mode 100644
index fcf97a6..0000000
--- a/tests/functional/zset_test.py
+++ /dev/null
@@ -1,384 +0,0 @@
-import redis
-import sys
-from assert_helper import *
-from conn import *
-
-
-def test_zadd_and_zscore():
-    conn = get_redis_conn()
-    key = "test_sadd_and_srem"
-    ret = conn.zadd(key, 'a', 1.3)
-    assert(ret == 1)
-    ret = conn.zscore(key, 'a')
-    assert(ret == 1.3)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zcard():
-    conn = get_redis_conn()
-    key = "test_zcard"
-    ret = conn.zadd(key, 'a', 1.3)
-    ret = conn.zcard(key)
-    assert(ret == 1)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zcount():
-    conn = get_redis_conn()
-    key = "test_zcount"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 5.3)
-    ret = conn.zcount(key, 1, 100)
-    assert(ret == 2)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zincrby():
-    conn = get_redis_conn()
-    key = "test_zincrby"
-    ret = conn.zincrby(key, 'a', 1.3)
-    assert (ret == 1.3)
-    ret = conn.zscore(key, 'a')
-    assert(ret == 1.3)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zpopmax():
-    conn = get_redis_conn()
-    key = "test_zpopmax"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8, 'c', sys.float_info.max)
-    assert (ret == 3)
-    ret = conn.execute_command("ZPOPMAX", key)
-    assert (ret[0] == 'c')
-    assert (float(ret[1]) == sys.float_info.max)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zpopmin():
-    conn = get_redis_conn()
-    key = "test_zpopmin"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8)
-    assert (ret == 2)
-    ret = conn.execute_command("ZPOPMIN", key)
-    assert (ret == ['a', '1.300000'])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zrange():
-    conn = get_redis_conn()
-    key = "test_zrange"
-    ret = conn.delete(key)
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8)
-    assert (ret == 2)
-    ret = conn.zrange(key, 0, 1)
-    assert(ret == ['a', 'b'])
-    ret = conn.zrange(key, 0, 1, False, True)
-    assert(ret == [('a', 1.3), ('b', 1.8)])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zrangebyscore():
-    conn = get_redis_conn()
-    key = "test_zrangebyscore"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8, 'c', 2.5)
-    assert (ret == 3)
-    ret = conn.zrangebyscore(key, 1, 3)
-    assert(ret == ['a', 'b', 'c'])
-
-    ret = conn.zrangebyscore(key, 1, 3, None, None, True)
-    assert (ret == [('a', 1.3), ('b', 1.8), ('c', 2.5)])
-
-    ret = conn.zrangebyscore(key, 1, 3, 0, 2, True)
-    assert (ret == [('a', 1.3), ('b', 1.8)])
-
-    ret = conn.zrangebyscore(key, 1, 3, 1, 2)
-    assert (ret == ['b', 'c'])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zlexcount():
-    conn = get_redis_conn()
-    key = "test_zlexcount"
-    ret = conn.zadd(key, 'a', 0, 'b', 0, 'c', 0)
-    assert (ret == 3)
-    ret = conn.zlexcount(key, '-', '+')
-    assert(ret == 3)
-
-    ret = conn.zlexcount(key, '(a', '(c')
-    assert (ret == 1)
-
-    ret = conn.zlexcount(key, '(a', '[c')
-    assert (ret == 2)
-
-    ret = conn.zlexcount(key, '[a', '[c')
-    assert (ret == 3)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zrangebylex():
-    conn = get_redis_conn()
-    key = "test_zrangebylex"
-    ret = conn.zadd(key, 'a', 0, 'b', 0, 'c', 0)
-    assert (ret == 3)
-    ret = conn.zrangebylex(key, '-', '+')
-    assert(ret == ['a', 'b', 'c'])
-
-    ret = conn.zrangebylex(key, '(a', '(c')
-    assert (ret == ['b'])
-
-    ret = conn.zrangebylex(key, '(a', '[c')
-    assert (ret == ['b', 'c'])
-
-    ret = conn.zrangebylex(key, '[a', '[c')
-    assert (ret == ['a', 'b', 'c'])
-
-    ret = conn.zrangebylex(key, '[a', '[c', 0, 2)
-    assert (ret == ['a', 'b'])
-
-    ret = conn.zrangebylex(key, '[a', '[c', 1, 2)
-    assert (ret == ['b', 'c'])
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zrank():
-    conn = get_redis_conn()
-    key = "test_zrank"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8)
-    assert (ret == 2)
-    ret = conn.zrank(key, 'b')
-    assert(ret == 1)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zrevrange():
-    conn = get_redis_conn()
-    key = "test_zrevrange"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8, 'c', sys.float_info.max)
-    assert (ret == 3)
-    ret = conn.zrevrange(key, 0, 1)
-    assert (ret == ['c', 'b'])
-
-    ret = conn.zrevrange(key, 1, 2, True)
-    assert (ret == [('b', 1.8), ('a', 1.3)])
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-
-
-def test_zrevrangebyscore():
-    conn = get_redis_conn()
-    key = "test_zrevrangebyscore"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8, 'c', 2.5, 'd', sys.float_info.max)
-    assert (ret == 4)
-    ret = conn.zrevrangebyscore(key, sys.float_info.max, 1)
-    assert (ret == ['d', 'c', 'b', 'a'])
-    ret = conn.zrevrangebyscore(key, 3, 1)
-    assert(ret == ['c', 'b', 'a'])
-
-    ret = conn.zrevrangebyscore(key, "+inf", "-inf")
-    assert(ret == ['d', 'c', 'b', 'a'])
-
-    ret = conn.zrevrangebyscore(key, 1.8, 1.3)
-    assert(ret == ['b', 'a'])
-
-    ret = conn.zrevrangebyscore(key, 1.8, "(1.3")
-    assert(ret == ['b'])
-
-    ret = conn.zrevrangebyscore(key, "(1.8", "(1.3")
-    assert(ret == [])
-
-    ret = conn.zrevrangebyscore(key, 3, 1, None, None, True)
-    assert (ret == [('c', 2.5), ('b', 1.8), ('a', 1.3)])
-
-    ret = conn.zrevrangebyscore(key, 3, 1, 0, 2, True)
-    assert (ret == [('c', 2.5), ('b', 1.8)])
-
-    ret = conn.zrevrangebyscore(key, 3, 1, 1, 2, True)
-    assert (ret == [('b', 1.8), ('a', 1.3)])
-
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-    
-
-def test_zrevrank():
-    conn = get_redis_conn()
-    key = "test_zrevrank"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8, 'c', sys.float_info.max)
-    assert (ret == 3)
-    ret = conn.zrevrank(key, 'a')
-    assert (ret == 2)
-
-    ret = conn.zrevrank(key, 'c')
-    assert (ret == 0)
-
-    ret = conn.zrevrank(key, 'd')
-    assert (ret == None)
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-
-
-def test_zrem():
-    conn = get_redis_conn()
-    key = "test_zrem"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8)
-    assert (ret == 2)
-    ret = conn.zrem(key, 'a')
-    assert(ret == 1)
-
-    ret = conn.delete(key)
-    assert(ret == 1)
-
-
-def test_zremrangebyrank():
-    conn = get_redis_conn()
-    key = "test_zremrangebyrank"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8)
-    assert (ret == 2)
-    ret = conn.zremrangebyrank(key, 0, 1)
-    assert (ret == 2)
-
-
-def test_zremrangebyscore():
-    conn = get_redis_conn()
-    key = "test_zremrangebyscore"
-    ret = conn.zadd(key, 'a', 1.3, 'b', 1.8)
-    assert (ret == 2)
-    ret = conn.zremrangebyscore(key, 0, 3)
-    assert (ret == 2)
-
-
-def test_zremrangebylex():
-    conn = get_redis_conn()
-    key = "test_zremrangebylex"
-    ret = conn.zadd(key, 'aaaa', 0, 'b', 0,
-                    'c', 0, 'd', 0, 'e', 0, 'foo', 0, 'zap', 0, 'zip', 0, 'ALPHA', 0, 'alpha', 0)
-    assert (ret == 10)
-    ret = conn.zremrangebylex(key, '[alpha', '[omega')
-    assert(ret == 6)
-
-    ret = conn.zrangebylex(key, '-', '+')
-    assert (ret == ['ALPHA', 'aaaa', 'zap', 'zip'])
-
-    ret = conn.zrange(key, 0, -1)
-    assert (ret == ['ALPHA', 'aaaa', 'zap', 'zip'])
-
-    ret = conn.zremrangebylex(key, '-', '+')
-    assert (ret == 4)
-
-    ret = conn.zrangebylex(key, '-', '+')
-    assert (ret == [])
-
-    
-def test_zscan():
-    conn = get_redis_conn()
-    key = "test_zscan"
-    ret = conn.zadd(key, 'a', 1.3)
-    assert (ret == 1)
-    ret = conn.execute_command("ZSCAN " + key + " 0")
-    assert (ret == ['a', ['a']])
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-
-
-def test_zunionstore():
-    conn = get_redis_conn()
-    key = "test_zunionstore"
-    key1 = key + "_1"
-    key2 = key + "_2"
-    ret = conn.zadd(key1, 'one', 1, 'two', 2)
-    assert (ret == 2)
-    ret = conn.zadd(key2, 'one', 1, 'two', 2, 'three', 3)
-    assert (ret == 3)
-
-    ret = conn.zunionstore(key, [key1, key2])
-    assert (ret == 3)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 2.0), ('three', 3), ('two', 4.0)])
-
-    ret = conn.zunionstore(key, [key1, key2], "MIN")
-    assert (ret == 3)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 1.0), ('two', 2.0), ('three', 3)])
-
-    ret = conn.zunionstore(key, [key1, key2], "MAX")
-    assert (ret == 3)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 1.0), ('two', 2.0), ('three', 3)])
-
-    ret = conn.zunionstore(key, {key1: 10, key2: 30})
-    assert (ret == 3)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 40.0), ('two', 80.0), ('three', 90.0)])
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key1)
-    assert (ret == 1)
-    ret = conn.delete(key2)
-    assert (ret == 1)
-
-
-def test_zinterstore():
-    conn = get_redis_conn()
-    key = "test_zinterstore"
-    key1 = key + "_1"
-    key2 = key + "_2"
-    conn.delete(key1)
-    conn.delete(key2)
-    conn.delete(key)
-    ret = conn.zadd(key1, 'one', 1, 'two', 2)
-    assert (ret == 2)
-    ret = conn.zadd(key2, 'one', 1, 'two', 2, 'three', 3)
-    assert (ret == 3)
-
-    ret = conn.zinterstore(key, [key1, key2])
-    assert (ret == 2)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 2.0), ('two', 4.0)])
-
-    ret = conn.zinterstore(key, [key1, key2], "MIN")
-    assert (ret == 2)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 1.0), ('two', 2.0)])
-
-    ret = conn.zinterstore(key, [key1, key2], "MAX")
-    assert (ret == 2)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 1.0), ('two', 2.0)])
-
-    ret = conn.zinterstore(key, {key1: 10, key2: 30})
-    assert (ret == 2)
-    ret = conn.zrange(key, 0, -1, False, True)
-    assert (ret == [('one', 40.0), ('two', 80.0)])
-
-    ret = conn.delete(key)
-    assert (ret == 1)
-    ret = conn.delete(key1)
-    assert (ret == 1)
-    ret = conn.delete(key2)
-    assert (ret == 1)
-
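The weighted ZUNIONSTORE/ZINTERSTORE expectations above follow from multiplying each member's score by its input key's weight and summing across inputs (SUM is the default aggregate). Spelled out for the weights {key1: 10, key2: 30} used in the tests:

    # key1 = {one: 1, two: 2}, key2 = {one: 1, two: 2, three: 3}
    assert 1 * 10 + 1 * 30 == 40   # 'one' is in both inputs
    assert 2 * 10 + 2 * 30 == 80   # 'two' is in both inputs
    assert 3 * 30 == 90            # 'three' only exists in key2, so it appears in the union result only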
diff --git a/tests/main.cc b/tests/main.cc
deleted file mode 100644
index cf47e81..0000000
--- a/tests/main.cc
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <gtest/gtest.h>
-#include <gflags/gflags.h>
-
-int main(int argc, char **argv) {
-  gflags::SetUsageMessage("kvrocks unittest");
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/tests/redisdb_test.cc b/tests/redisdb_test.cc
deleted file mode 100644
index e69de29..0000000
--- a/tests/redisdb_test.cc
+++ /dev/null
diff --git a/tests/rwlock_test.cc b/tests/rwlock_test.cc
deleted file mode 100644
index f87127e..0000000
--- a/tests/rwlock_test.cc
+++ /dev/null
@@ -1,12 +0,0 @@
-#include "lock_manager.h"
-#include <thread>
-#include <gtest/gtest.h>
-
-TEST(LockManager, LockKey) {
-  LockManager locks(8);
-  std::vector<rocksdb::Slice> keys = {"abc", "123", "456", "abc", "123"};
-  for (const auto &key : keys) {
-    locks.Lock(key);
-    locks.UnLock(key);
-  }
-}
\ No newline at end of file
diff --git a/tests/scripts/setup-env.sh b/tests/scripts/setup-env.sh
deleted file mode 100755
index 9b05f86..0000000
--- a/tests/scripts/setup-env.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-if [ $# -eq 0 ]; then
-    echo "usage: ./setup-env.sh bin_dir"
-    exit 1
-fi
-
-yum install -y nc
-
-BIN="$1/kvrocks"
-
-# setup the master and wait for ready
-$BIN -c tests/scripts/test-master.conf
-until nc -z 127.0.0.1 6666; do echo "master is not ready"; sleep 1; done
-
-# setup the slave and wait for ready
-$BIN -c tests/scripts/test-slave.conf
-until nc -z 127.0.0.1 6668; do echo "slave is not ready"; sleep 1; done
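The functional tests above import get_redis_conn from a conn helper that is not part of this diff. A minimal sketch of what such a helper presumably looks like, assuming redis-py and the ports/password from the test configs below (6666 for the master, 6668 for the slave, requirepass foobared):

    import redis

    def get_redis_conn(master=True):
        # Hypothetical helper; the real tests/functional/conn.py is not shown in this diff.
        # Ports and password are taken from tests/scripts/test-master.conf and test-slave.conf.
        port = 6666 if master else 6668
        return redis.Redis(host="127.0.0.1", port=port, password="foobared")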
diff --git a/tests/scripts/test-master.conf b/tests/scripts/test-master.conf
deleted file mode 100644
index 8d634e8..0000000
--- a/tests/scripts/test-master.conf
+++ /dev/null
@@ -1,260 +0,0 @@
-################################ GENERAL #####################################
-
-# By default kvrocks listens for connections from all the network interfaces
-# available on the server. It is possible to listen to just one or multiple
-# interfaces using the "bind" configuration directive, followed by one or
-# more IP addresses.
-#
-# Examples:
-#
-# bind 192.168.1.100 10.0.0.1
-# bind 127.0.0.1
-bind 0.0.0.0
-
-# Accept connections on the specified port, default is 6666.
-port 6666
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# The number of worker threads. Increasing or decreasing it will affect performance.
-workers 8
-
-# The number of replication worker threads. Increasing or decreasing it will affect replication performance.
-# default is 1
-repl-workers 1
-
-# The value should be INFO, WARNING, ERROR, FATAL
-# default is INFO
-loglevel INFO
-
-# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
-# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
-daemonize yes 
-
-# Require clients to issue AUTH <PASSWORD> before processing any other
-# commands.  This might be useful in environments in which you do not trust
-# others with access to the host running kvrocks.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
-# Warning: since kvrocks is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-requirepass foobared
-
-# If the master is password protected (using the "requirepass" configuration
-# directive above) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-masterauth foobared
-
-# Master-slave replication checks that the db name matches; if not, the slave will
-# refuse to sync the db from the master. Don't use the default value; set db-name to
-# identify the cluster.
-db-name change.me.db
-
-# The working directory
-#
-# The DB will be written inside this directory
-# Note that you must specify a directory here, not a file name.
-dir /tmp/kvrocks-master
-
-# The backup directory
-#
-# The DB will be written inside this directory
-# Note that you must specify a directory here, not a file name.
-# backup-dir /tmp/kvrocks/backup
-
-# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
-# default. You can specify a custom pid file location here.
-# pidfile /var/run/kvrocks.pid
-
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-slave-read-only yes
-
-# The slave priority is an integer number published by Kvrocks in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
-#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
-# pick the one with priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the replica as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-slave-priority 100
-
-# TCP listen() backlog.
-#
-# In high requests-per-second environments you need a high backlog in order
-# to avoid slow client connection issues. Note that the Linux kernel
-# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
-# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
-# in order to get the desired effect.
-tcp-backlog 511
-
-#
-# repl-bind 192.168.1.100 10.0.0.1
-# repl-bind 127.0.0.1
-repl-bind 0.0.0.0
-
-# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
-# another kvrocks server. A few things to understand ASAP about kvrocks replication.
-#
-# 1) Kvrocks replication is asynchronous, but you can configure a master to
-#    stop accepting writes if it appears to be not connected with at least
-#    a given number of slaves.
-# 2) Kvrocks slaves are able to perform a partial resynchronization with the
-#    master if the replication link is lost for a relatively small amount of
-#    time. You may want to configure the replication backlog size (see the next
-#    sections of this file) with a sensible value depending on your needs.
-# 3) Replication is automatic and does not need user intervention. After a
-#    network partition slaves automatically try to reconnect to masters
-#    and resynchronize with them.
-#
-# slaveof <masterip> <masterport>
-# slaveof 127.0.0.1 6379
-
-# The maximum allowed rate (in MB/s) that should be used by Replication.
-# If the rate exceeds max-replication-mb, replication will slow down.
-# Default: 0 (i.e. no limit)
-max-replication-mb 0
-
-# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
-# If the rate exceeds max-io-mb, io will slow down.
-# 0 is no limit
-# Default: 500
-max-io-mb 500
-
-# The maximum allowed space (in GB) that should be used by RocksDB.
-# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
-# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization
-# Default: 0 (i.e. no limit)
-max-db-size 0
-
-# The maximum number of backups to keep. The server cron runs every minute to check the number of
-# current backups and purges old ones when it exceeds the maximum to keep. If max-backup-to-keep
-# is 0, no backup will be kept.
-max-backup-to-keep 1
-
-# The maximum number of hours to keep a backup. If max-backup-keep-hours is 0, no backup will be purged.
-# default is 168, 1 week
-max-backup-keep-hours 168
-
-
-################################## SLOW LOG ###################################
-
-# The Kvrocks Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Kvrocks
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 100000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
-################################## CRON ###################################
-
-# Compact scheduler: automatically compact the db at the scheduled time.
-# The time expression format is the same as crontab (currently only * and integers are supported).
-# e.g. compact-cron 0 3 * * * 0 4 * * *
-# would compact the db at 3am and 4am every day
-compact-cron 0 3 * * *
-
-# Backup scheduler: automatically back up the db at the scheduled time.
-# The time expression format is the same as compact-cron.
-# e.g. bgsave-cron 0 3 * * * 0 4 * * *
-# would back up the db at 3am and 4am every day
-# bgsave-cron 0 4 * * *
-
-################################ ROCKSDB #####################################
-
-# Specify the capacity of the metadata column family block cache. A larger block cache
-# may make requests faster since more keys can be cached. Max size is 200*1024.
-# unit is MiB, default 4096
-rocksdb.metadata_block_cache_size 4096
-
-# Specify the capacity of the subkey column family block cache. A larger block cache
-# may make requests faster since more keys can be cached. Max size is 200*1024.
-# unit is MiB, default 8192
-rocksdb.subkey_block_cache_size 8192
-
-# Number of open files that can be used by the DB.  You may need to
-# increase this if your database has a large working set. Value -1 means
-# files opened are always kept open. You can estimate number of files based
-# on target_file_size_base and target_file_size_multiplier for level-based
-# compaction. For universal-style compaction, you can usually set it to -1.
-rocksdb.max_open_files 8096
-
-# Amount of data to build up in memory (backed by an unsorted log
-# on disk) before converting to a sorted on-disk file.
-#
-# Larger values increase performance, especially during bulk loads.
-# Up to max_write_buffer_number write buffers may be held in memory
-# at the same time,
-# so you may wish to adjust this parameter to control memory usage.
-# Also, a larger write buffer will result in a longer recovery time
-# the next time the database is opened.
-#
-# Note that write_buffer_size is enforced per column family.
-# See db_write_buffer_size for sharing memory across column families.
-
-# default is 256MB
-rocksdb.write_buffer_size 256
-
-# The maximum number of write buffers that are built up in memory.
-# The default and the minimum number is 2, so that when 1 write buffer
-# is being flushed to storage, new writes can continue to the other
-# write buffer.
-# If max_write_buffer_number > 3, writing will be slowed down to
-# options.delayed_write_rate if we are writing to the last write buffer
-# allowed.
-rocksdb.max_write_buffer_number 2
-
-# Maximum number of concurrent background compaction jobs, submitted to
-# the default LOW priority thread pool.
-rocksdb.max_background_compactions 2
-
-# Maximum number of concurrent background memtable flush jobs, submitted by
-# default to the HIGH priority thread pool. If the HIGH priority thread pool
-# is configured to have zero threads, flush jobs will share the LOW priority
-# thread pool with compaction jobs.
-rocksdb.max_background_flushes 2
-
-# This value represents the maximum number of threads that will
-# concurrently perform a compaction job by breaking it into multiple,
-# smaller ones that are run simultaneously.
-# Default: 1 (i.e. no subcompactions)
-rocksdb.max_sub_compactions 1
-
-# Specify the compression to use.
-# Accept value: "no", "snappy"
-# default snappy
-rocksdb.compression snappy
-
-################################ NAMESPACE #####################################
-namespace.test change.me
diff --git a/tests/scripts/test-slave.conf b/tests/scripts/test-slave.conf
deleted file mode 100644
index 910c7d4..0000000
--- a/tests/scripts/test-slave.conf
+++ /dev/null
@@ -1,260 +0,0 @@
-################################ GENERAL #####################################
-
-# By default kvrocks listens for connections from all the network interfaces
-# available on the server. It is possible to listen to just one or multiple
-# interfaces using the "bind" configuration directive, followed by one or
-# more IP addresses.
-#
-# Examples:
-#
-# bind 192.168.1.100 10.0.0.1
-# bind 127.0.0.1
-bind 0.0.0.0
-
-# Accept connections on the specified port, default is 6666.
-port 6668
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# The number of worker threads. Increasing or decreasing it will affect performance.
-workers 8
-
-# The number of replication worker threads. Increasing or decreasing it will affect replication performance.
-# default is 1
-repl-workers 1
-
-# The value should be INFO, WARNING, ERROR, FATAL
-# default is INFO
-loglevel INFO
-
-# By default kvrocks does not run as a daemon. Use 'yes' if you need it.
-# Note that kvrocks will write a pid file in /var/run/kvrocks.pid when daemonized.
-daemonize yes 
-
-# Require clients to issue AUTH <PASSWORD> before processing any other
-# commands.  This might be useful in environments in which you do not trust
-# others with access to the host running kvrocks.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
-# Warning: since kvrocks is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-requirepass foobared
-
-# If the master is password protected (using the "requirepass" configuration
-# directive above) it is possible to tell the slave to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the slave request.
-#
-masterauth foobared
-
-# Master-slave replication checks that the db name matches; if not, the slave will
-# refuse to sync the db from the master. Don't use the default value; set db-name to
-# identify the cluster.
-db-name change.me.db
-
-# The working directory
-#
-# The DB will be written inside this directory
-# Note that you must specify a directory here, not a file name.
-dir /tmp/kvrocks-slave
-
-# The backup directory
-#
-# The DB will be written inside this directory
-# Note that you must specify a directory here, not a file name.
-# backup-dir /tmp/kvrocks/backup
-
-# When running daemonized, kvrocks writes a pid file in ${CONFIG_DIR}/kvrocks.pid by
-# default. You can specify a custom pid file location here.
-# pidfile /var/run/kvrocks.pid
-
-# You can configure a slave instance to accept writes or not. Writing against
-# a slave instance may be useful to store some ephemeral data (because data
-# written on a slave will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-slave-read-only yes
-
-# The slave priority is an integer number published by Kvrocks in the INFO output.
-# It is used by Redis Sentinel in order to select a slave to promote into a
-# master if the master is no longer working correctly.
-#
-# A slave with a low priority number is considered better for promotion, so
-# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
-# pick the one with priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the replica as not able to perform the
-# role of master, so a slave with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-slave-priority 100
-
-# TCP listen() backlog.
-#
-# In high requests-per-second environments you need a high backlog in order
-# to avoid slow client connection issues. Note that the Linux kernel
-# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
-# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
-# in order to get the desired effect.
-tcp-backlog 511
-
-#
-# repl-bind 192.168.1.100 10.0.0.1
-# repl-bind 127.0.0.1
-repl-bind 0.0.0.0
-
-# Master-Slave replication. Use slaveof to make a kvrocks instance a copy of
-# another kvrocks server. A few things to understand ASAP about kvrocks replication.
-#
-# 1) Kvrocks replication is asynchronous, but you can configure a master to
-#    stop accepting writes if it appears to be not connected with at least
-#    a given number of slaves.
-# 2) Kvrocks slaves are able to perform a partial resynchronization with the
-#    master if the replication link is lost for a relatively small amount of
-#    time. You may want to configure the replication backlog size (see the next
-#    sections of this file) with a sensible value depending on your needs.
-# 3) Replication is automatic and does not need user intervention. After a
-#    network partition slaves automatically try to reconnect to masters
-#    and resynchronize with them.
-#
-# slaveof <masterip> <masterport>
-slaveof 127.0.0.1 6666 
-
-# The maximum allowed rate (in MB/s) that should be used by Replication.
-# If the rate exceeds max-replication-mb, replication will slow down.
-# Default: 0 (i.e. no limit)
-max-replication-mb 0
-
-# The maximum allowed aggregated write rate of flush and compaction (in MB/s).
-# If the rate exceeds max-io-mb, io will slow down.
-# 0 is no limit
-# Default: 500
-max-io-mb 500
-
-# The maximum allowed space (in GB) that should be used by RocksDB.
-# If the total size of the SST files exceeds max_allowed_space, writes to RocksDB will fail.
-# Please see: https://github.com/facebook/rocksdb/wiki/Managing-Disk-Space-Utilization
-# Default: 0 (i.e. no limit)
-max-db-size 0
-
-# The maximum number of backups to keep. The server cron runs every minute to check the number of
-# current backups and purges old ones when it exceeds the maximum to keep. If max-backup-to-keep
-# is 0, no backup will be kept.
-max-backup-to-keep 1
-
-# The maximum number of hours to keep a backup. If max-backup-keep-hours is 0, no backup will be purged.
-# default is 168, 1 week
-max-backup-keep-hours 168
-
-
-################################## SLOW LOG ###################################
-
-# The Kvrocks Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Kvrocks
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 100000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
-################################## CRON ###################################
-
-# Compact scheduler: automatically compact the db at the scheduled time.
-# The time expression format is the same as crontab (currently only * and integers are supported).
-# e.g. compact-cron 0 3 * * * 0 4 * * *
-# would compact the db at 3am and 4am every day
-compact-cron 0 3 * * *
-
-# Backup scheduler: automatically back up the db at the scheduled time.
-# The time expression format is the same as compact-cron.
-# e.g. bgsave-cron 0 3 * * * 0 4 * * *
-# would back up the db at 3am and 4am every day
-# bgsave-cron 0 4 * * *
-
-################################ ROCKSDB #####################################
-
-# Specify the capacity of the metadata column family block cache. A larger block cache
-# may make requests faster since more keys can be cached. Max size is 200*1024.
-# unit is MiB, default 4096
-rocksdb.metadata_block_cache_size 4096
-
-# Specify the capacity of the subkey column family block cache. A larger block cache
-# may make requests faster since more keys can be cached. Max size is 200*1024.
-# unit is MiB, default 8192
-rocksdb.subkey_block_cache_size 8192
-
-# Number of open files that can be used by the DB.  You may need to
-# increase this if your database has a large working set. Value -1 means
-# files opened are always kept open. You can estimate number of files based
-# on target_file_size_base and target_file_size_multiplier for level-based
-# compaction. For universal-style compaction, you can usually set it to -1.
-rocksdb.max_open_files 8096
-
-# Amount of data to build up in memory (backed by an unsorted log
-# on disk) before converting to a sorted on-disk file.
-#
-# Larger values increase performance, especially during bulk loads.
-# Up to max_write_buffer_number write buffers may be held in memory
-# at the same time,
-# so you may wish to adjust this parameter to control memory usage.
-# Also, a larger write buffer will result in a longer recovery time
-# the next time the database is opened.
-#
-# Note that write_buffer_size is enforced per column family.
-# See db_write_buffer_size for sharing memory across column families.
-
-# default is 256MB
-rocksdb.write_buffer_size 256
-
-# The maximum number of write buffers that are built up in memory.
-# The default and the minimum number is 2, so that when 1 write buffer
-# is being flushed to storage, new writes can continue to the other
-# write buffer.
-# If max_write_buffer_number > 3, writing will be slowed down to
-# options.delayed_write_rate if we are writing to the last write buffer
-# allowed.
-rocksdb.max_write_buffer_number 2
-
-# Maximum number of concurrent background compaction jobs, submitted to
-# the default LOW priority thread pool.
-rocksdb.max_background_compactions 2
-
-# Maximum number of concurrent background memtable flush jobs, submitted by
-# default to the HIGH priority thread pool. If the HIGH priority thread pool
-# is configured to have zero threads, flush jobs will share the LOW priority
-# thread pool with compaction jobs.
-rocksdb.max_background_flushes 2
-
-# This value represents the maximum number of threads that will
-# concurrently perform a compaction job by breaking it into multiple,
-# smaller ones that are run simultaneously.
-# Default: 1 (i.e. no subcompactions)
-rocksdb.max_sub_compactions 1
-
-# Specify the compression to use.
-# Accept value: "no", "snappy"
-# default snappy
-rocksdb.compression snappy
-
-################################ NAMESPACE #####################################
-namespace.test change.me
diff --git a/tests/stats_test.cc b/tests/stats_test.cc
deleted file mode 100644
index 79ab9e7..0000000
--- a/tests/stats_test.cc
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "stats.h"
-
-TEST(Stats, GetMemoryRss) {
-  Stats stats;
-  std::cout << "rss: " << stats.GetMemoryRSS() << std::endl;
-}
diff --git a/tests/string_util_test.cc b/tests/string_util_test.cc
deleted file mode 100644
index e6a62fe..0000000
--- a/tests/string_util_test.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <gtest/gtest.h>
-#include <map>
-#include "util.h"
-
-TEST(StringUtil, ToLower) {
-  std::map<std::string, std::string> cases {
-          {"ABC", "abc"},
-          {"AbC", "abc"},
-          {"abc", "abc"},
-  };
-  for (auto iter = cases.begin(); iter != cases.end(); iter++) {
-    std::string input = iter->first;
-    input = Util::ToLower(input);
-    ASSERT_EQ(input, iter->second);
-  }
-}
-
-TEST(StringUtil, Trim) {
-  std::map<std::string, std::string> cases {
-          {"abc", "abc"},
-          {"   abc    ", "abc"},
-          {"\t\tabc\t\t", "abc"},
-          {"\t\tabc\n\n", "abc"},
-          {"\n\nabc\n\n", "abc"},
-  };
-  for (auto iter = cases.begin(); iter != cases.end(); iter++) {
-    std::string input = iter->first;
-    std::string output;
-    Util::Trim(input, " \t\n", &output);
-    ASSERT_EQ(output, iter->second);
-  }
-}
-
-TEST(StringUtil, Split) {
-  std::vector<std::string> array;
-  std::vector<std::string> expected = {"a", "b", "c", "d"};
-  Util::Split("a,b,c,d", ",", &array);
-  ASSERT_EQ(expected, array);
-  Util::Split("a,b,,c,d,", ",", &array);
-  ASSERT_EQ(expected, array);
-  Util::Split(",a,b,c,d,", ",", &array);
-  ASSERT_EQ(expected, array);
-  Util::Split("a     b  c  d   ", " ", &array);
-  ASSERT_EQ(expected, array);
-  Util::Split("a\tb\nc\t\nd   ", " \t\n", &array);
-  ASSERT_EQ(expected, array);
-}
\ No newline at end of file
diff --git a/tests/t_bitmap_test.cc b/tests/t_bitmap_test.cc
deleted file mode 100644
index a4ca031..0000000
--- a/tests/t_bitmap_test.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "test_base.h"
-#include "redis_bitmap.h"
-
-class RedisBitmapTest : public TestBase {
- protected:
-  explicit RedisBitmapTest() : TestBase() {
-    bitmap = new Redis::Bitmap(storage_, "bitmap_ns");
-  }
-  ~RedisBitmapTest() {
-    delete bitmap;
-  }
-  void SetUp() override {
-    key_ = "test_bitmap_key";
-  }
-  void TearDown() override {}
-
- protected:
-  Redis::Bitmap *bitmap;
-};
-
-TEST_F(RedisBitmapTest, GetAndSetBit) {
-  uint32_t offsets[] = {0, 123, 1024*8, 1024*8+1, 3*1024*8,  3*1024*8+1};
-  for (const auto &offset : offsets) {
-    bool bit = false;
-    bitmap->GetBit(key_, offset, &bit);
-    EXPECT_FALSE(bit);
-    bitmap->SetBit(key_, offset, true, &bit);
-    bitmap->GetBit(key_, offset, &bit);
-    EXPECT_TRUE(bit);
-  }
-  bitmap->Del(key_);
-}
-
-TEST_F(RedisBitmapTest, BitCount) {
-  uint32_t offsets[] = {0, 123, 1024*8, 1024*8+1, 3*1024*8,  3*1024*8+1};
-  for (const auto &offset : offsets) {
-    bool bit = false;
-    bitmap->SetBit(key_, offset, true, &bit);
-  }
-  uint32_t cnt;
-  bitmap->BitCount(key_, 0, 4*1024, &cnt);
-  EXPECT_EQ(cnt, 6);
-  bitmap->BitCount(key_, 0, -1, &cnt);
-  EXPECT_EQ(cnt, 6);
-  bitmap->Del(key_);
-}
-
-TEST_F(RedisBitmapTest, BitPosClearBit) {
-  int pos;
-  bool old_bit;
-  for (int i = 0; i < 1024+16;i ++) {
-    bitmap->BitPos(key_, false, 0, -1, &pos);
-    EXPECT_EQ(pos, i);
-    bitmap->SetBit(key_, i, true, &old_bit);
-    EXPECT_FALSE(old_bit);
-  }
-  bitmap->Del(key_);
-}
-
-TEST_F(RedisBitmapTest, BitPosSetBit) {
-  uint32_t offsets[] = {0, 123, 1024*8, 1024*8+16, 3*1024*8,  3*1024*8+16};
-  for (const auto &offset : offsets) {
-    bool bit = false;
-    bitmap->SetBit(key_, offset, true, &bit);
-  }
-  int pos;
-  int start_indexes[] = {0, 1, 124, 1025, 1027, 3*1024+1};
-  for (int i = 0; i < sizeof(start_indexes)/ sizeof(start_indexes[0]); i++) {
-    bitmap->BitPos(key_, true, start_indexes[i], -1, &pos);
-    EXPECT_EQ(pos, offsets[i]);
-  }
-  bitmap->Del(key_);
-}
diff --git a/tests/t_encoding_test.cc b/tests/t_encoding_test.cc
deleted file mode 100644
index 127524a..0000000
--- a/tests/t_encoding_test.cc
+++ /dev/null
@@ -1,18 +0,0 @@
-#include <gtest/gtest.h>
-#include "encoding.h"
-
-#include <limits>
-TEST(Util, EncodeAndDecodeDouble) {
-  std::vector<double> values = {-1234, -100.1234, -1.2345, 0, 1.2345, 100.1234, 1234};
-  std::string prev_bytes;
-  for (auto value : values) {
-    std::string bytes;
-    PutDouble(&bytes, value);
-    double got = DecodeDouble(bytes.data());
-    if (!prev_bytes.empty()) {
-      ASSERT_LT(prev_bytes, bytes);
-    }
-    prev_bytes.assign(bytes);
-    ASSERT_EQ(value, got);
-  }
-}
\ No newline at end of file
diff --git a/tests/t_hash_test.cc b/tests/t_hash_test.cc
deleted file mode 100644
index 57d56d3..0000000
--- a/tests/t_hash_test.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-#include <gtest/gtest.h>
-
-#include "test_base.h"
-#include "redis_hash.h"
-class RedisHashTest : public TestBase {
-protected:
-  explicit RedisHashTest() : TestBase() {
-    hash = new Redis::Hash(storage_, "hash_ns");
-  }
-  ~RedisHashTest() {
-    delete hash;
-  }
-  void SetUp() override {
-    key_ = "test_hash->key";
-    fields_ = {"test-hash-key-1", "test-hash-key-2", "test-hash-key-3"};
-    values_  = {"hash-test-value-1", "hash-test-value-2", "hash-test-value-3"};
-  }
-  void TearDown() override {
-  }
-
-protected:
-  Redis::Hash *hash;
-};
-
-TEST_F(RedisHashTest, GetAndSet) {
-  int ret;
-  for (int i = 0; i < fields_.size(); i++) {
-    rocksdb::Status s = hash->Set(key_, fields_[i], values_[i], &ret);
-    EXPECT_TRUE(s.ok() && ret == 1);
-  }
-  for (int i = 0; i < fields_.size(); i++) {
-    std::string got;
-    rocksdb::Status s = hash->Get(key_, fields_[i], &got);
-    EXPECT_EQ(values_[i], got);
-  }
-  rocksdb::Status s = hash->Delete(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  hash->Del(key_);
-}
-
-TEST_F(RedisHashTest, MGetAndMSet) {
-  int ret;
-  std::vector<FieldValue> fvs;
-  for (int i = 0; i < fields_.size(); i++) {
-    fvs.emplace_back(FieldValue{fields_[i].ToString(), values_[i].ToString()});
-  }
-  rocksdb::Status s = hash->MSet(key_, fvs, false, &ret);
-  EXPECT_TRUE(s.ok() && fvs.size()==ret);
-  s = hash->MSet(key_, fvs, false, &ret);
-  EXPECT_EQ(ret ,0);
-  std::vector<std::string> values;
-  s = hash->MGet(key_, fields_, &values);
-  for (int i = 0; i < fields_.size(); i++) {
-    EXPECT_EQ(values[i], values_[i].ToString());
-  }
-  s = hash->Delete(key_, fields_, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  hash->Del(key_);
-}
-
-TEST_F(RedisHashTest, SetNX) {
-  int ret;
-  Slice field("foo");
-  rocksdb::Status s = hash->Set(key_, field, "bar", &ret);
-  EXPECT_TRUE(s.ok() && ret == 1);
-  s = hash->Set(key_, field, "bar", &ret);
-  EXPECT_TRUE(s.ok() && ret == 0);
-  std::vector<Slice> fields = {field};
-  s = hash->Delete(key_, fields, &ret);
-  EXPECT_EQ(fields.size(), ret);
-  hash->Del(key_);
-}
-
-TEST_F(RedisHashTest, HGetAll) {
-  int ret;
-  for (int i = 0; i < fields_.size(); i++) {
-    rocksdb::Status s = hash->Set(key_, fields_[i], values_[i], &ret);
-    EXPECT_TRUE(s.ok() && ret == 1);
-  }
-  std::vector<FieldValue> fvs;
-  rocksdb::Status s = hash->GetAll(key_, &fvs);
-  EXPECT_TRUE(s.ok() && fvs.size() == fields_.size());
-  s = hash->Delete(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  hash->Del(key_);
-}
-
-TEST_F(RedisHashTest, HIncr) {
-  int64_t value;
-  Slice field("hash-incrby-invalid-field");
-  for (int i = 0; i < 32; i++) {
-    rocksdb::Status s = hash->IncrBy(key_, field, 1, &value);
-    EXPECT_TRUE(s.ok());
-  }
-  std::string bytes;
-  hash->Get(key_, field, &bytes);
-  value = std::stoll(bytes);
-  EXPECT_EQ(32, value);
-  hash->Del(key_);
-}
-
-TEST_F(RedisHashTest, HIncrInvalid) {
-  int ret;
-  int64_t value;
-  Slice field("hash-incrby-invalid-field");
-  rocksdb::Status s = hash->IncrBy(key_, field, 1, &value);
-  EXPECT_TRUE(s.ok() && value == 1);
-
-  s = hash->IncrBy(key_, field, LLONG_MAX, &value);
-  EXPECT_TRUE(s.IsInvalidArgument());
-  hash->Set(key_, field, "abc", &ret);
-  s = hash->IncrBy(key_, field, 1, &value);
-  EXPECT_TRUE(s.IsInvalidArgument());
-
-  hash->Set(key_, field, "-1", &ret);
-  s = hash->IncrBy(key_, field, -1, &value);
-  EXPECT_TRUE(s.ok());
-  s = hash->IncrBy(key_, field, LLONG_MIN, &value);
-  EXPECT_TRUE(s.IsInvalidArgument());
-
-  hash->Del(key_);
-}
-
-TEST_F(RedisHashTest, HIncrByFloat) {
-  float value;
-  Slice field("hash-incrbyfloat-invalid-field");
-  for (int i = 0; i < 32; i++) {
-    rocksdb::Status s = hash->IncrByFloat(key_, field, 1.2, &value);
-    EXPECT_TRUE(s.ok());
-  }
-  std::string bytes;
-  hash->Get(key_, field, &bytes);
-  value = std::stof(bytes);
-  EXPECT_FLOAT_EQ(32*1.2, value);
-  hash->Del(key_);
-}
\ No newline at end of file
diff --git a/tests/t_list_test.cc b/tests/t_list_test.cc
deleted file mode 100644
index 2d0b330..0000000
--- a/tests/t_list_test.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-#include "test_base.h"
-#include "redis_list.h"
-
-class RedisListTest : public TestBase {
-protected:
-  explicit RedisListTest():TestBase() {
-    list = new Redis::List(storage_, "list_ns");
-  }
-  ~RedisListTest() {
-    delete list;
-  }
-  void SetUp() override {
-    key_ = "test-list-key";
-    fields_ = {"list-test-key-1", "list-test-key-2", "list-test-key-3", "list-test-key-4", "list-test-key-5"};
-  }
-
-protected:
-  Redis::List *list;
-};
-
-TEST_F(RedisListTest, PushAndPop) {
-  int ret;
-  list->Push(key_, fields_, true, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  for (int i = 0; i < fields_.size(); i++) {
-    std::string elem;
-    list->Pop(key_, &elem, false);
-    EXPECT_EQ(elem, fields_[i].ToString());
-  }
-  list->Push(key_, fields_, false, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  for (int i = 0; i < fields_.size(); i++) {
-    std::string elem;
-    list->Pop(key_, &elem, true);
-    EXPECT_EQ(elem, fields_[i].ToString());
-  }
-  list->Del(key_);
-}
-
-TEST_F(RedisListTest, Pushx) {
-  int ret;
-  Slice pushx_key("test-pushx-key");
-  rocksdb::Status s = list->PushX(pushx_key, fields_, true, &ret);
-  EXPECT_TRUE(s.ok());
-  list->Push(pushx_key, fields_, true, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  s = list->PushX(pushx_key, fields_, true, &ret);
-  EXPECT_EQ(ret, fields_.size()*2);
-  list->Del(pushx_key);
-}
-
-TEST_F(RedisListTest, Index) {
-  int ret;
-  list->Push(key_, fields_, false, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  std::string elem;
-  for (int i = 0; i < fields_.size(); i++) {
-    list->Index(key_,i, &elem);
-    EXPECT_EQ(fields_[i].ToString(), elem);
-  }
-  for (int i = 0; i < fields_.size(); i++) {
-    list->Pop(key_, &elem, true);
-    EXPECT_EQ(elem, fields_[i].ToString());
-  }
-  rocksdb::Status s = list->Index(key_,-1, &elem);
-  EXPECT_TRUE(s.IsNotFound());
-  list->Del(key_);
-}
-
-TEST_F(RedisListTest, Set) {
-  int ret;
-  list->Push(key_, fields_, false, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  Slice new_elem("new_elem");
-  list->Set(key_, -1, new_elem);
-  std::string elem;
-  list->Index(key_, -1, &elem);
-  EXPECT_EQ(new_elem.ToString(), elem);
-  for (int i = 0; i < fields_.size(); i++) {
-    list->Pop(key_, &elem, true);
-  }
-  list->Del(key_);
-}
-
-TEST_F(RedisListTest, Range) {
-  int ret;
-  list->Push(key_, fields_, false, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  std::vector<std::string> elems;
-  list->Range(key_, 0, int(elems.size()-1), &elems);
-  EXPECT_EQ(elems.size(), fields_.size());
-  for (int i = 0; i < elems.size(); i++) {
-    EXPECT_EQ(fields_[i].ToString(), elems[i]);
-  }
-  for (int i = 0; i < fields_.size(); i++) {
-    std::string elem;
-    list->Pop(key_, &elem, true);
-    EXPECT_EQ(elem, fields_[i].ToString());
-  }
-  list->Del(key_);
-}
-
-TEST_F(RedisListTest, Trim) {
-  int ret;
-  list->Push(key_, fields_, false, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  list->Trim(key_, 1, 2000);
-  uint32_t len;
-  list->Size(key_, &len);
-  EXPECT_EQ(fields_.size()-1, len);
-  for (int i = 1; i < fields_.size(); i++) {
-    std::string elem;
-    list->Pop(key_, &elem, true);
-    EXPECT_EQ(elem, fields_[i].ToString());
-  }
-  list->Del(key_);
-}
-
-TEST_F(RedisListTest, RPopLPush) {
-  int ret;
-  list->Push(key_, fields_, true, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  Slice dst("test-list-rpoplpush-key");
-  for (int i = 0; i < fields_.size(); i++) {
-    std::string elem;
-    list->RPopLPush(key_, dst, &elem);
-    EXPECT_EQ(fields_[i].ToString(), elem);
-  }
-  for (int i = 0; i < fields_.size(); i++) {
-    std::string elem;
-    list->Pop(dst, &elem, false);
-    EXPECT_EQ(elem, fields_[i].ToString());
-  }
-  list->Del(key_);
-  list->Del(dst);
-}
\ No newline at end of file
diff --git a/tests/t_metadata_test.cc b/tests/t_metadata_test.cc
deleted file mode 100644
index 2a43dfb..0000000
--- a/tests/t_metadata_test.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-#include "redis_metadata.h"
-#include "redis_hash.h"
-#include "test_base.h"
-#include <gtest/gtest.h>
-
-TEST(InternalKey, EncodeAndDecode) {
-  Slice key = "test-metadata-key";
-  Slice sub_key = "test-metadata-sub-key";
-  Slice ns = "namespace";
-  uint64_t version = 12;
-  std::string ns_key;
-
-  ComposeNamespaceKey(ns, key, &ns_key);
-  InternalKey ikey(ns_key, sub_key, version);
-  ASSERT_EQ(ikey.GetKey(), key);
-  ASSERT_EQ(ikey.GetSubKey(), sub_key);
-  ASSERT_EQ(ikey.GetVersion(), version);
-  std::string bytes;
-  ikey.Encode(&bytes);
-  InternalKey ikey1(bytes);
-  EXPECT_EQ(ikey, ikey1);
-}
-
-TEST(Metadata, EncodeAndDecode) {
-  std::string string_bytes;
-  Metadata string_md(kRedisString);
-  string_md.expire = 123;
-  string_md.Encode(&string_bytes);
-  Metadata string_md1(kRedisNone);
-  string_md1.Decode(string_bytes);
-  ASSERT_EQ(string_md, string_md1);
-  ListMetadata list_md;
-  list_md.flags = 13;
-  list_md.expire = 123;
-  list_md.version = 2;
-  list_md.size = 1234;
-  list_md.head = 123;
-  list_md.tail = 321;
-  ListMetadata list_md1;
-  std::string list_bytes;
-  list_md.Encode(&list_bytes);
-  list_md1.Decode(list_bytes);
-  ASSERT_EQ(list_md, list_md1);
-}
-
-class RedisTypeTest : public TestBase {
-public:
-  RedisTypeTest() :TestBase() {
-    redis = new Redis::Database(storage_, "default_ns");
-    hash = new Redis::Hash(storage_, "default_ns");
-    key_ = "test-redis-type";
-    fields_ = {"test-hash-key-1", "test-hash-key-2", "test-hash-key-3"};
-    values_  = {"hash-test-value-1", "hash-test-value-2", "hash-test-value-3"};
-  }
-  ~RedisTypeTest() {
-    delete redis;
-    delete hash;
-  }
-protected:
-  Redis::Database *redis;
-  Redis::Hash *hash;
-};
-
-TEST_F(RedisTypeTest, GetMetadata) {
-  int ret;
-  std::vector<FieldValue> fvs;
-  for (int i = 0; i < fields_.size(); i++) {
-    fvs.emplace_back(FieldValue{fields_[i].ToString(), values_[i].ToString()});
-  }
-  rocksdb::Status s = hash->MSet(key_, fvs, false, &ret);
-  EXPECT_TRUE(s.ok() && fvs.size()==ret);
-  HashMetadata metadata;
-  std::string ns_key;
-  redis->AppendNamespacePrefix(key_, &ns_key);
-  redis->GetMetadata(kRedisHash, ns_key, &metadata);
-  EXPECT_EQ(fvs.size(), metadata.size);
-  s = redis->Del(key_);
-  EXPECT_TRUE(s.ok());
-}
-
-TEST_F(RedisTypeTest, Expire) {
-  int ret;
-  std::vector<FieldValue> fvs;
-  for (int i = 0; i < fields_.size(); i++) {
-    fvs.emplace_back(FieldValue{fields_[i].ToString(), values_[i].ToString()});
-  }
-  rocksdb::Status s = hash->MSet(key_, fvs, false, &ret);
-  EXPECT_TRUE(s.ok() && fvs.size()==ret);
-  int64_t now;
-  rocksdb::Env::Default()->GetCurrentTime(&now);
-  redis->Expire(key_,int(now+2));
-  int ttl;
-  redis->TTL(key_, &ttl);
-  ASSERT_TRUE(ttl >= 1 && ttl <= 2);
-  sleep(2);
-}
\ No newline at end of file
diff --git a/tests/t_set_test.cc b/tests/t_set_test.cc
deleted file mode 100644
index 8ea656f..0000000
--- a/tests/t_set_test.cc
+++ /dev/null
@@ -1,175 +0,0 @@
-#include <gtest/gtest.h>
-#include "redis_set.h"
-#include "test_base.h"
-
-class RedisSetTest : public TestBase {
-protected:
-  explicit RedisSetTest() : TestBase() {
-    set = new Redis::Set(storage_, "set_ns");
-  }
-  ~RedisSetTest() {
-    delete set;
-  }
-  void SetUp() override {
-    key_ = "test-set-key";
-    fields_ = {"set-key-1", "set-key-2", "set-key-3", "set-key-4"};
-  }
-
-protected:
-  Redis::Set *set;
-};
-
-TEST_F(RedisSetTest, AddAndRemove) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  s = set->Card(key_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  s = set->Remove(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  s = set->Card(key_, &ret);
-  EXPECT_TRUE(s.ok() && ret == 0);
-  set->Del(key_);
-}
-
-TEST_F(RedisSetTest, Members) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  std::vector<std::string> members;
-  s = set->Members(key_, &members);
-  EXPECT_TRUE(s.ok() && fields_.size() == members.size());
-  // Note: members are fetched via an iterator, so the order should be ascending
-  for (int i = 0; i < fields_.size(); i++) {
-    EXPECT_EQ(fields_[i], members[i]);
-  }
-  s = set->Remove(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  set->Del(key_);
-}
-
-TEST_F(RedisSetTest, IsMember) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  for (int i = 0; i < fields_.size(); i++) {
-    s = set->IsMember(key_, fields_[i], &ret);
-    EXPECT_TRUE(s.ok() && ret == 1);
-  }
-  set->IsMember(key_, "foo", &ret);
-  EXPECT_TRUE(s.ok() && ret == 0);
-  s = set->Remove(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  set->Del(key_);
-}
-
-TEST_F(RedisSetTest, Move) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  Slice dst("set-test-move-key");
-  for (int i = 0; i < fields_.size(); i++) {
-    s = set->Move(key_, dst, fields_[i], &ret);
-    EXPECT_TRUE(s.ok() && ret == 1);
-  }
-  s = set->Move(key_, dst, "set-no-exists-key", &ret);
-  EXPECT_TRUE(s.ok() && ret == 0);
-  s = set->Card(key_, &ret);
-  EXPECT_TRUE(s.ok() && ret == 0);
-  s = set->Card(dst, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  s = set->Remove(dst, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  set->Del(key_);
-  set->Del(dst);
-}
-
-TEST_F(RedisSetTest, TakeWithPop) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  std::vector<std::string> members;
-  s = set->Take(key_, &members, 3, true);
-  EXPECT_EQ(members.size(),3);
-  s = set->Take(key_, &members, 2, true);
-  EXPECT_EQ(members.size(),1);
-  s = set->Take(key_, &members, 1, true);
-  EXPECT_TRUE(s.ok() && members.size() == 0);
-  set->Del(key_);
-}
-
-TEST_F(RedisSetTest, Diff) {
-  int ret;
-  std::string k1 = "key1", k2 = "key2", k3 = "key3";
-  rocksdb::Status s = set->Add(k1, {"a", "b", "c", "d"}, &ret);
-  EXPECT_EQ(ret, 4);
-  set->Add(k2, {"c"}, &ret);
-  EXPECT_EQ(ret, 1);
-  set->Add(k3, {"a", "c", "e"}, &ret);
-  EXPECT_EQ(ret, 3);
-  std::vector<std::string> members;
-  set->Diff({k1, k2, k3}, &members);
-  EXPECT_EQ(2, members.size());
-  set->Del(k1);
-  set->Del(k2);
-  set->Del(k3);
-}
-
-TEST_F(RedisSetTest, Union) {
-  int ret;
-  std::string k1 = "key1", k2 = "key2", k3 = "key3";
-  rocksdb::Status s = set->Add(k1, {"a", "b", "c", "d"}, &ret);
-  EXPECT_EQ(ret, 4);
-  set->Add(k2, {"c"}, &ret);
-  EXPECT_EQ(ret, 1);
-  set->Add(k3, {"a", "c", "e"}, &ret);
-  EXPECT_EQ(ret, 3);
-  std::vector<std::string> members;
-  set->Union({k1, k2, k3}, &members);
-  EXPECT_EQ(5, members.size());
-  set->Del(k1);
-  set->Del(k2);
-  set->Del(k3);
-}
-
-TEST_F(RedisSetTest, Inter) {
-  int ret;
-  std::string k1 = "key1", k2 = "key2", k3 = "key3";
-  rocksdb::Status s = set->Add(k1, {"a", "b", "c", "d"}, &ret);
-  EXPECT_EQ(ret, 4);
-  set->Add(k2, {"c"}, &ret);
-  EXPECT_EQ(ret, 1);
-  set->Add(k3, {"a", "c", "e"}, &ret);
-  EXPECT_EQ(ret, 3);
-  std::vector<std::string> members;
-  set->Inter({k1, k2, k3}, &members);
-  EXPECT_EQ(1, members.size());
-  set->Del(k1);
-  set->Del(k2);
-  set->Del(k3);
-}
-
-TEST_F(RedisSetTest, Overwrite) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  set->Overwrite(key_, {"a"});
-  int count;
-  set->Card(key_, &count);
-  EXPECT_EQ(count, 1);
-  set->Del(key_);
-}
-
-TEST_F(RedisSetTest, TakeWithoutPop) {
-  int ret;
-  rocksdb::Status s = set->Add(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  std::vector<std::string> members;
-  s = set->Take(key_, &members, int(fields_.size()+1), false);
-  EXPECT_EQ(members.size(), fields_.size());
-  s = set->Take(key_, &members, int(fields_.size()-1), false);
-  EXPECT_EQ(members.size(), fields_.size()-1);
-  s = set->Remove(key_, fields_, &ret);
-  EXPECT_TRUE(s.ok() && fields_.size() == ret);
-  set->Del(key_);
-}
diff --git a/tests/t_string_test.cc b/tests/t_string_test.cc
deleted file mode 100644
index ded0252..0000000
--- a/tests/t_string_test.cc
+++ /dev/null
@@ -1,182 +0,0 @@
-#include <redis_string.h>
-#include "test_base.h"
-#include "redis_string.h"
-
-class RedisStringTest : public TestBase {
-protected:
-  explicit RedisStringTest() : TestBase() {
-    string = new Redis::String(storage_, "string_ns");
-  }
-  ~RedisStringTest() {
-    delete string;
-  }
-  void SetUp() override {
-    key_ = "test-string-key";
-    pairs_ = {
-            {"test-string-key1", "test-strings-value1"},
-            {"test-string-key2", "test-strings-value2"},
-            {"test-string-key3", "test-strings-value3"},
-            {"test-string-key4", "test-strings-value4"},
-            {"test-string-key5", "test-strings-value5"},
-            {"test-string-key6", "test-strings-value6"},
-    };
-  }
-
-protected:
-  Redis::String *string;
-  std::vector<StringPair> pairs_;
-};
-
-TEST_F(RedisStringTest, Append) {
-  int ret;
-  for (int i = 0; i < 32; i++) {
-    rocksdb::Status s = string->Append(key_, "a", &ret);
-    EXPECT_TRUE(s.ok());
-    EXPECT_EQ(i+1, ret);
-  }
-  string->Del(key_);
-}
-
-TEST_F(RedisStringTest, GetAndSet) {
-  for (int i = 0; i < pairs_.size(); i++) {
-    string->Set(pairs_[i].key, pairs_[i].value);
-  }
-  for (int i = 0; i < pairs_.size(); i++) {
-    std::string got_value;
-    string->Get(pairs_[i].key, &got_value);
-    EXPECT_EQ(pairs_[i].value, got_value);
-  }
-  for (int i = 0; i < pairs_.size(); i++) {
-    string->Del(pairs_[i].key);
-  }
-}
-
-TEST_F(RedisStringTest, MGetAndMSet) {
-  string->MSet(pairs_);
-  std::vector<Slice> keys;
-  std::vector<std::string> values;
-  for (const auto pair : pairs_) {
-    keys.emplace_back(pair.key);
-  }
-  string->MGet(keys, &values);
-  for (int i = 0; i < pairs_.size(); i++) {
-    EXPECT_EQ(pairs_[i].value.ToString(), values[i]);
-  }
-  for (int i = 0; i < pairs_.size(); i++) {
-    string->Del(pairs_[i].key);
-  }
-}
-
-TEST_F(RedisStringTest, IncrBy) {
-  int64_t ret;
-  string->IncrBy(key_, 1, &ret);
-  EXPECT_EQ(1, ret);
-  string->IncrBy(key_, INT64_MAX-1, &ret);
-  EXPECT_EQ(INT64_MAX, ret);
-  rocksdb::Status s = string->IncrBy(key_, 1, &ret);
-  EXPECT_TRUE(s.IsInvalidArgument());
-  string->IncrBy(key_, INT64_MIN+1, &ret);
-  EXPECT_EQ(0, ret);
-  string->IncrBy(key_, INT64_MIN, &ret);
-  EXPECT_EQ(INT64_MIN, ret);
-  s = string->IncrBy(key_, -1, &ret);
-  EXPECT_TRUE(s.IsInvalidArgument());
-  // the key holds a value that is not a number
-  string->Set(key_, "abc");
-  s = string->IncrBy(key_, 1, &ret);
-  EXPECT_TRUE(s.IsInvalidArgument());
-  string->Del(key_);
-
-}
-
-TEST_F(RedisStringTest, GetSet) {
-  std::vector<Slice> values = {"a", "b", "c", "d"};
-  for(int i = 0; i < values.size(); i++) {
-    std::string old_value;
-    string->GetSet(key_, values[i], &old_value);
-    if (i != 0) {
-      EXPECT_EQ(values[i - 1], old_value);
-    } else {
-      EXPECT_TRUE(old_value.empty());
-    }
-  }
-  string->Del(key_);
-}
-
-TEST_F(RedisStringTest, MSetXX) {
-  int ret;
-  string->SetXX(key_, "test-value", 3, &ret);
-  EXPECT_EQ(ret, 0);
-  string->Set(key_, "test-value");
-  string->SetXX(key_, "test-value", 3, &ret);
-  EXPECT_EQ(ret, 1);
-  int ttl;
-  string->TTL(key_, &ttl);
-  EXPECT_TRUE(ttl >= 2 && ttl <= 3);
-  string->Del(key_);
-}
-
-TEST_F(RedisStringTest, MSetNX) {
-  int ret;
-  string->MSetNX(pairs_, 0, &ret);
-  EXPECT_EQ(1, ret);
-  std::vector<Slice> keys;
-  std::vector<std::string> values;
-  for (const auto pair : pairs_) {
-    keys.emplace_back(pair.key);
-  }
-  string->MGet(keys, &values);
-  for (int i = 0; i < pairs_.size(); i++) {
-    EXPECT_EQ(pairs_[i].value.ToString(), values[i]);
-  }
-
-  std::vector<StringPair> new_pairs{
-          {"a", "1"},
-          {"b", "2"},
-          {"c", "3"},
-          {pairs_[0].key, pairs_[0].value},
-          {"d", "4"},
-  };
-  string->MSetNX(new_pairs, 0, &ret);
-  EXPECT_EQ(0, ret);
-
-  for (int i = 0; i < pairs_.size(); i++) {
-    string->Del(pairs_[i].key);
-  }
-}
-
-TEST_F(RedisStringTest, MSetNXWithTTL) {
-  int ret;
-  string->SetNX(key_, "test-value", 3, &ret);
-  int ttl;
-  string->TTL(key_, &ttl);
-  EXPECT_TRUE(ttl >= 2 && ttl <= 3);
-  string->Del(key_);
-}
-
-TEST_F(RedisStringTest, SetEX) {
-  string->SetEX(key_, "test-value", 3);
-  int ttl;
-  string->TTL(key_, &ttl);
-  EXPECT_TRUE(ttl >= 2 && ttl <= 3);
-  string->Del(key_);
-}
-
-TEST_F(RedisStringTest, SetRange) {
-  int ret;
-  string->Set(key_, "hello,world");
-  string->SetRange(key_, 6, "redis", &ret);
-  EXPECT_EQ(11, ret);
-  std::string value;
-  string->Get(key_, &value);
-  EXPECT_EQ("hello,redis", value);
-  string->SetRange(key_, 6, "redis-1", &ret);
-  EXPECT_EQ(13, ret);
-  string->Get(key_, &value);
-  EXPECT_EQ("hello,redis-1", value);
-  string->SetRange(key_, 15, "1", &ret);
-  EXPECT_EQ(16, ret);
-  string->Get(key_, &value);
-  EXPECT_EQ(16, value.size());
-  string->Del(key_);
-}
\ No newline at end of file
diff --git a/tests/t_zset_test.cc b/tests/t_zset_test.cc
deleted file mode 100644
index 5ed13b3..0000000
--- a/tests/t_zset_test.cc
+++ /dev/null
@@ -1,292 +0,0 @@
-#include <redis_zset.h>
-#include "test_base.h"
-#include "redis_zset.h"
-
-class RedisZSetTest : public TestBase {
-protected:
-  RedisZSetTest() : TestBase() {
-    zset = new Redis::ZSet(storage_, "zset_ns");
-  }
-  ~RedisZSetTest() {
-    delete zset;
-  }
-  void SetUp() {
-    key_ = "test_zset_key";
-    fields_ = {"zset_test_key-1", "zset_test_key-2", "zset_test_key-3", "zset_test_key-4", "zset_test_key-5",
-               "zset_test_key-6", "zset_test_key-7"};
-    scores_ = {-100.1, -100.1, -1.234, 0, 1.234, 1.234, 100.1};
-  }
-
-protected:
-  std::vector<double> scores_;
-  Redis::ZSet *zset;
-};
-
-TEST_F(RedisZSetTest, Add) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  for (int i = 0; i < fields_.size(); i++) {
-    double got;
-    rocksdb::Status s = zset->Score(key_, fields_[i], &got);
-    EXPECT_EQ(scores_[i], got);
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(ret, 0);
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, IncrBy) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  for (int i = 0; i < fields_.size(); i++) {
-    double increment = 12.3, score;
-    zset->IncrBy(key_, fields_[i], increment, &score);
-    EXPECT_EQ(scores_[i]+increment, score);
-  }
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, Remove) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->Remove(key_, fields_, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  for (int i = 0; i < fields_.size(); i++) {
-    double score;
-    rocksdb::Status s = zset->Score(key_, fields_[i], &score);
-    EXPECT_TRUE(s.IsNotFound());
-  }
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, Range) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  int count = mscores.size()-1;
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->Range(key_, 0, -2, 0, &mscores);
-  EXPECT_EQ(mscores.size(), count);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i]);
-  }
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, RevRange) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  int count = mscores.size()-1;
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->Range(key_, 0, -2, ZSET_REVERSED, &mscores);
-  EXPECT_EQ(mscores.size(), count);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[count-i].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[count-i]);
-  }
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, PopMin) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->Pop(key_, mscores.size()-1, true, &mscores);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i]);
-  }
-  zset->Pop(key_, 1, true, &mscores);
-  EXPECT_EQ(mscores[0].member, fields_[fields_.size()-1].ToString());
-  EXPECT_EQ(mscores[0].score, scores_[fields_.size()-1]);
-}
-
-TEST_F(RedisZSetTest, PopMax) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  int count = fields_.size();
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->Pop(key_, mscores.size()-1, false, &mscores);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[count-i-1].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[count-i-1]);
-  }
-  zset->Pop(key_, 1, true, &mscores);
-  EXPECT_EQ(mscores[0].member, fields_[0].ToString());
-}
-
-TEST_F(RedisZSetTest, RangeByScore) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-
-  // test case: min and max scores are both inclusive
-  ZRangeSpec spec;
-  spec.min = scores_[0];
-  spec.max = scores_[scores_.size()-2];
-  zset->RangeByScore(key_, spec, &mscores, nullptr);
-  EXPECT_EQ(mscores.size(), scores_.size()-1);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i]);
-  }
-  // test case: exclude the min score
-  spec.minex = true;
-  zset->RangeByScore(key_, spec, &mscores, nullptr);
-  EXPECT_EQ(mscores.size(), scores_.size()-3);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i+2].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i+2]);
-  }
-  // test case: exclude the max score
-  spec.minex = false;
-  spec.maxex = true;
-  zset->RangeByScore(key_, spec, &mscores, nullptr);
-  EXPECT_EQ(mscores.size(), scores_.size()-3);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i]);
-  }
-  // test case: exclude both the min and max scores
-  spec.minex = true;
-  spec.maxex = true;
-  zset->RangeByScore(key_, spec, &mscores, nullptr);
-  EXPECT_EQ(mscores.size(), scores_.size()-5);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i+2].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i+2]);
-  }
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, RangeByScoreWithLimit) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-
-  ZRangeSpec spec;
-  spec.offset = 1;
-  spec.count = 2;
-  zset->RangeByScore(key_, spec, &mscores, nullptr);
-  EXPECT_EQ(mscores.size(), 2);
-  for (int i = 0; i < mscores.size(); i++) {
-    EXPECT_EQ(mscores[i].member, fields_[i+1].ToString());
-    EXPECT_EQ(mscores[i].score, scores_[i+1]);
-  }
-  zset->Del(key_);
-}
-
-TEST_F(RedisZSetTest, RemRangeByScore) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  ZRangeSpec spec;
-  spec.min = scores_[0];
-  spec.max= scores_[scores_.size()-2];
-  zset->RemoveRangeByScore(key_, spec, &ret);
-  EXPECT_EQ(scores_.size()-1, ret);
-  spec.min = scores_[scores_.size()-1];
-  spec.max = spec.min;
-  zset->RemoveRangeByScore(key_, spec, &ret);
-  EXPECT_EQ(1, ret);
-}
-
-TEST_F(RedisZSetTest, RemoveRangeByRank) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->RemoveRangeByRank(key_, 0, fields_.size() - 2, &ret);
-  EXPECT_EQ(fields_.size()-1, ret);
-  zset->RemoveRangeByRank(key_, 0, 2, &ret);
-  EXPECT_EQ(1, ret);
-}
-
-TEST_F(RedisZSetTest, RemoveRevRangeByRank) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-  zset->RemoveRangeByRank(key_, 0, fields_.size() - 2, &ret);
-  EXPECT_EQ(fields_.size()-1, ret);
-  zset->RemoveRangeByRank(key_, 0, 2, &ret);
-  EXPECT_EQ(1, ret);
-}
-
-TEST_F(RedisZSetTest, Rank) {
-  int ret;
-  std::vector<MemberScore> mscores;
-  for (int i = 0; i < fields_.size(); i++) {
-    mscores.emplace_back(MemberScore{fields_[i].ToString(), scores_[i]});
-  }
-  zset->Add(key_, 0, &mscores, &ret);
-  EXPECT_EQ(fields_.size(), ret);
-
-  for (int i = 0; i < fields_.size(); i++) {
-    int rank;
-    zset->Rank(key_, fields_[i], false, &rank);
-    EXPECT_EQ(i, rank);
-  }
-  for (int i = 0; i < fields_.size(); i++) {
-    int rank;
-    zset->Rank(key_, fields_[i], true, &rank);
-    EXPECT_EQ(i, fields_.size()-rank-1);
-  }
-  std::vector<std::string> no_exist_members = {"a", "b"};
-  for (const auto &member : no_exist_members) {
-    int rank;
-    zset->Rank(key_, member, true, &rank);
-    EXPECT_EQ(-1, rank);
-  }
-  zset->Del(key_);
-}
\ No newline at end of file
diff --git a/tests/task_runner_test.cc b/tests/task_runner_test.cc
deleted file mode 100644
index aa126b3..0000000
--- a/tests/task_runner_test.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-#include <gtest/gtest.h>
-#include <atomic>
-#include "task_runner.h"
-
-TEST(TaskRunner, PublishOverflow) {
-  TaskRunner tr(2, 3);
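-  // The queue capacity is 3, so only the first three Publish() calls may succeed.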
-  Task t;
-  Status s;
-  for(int i = 0; i < 5; i++) {
-    s = tr.Publish(t);
-    if (i < 3) {
-      ASSERT_TRUE(s.IsOK());
-    } else {
-      std::cout << "i:" << i <<std::endl;
-      ASSERT_FALSE(s.IsOK());
-    }
-  }
-}
-
-TEST(TaskRunner, PublishToStopQueue) {
-  TaskRunner tr(2, 3);
-  tr.Stop();
-
-  Task t;
-  Status s;
-  for(int i = 0; i < 5; i++) {
-    s = tr.Publish(t);
-    ASSERT_FALSE(s.IsOK());
-  }
-}
-
-TEST(TaskRunner, Run) {
-  std::atomic<int> counter = {0};
-  TaskRunner tr(3, 1024);
-  tr.Start();
-
-  Status s;
-  Task t;
-  for(int i = 0; i < 100; i++) {
-    t.callback = [](void *arg){auto ptr = (std::atomic<int>*)arg; ptr->fetch_add(1);};
-    t.arg = (void*) &counter;
-    s = tr.Publish(t);
-    ASSERT_TRUE(s.IsOK());
-  }
-  usleep(1000);
-  ASSERT_EQ(100, counter);
-  tr.Stop();
-  tr.Join();
-}
\ No newline at end of file
diff --git a/tests/test_base.h b/tests/test_base.h
deleted file mode 100644
index c06d406..0000000
--- a/tests/test_base.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef KVROCKS_TEST_BASE_H
-#define KVROCKS_TEST_BASE_H
-
-#include <gtest/gtest.h>
-#include "redis_db.h"
-#include "redis_hash.h"
-
-class TestBase : public testing::Test {
-protected:
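-  // Spin up a throwaway storage engine under ./testsdb for every test fixture.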
-  explicit TestBase() {
-    config_ = new Config();
-    config_->db_dir = "testsdb";
-    config_->backup_dir = "testsdb/backup";
-    storage_ = new Engine::Storage(config_);
-    Status s = storage_->Open();
-    assert(s.IsOK());
-  }
-  ~TestBase() override {
-    rmdir("testsdb");
-    delete storage_;
-    delete config_;
-  }
-
-protected:
-  Engine::Storage *storage_;
-  Config *config_ = nullptr;
-  Slice key_;
-  std::vector<Slice> fields_;
-  std::vector<Slice> values_;
-};
-#endif //KVROCKS_TEST_BASE_H
diff --git a/tools/kvrocks2redis/config.cc b/tools/kvrocks2redis/config.cc
deleted file mode 100644
index 4020f65..0000000
--- a/tools/kvrocks2redis/config.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-#include "config.h"
-#include <fcntl.h>
-#include <string.h>
-#include <strings.h>
-#include <glog/logging.h>
-#include <rocksdb/env.h>
-
-#include <fstream>
-#include <iostream>
-#include <sstream>
-#include <vector>
-
-#include "../../src/util.h"
-#include "../../src/status.h"
-#include "../../src/config.h"
-
-namespace Kvrocks2redis {
-
-static const char *kLogLevels[] = {"info", "warning", "error", "fatal"};
-static const size_t kNumLogLevel = sizeof(kLogLevels) / sizeof(kLogLevels[0]);
-
-int Config::yesnotoi(std::string input) {
-  if (strcasecmp(input.data(), "yes") == 0) {
-    return 1;
-  } else if (strcasecmp(input.data(), "no") == 0) {
-    return 0;
-  }
-  return -1;
-}
-
-Status Config::parseConfigFromString(std::string input) {
-  std::vector<std::string> args;
-  Util::Split(input, " \t\r\n", &args);
-  // omit empty line and comment
-  if (args.empty() || args[0].front() == '#') return Status::OK();
-
-  size_t size = args.size();
-  if (size == 2 && args[0] == "daemonize") {
-    int i;
-    if ((i = yesnotoi(args[1])) == -1) {
-      return Status(Status::NotOK, "argument must be 'yes' or 'no'");
-    }
-    daemonize = (i == 1);
-  } else if (size == 2 && args[0] == "dir") {
-    dir = args[1];
-    db_dir = dir + "/db";
-    next_seq_file_path = dir + "/last_next_seq.txt";
-  } else if (size == 2 && args[0] == "loglevel") {
-    for (size_t i = 0; i < kNumLogLevel; i++) {
-      if (Util::ToLower(args[1]) == kLogLevels[i]) {
-        loglevel = static_cast<int>(i);
-        break;
-      }
-    }
-  } else if (size >= 3 && args[0] == "kvrocks") {
-    kvrocks_host = args[1];
-    // we use port + 1 as the replication port, so increment the kvrocks port here
-    kvrocks_port = std::stoi(args[2]) + 1;
-    if (kvrocks_port <= 0 || kvrocks_port >= 65535) {
-      return Status(Status::NotOK, "kvrocks port range should be between 0 and 65535");
-    }
-    if (size == 4) {
-      kvrocks_auth = args[3];
-    }
-  } else if (size >= 3 && !strncasecmp(args[0].data(), "namespace.", 10)) {
-    std::string ns = args[0].substr(10);
-    if (ns.size() > INT8_MAX) {
-      return Status(Status::NotOK, std::string("namespace size exceed limit ") + std::to_string(INT8_MAX));
-    }
-    tokens[ns].host = args[1];
-    tokens[ns].port = std::stoi(args[2]);
-    if (size == 4) {
-      tokens[ns].auth = args[3];
-    }
-  } else {
-    return Status(Status::NotOK, "Bad directive or wrong number of arguments");
-  }
-  return Status::OK();
-}
-
-Status Config::Load(std::string path) {
-  path_ = std::move(path);
-  std::ifstream file(path_);
-  if (!file.is_open()) {
-    return Status(Status::NotOK, strerror(errno));
-  }
-
-  std::string line;
-  int line_num = 1;
-  while (!file.eof()) {
-    std::getline(file, line);
-    line = Util::ToLower(line);
-    Status s = parseConfigFromString(line);
-    if (!s.IsOK()) {
-      file.close();
-      return Status(Status::NotOK, "at line: #L" + std::to_string(line_num) + ", err: " + s.Msg());
-    }
-    line_num++;
-  }
-
-  auto s = rocksdb::Env::Default()->CreateDirIfMissing(dir);
-  if (!s.ok()) return Status(Status::NotOK, s.ToString());
-
-  file.close();
-  return Status::OK();
-}
-
-}  // namespace Kvrocks2redis
-
diff --git a/tools/kvrocks2redis/config.h b/tools/kvrocks2redis/config.h
deleted file mode 100644
index f66ea14..0000000
--- a/tools/kvrocks2redis/config.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#pragma once
-
-#include <string>
-#include <map>
-#include <vector>
-
-#include "../../src/status.h"
-
-namespace Kvrocks2redis {
-
-struct redis_server {
-  std::string host;
-  uint32_t port;
-  std::string auth;
-};
-struct Config {
- public:
-  int loglevel = 0;
-  bool daemonize = false;
-
-  std::string dir = "/tmp/ev";
-  std::string db_dir = dir + "/db";
-  std::string aof_file_name = "appendonly.aof";
-  std::string next_offset_file_name = "last_next_offset.txt";
-  std::string next_seq_file_path = dir + "/last_next_seq.txt";
-
-  std::string kvrocks_auth;
-  std::string kvrocks_host;
-  int kvrocks_port = 0;
-  std::map<std::string, redis_server> tokens;
-
- public:
-  Status Load(std::string path);
-  Config() = default;
-  ~Config() = default;
-
- private:
-  std::string path_;
-  int yesnotoi(std::string input);
-  Status parseConfigFromString(std::string input);
-};
-
-}  // namespace Kvrocks2redis
-
diff --git a/tools/kvrocks2redis/main.cc b/tools/kvrocks2redis/main.cc
deleted file mode 100644
index b23456c..0000000
--- a/tools/kvrocks2redis/main.cc
+++ /dev/null
@@ -1,158 +0,0 @@
-#include <getopt.h>
-#include <event2/thread.h>
-#include <gflags/gflags.h>
-#include <glog/logging.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <csignal>
-
-#include "../../src/config.h"
-#include "../../src/worker.h"
-#include "../../src/storage.h"
-#include "../../src/server.h"
-#include "../../src/util.h"
-
-#include "sync.h"
-#include "redis_writer.h"
-#include "parser.h"
-#include "config.h"
-#include "version.h"
-
-const char *kDefaultConfPath = "../kvrocks2redis.conf";
-const char *kDefaultPidPath = "/var/run/kvrocks2redis.pid";
-
-std::function<void()> hup_handler;
-
-struct Options {
-  std::string conf_file = kDefaultConfPath;
-  std::string pid_file = kDefaultPidPath;
-  bool show_usage = false;
-};
-
-extern "C" void signal_handler(int sig) {
-  if (hup_handler) hup_handler();
-}
-
-static void usage(const char *program) {
-  std::cout << program << " sync kvrocks to redis\n"
-            << "\t-c config file, default is " << kDefaultConfPath << "\n"
-            << "\t-p pid file, default is " << kDefaultPidPath << "\n"
-            << "\t-h help\n";
-  exit(0);
-}
-
-static Options parseCommandLineOptions(int argc, char **argv) {
-  int ch;
-  Options opts;
-  while ((ch = ::getopt(argc, argv, "c:p:hv")) != -1) {
-    switch (ch) {
-      case 'c': opts.conf_file = optarg;
-        break;
-      case 'p': opts.pid_file = optarg;
-        break;
-      case 'h': opts.show_usage = true;
-        break;
-      case 'v': exit(0);
-      default: usage(argv[0]);
-    }
-  }
-  return opts;
-}
-
-static void initGoogleLog(const Kvrocks2redis::Config *config) {
-  FLAGS_minloglevel = config->loglevel;
-  FLAGS_max_log_size = 100;
-  FLAGS_logbufsecs = 0;
-  FLAGS_log_dir = config->dir;
-}
-
-static Status createPidFile(const std::string &path) {
-  int fd = open(path.data(), O_RDWR | O_CREAT | O_EXCL, 0660);
-  if (fd < 0) {
-    return Status(Status::NotOK, strerror(errno));
-  }
-  std::string pid_str = std::to_string(getpid());
-  write(fd, pid_str.data(), pid_str.size());
-  close(fd);
-  return Status::OK();
-}
-
-static void removePidFile(const std::string &path) {
-  std::remove(path.data());
-}
-
-static void daemonize() {
-  pid_t pid;
-
-  pid = fork();
-  if (pid < 0) {
-    LOG(ERROR) << "Failed to fork the process, err: " << strerror(errno);
-    exit(1);
-  }
-  if (pid > 0) exit(EXIT_SUCCESS);  // parent process
-  // change the file mode
-  umask(0);
-  if (setsid() < 0) {
-    LOG(ERROR) << "Failed to setsid, err: %s" << strerror(errno);
-    exit(1);
-  }
-  close(STDIN_FILENO);
-  close(STDOUT_FILENO);
-  close(STDERR_FILENO);
-}
-
-int main(int argc, char *argv[]) {
-  google::InitGoogleLogging("kvrocks2redis");
-  gflags::SetUsageMessage("kvrocks2redis");
-  evthread_use_pthreads();
-
-  signal(SIGPIPE, SIG_IGN);
-  signal(SIGINT, signal_handler);
-  signal(SIGTERM, signal_handler);
-
-  std::cout << "Version: " << VERSION << " @" << GIT_COMMIT << std::endl;
-  auto opts = parseCommandLineOptions(argc, argv);
-  if (opts.show_usage) usage(argv[0]);
-  std::string config_file_path = std::move(opts.conf_file);
-
-  Kvrocks2redis::Config config;
-  Status s = config.Load(config_file_path);
-  if (!s.IsOK()) {
-    std::cout << "Failed to load config, err: " << s.Msg() << std::endl;
-    exit(1);
-  }
-  initGoogleLog(&config);
-
-  if (config.daemonize) daemonize();
-  s = createPidFile(opts.pid_file);
-  if (!s.IsOK()) {
-    LOG(ERROR) << "Failed to create pidfile: " << s.Msg();
-    exit(1);
-  }
-
-  Config kvrocks_config;
-  kvrocks_config.db_dir = config.db_dir;
-
-  Engine::Storage storage(&kvrocks_config);
-  s = storage.OpenForReadOnly();
-  if (!s.IsOK()) {
-    LOG(ERROR) << "Failed to open: " << s.Msg();
-    exit(1);
-  }
-
-  Server srv(&storage, &kvrocks_config);
-
-  RedisWriter writer(&config);
-  Parser parser(&storage, &writer);
-
-  Sync sync(&srv, &writer, &parser, &config);
-  hup_handler = [&sync, &opts]() {
-    if (!sync.IsStopped()) {
-      LOG(INFO) << "Bye Bye";
-      sync.Stop();
-      removePidFile(opts.pid_file);
-    }
-  };
-  sync.Start();
-  return 0;
-}
diff --git a/tools/kvrocks2redis/parser.cc b/tools/kvrocks2redis/parser.cc
deleted file mode 100644
index 8f46620..0000000
--- a/tools/kvrocks2redis/parser.cc
+++ /dev/null
@@ -1,309 +0,0 @@
-#include <glog/logging.h>
-
-#include <rocksdb/write_batch.h>
-
-#include "../../src/redis_bitmap.h"
-#include "parser.h"
-#include "util.h"
-
-Status Parser::ParseFullDB() {
-  rocksdb::DB *db_ = storage_->GetDB();
-  if (!lastest_snapshot_) lastest_snapshot_ = new LatestSnapShot(db_);
-  rocksdb::ColumnFamilyHandle *metadata_cf_handle_ = storage_->GetCFHandle("metadata");
-
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = lastest_snapshot_->GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options, metadata_cf_handle_);
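-  // Scan every key in the metadata column family under the snapshot: string values
-  // are replayed directly, while complex types are expanded subkey by subkey below.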
-  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    Metadata metadata(kRedisNone);
-    metadata.Decode(iter->value().ToString());
-    if (metadata.Expired()) {  // ignore the expired key
-      continue;
-    }
-    Status s;
-    if (metadata.Type() == kRedisString) {
-      s = parseSimpleKV(iter->key(), iter->value(), metadata.expire);
-    } else {
-      s = parseComplexKV(iter->key(), metadata);
-    }
-    if (!s.IsOK()) return s;
-  }
-  delete iter;
-  delete lastest_snapshot_;
-  lastest_snapshot_ = nullptr;
-
-  return Status::OK();
-}
-
-Status Parser::parseSimpleKV(const Slice &ns_key, const Slice &value, int expire) {
-  std::string op, ns, user_key;
-  ExtractNamespaceKey(ns_key, &ns, &user_key);
-  std::string output;
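-  // The stored value carries a 5-byte metadata header (flags + expire); only the
-  // remainder is the user payload.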
-  output = Rocksdb2Redis::Command2RESP(
-      {"SET", user_key, value.ToString().substr(5, value.size() - 5)});
-  Status s = writer_->Write(ns, {output});
-  if (!s.IsOK()) return s;
-
-  if (expire > 0) {
-    output = Rocksdb2Redis::Command2RESP({"EXPIREAT", user_key, std::to_string(expire)});
-    s = writer_->Write(ns, {output});
-  }
-  return s;
-}
-
-Status Parser::parseComplexKV(const Slice &ns_key, const Metadata &metadata) {
-  RedisType type = metadata.Type();
-  if (type < kRedisHash || type > kRedisBitmap) {
-    return Status(Status::NotOK, "unknown metadata type: " + std::to_string(type));
-  }
-
-  std::string ns, prefix_key, user_key, sub_key, value, output;
-  ExtractNamespaceKey(ns_key, &ns, &user_key);
-  InternalKey(ns_key, "", metadata.version).Encode(&prefix_key);
-
-  rocksdb::DB *db_ = storage_->GetDB();
-  rocksdb::ReadOptions read_options;
-  read_options.snapshot = lastest_snapshot_->GetSnapShot();
-  read_options.fill_cache = false;
-  auto iter = db_->NewIterator(read_options);
-  for (iter->Seek(prefix_key); iter->Valid(); iter->Next()) {
-    if (!iter->key().starts_with(prefix_key)) {
-      break;
-    }
-    Status s;
-    InternalKey ikey(iter->key());
-    sub_key = ikey.GetSubKey().ToString();
-    value = iter->value().ToString();
-    switch (type) {
-      case kRedisHash:output = Rocksdb2Redis::Command2RESP({"HSET", user_key, sub_key, value});
-        break;
-      case kRedisSet:output = Rocksdb2Redis::Command2RESP({"SADD", user_key, sub_key});
-        break;
-      case kRedisList:output = Rocksdb2Redis::Command2RESP({"RPUSH", user_key, value});
-        break;
-      case kRedisZSet: {
-        double score = DecodeDouble(value.data());
-        output = Rocksdb2Redis::Command2RESP({"ZADD", user_key, std::to_string(score), sub_key});
-        break;
-      }
-      case kRedisBitmap: {
-        int index = std::stoi(sub_key);
-        s = Parser::parseBitmapSegment(ns, user_key, index, value);
-        break;
-      }
-      default:break;  // should never get here
-    }
-    if (type != kRedisBitmap) {
-      s = writer_->Write(ns, {output});
-    }
-    if (!s.IsOK()) return s;
-  }
-
-  if (metadata.expire > 0) {
-    output = Rocksdb2Redis::Command2RESP({"EXPIREAT", user_key, std::to_string(metadata.expire)});
-    Status s = writer_->Write(ns, {output});
-    if (!s.IsOK()) return s;
-  }
-
-  delete iter;
-  return Status::OK();
-}
-
-Status Parser::parseBitmapSegment(const Slice &ns, const Slice &user_key, int index, const Slice &bitmap) {
-  Status s;
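-  // Replay every set bit of this segment as an individual SETBIT; the absolute bit
-  // offset is rebuilt from the segment index plus the byte and bit position.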
-  for (size_t i = 0; i < bitmap.size(); i++) {
-    if (bitmap[i] == 0) continue;  // ignore zero byte
-    for (int j = 0; j < 8; j++) {
-      if (!(bitmap[i] & (1 << j))) continue;  // ignore zero bit
-      s = writer_->Write(ns.ToString(), {Rocksdb2Redis::Command2RESP(
-          {"SETBIT", user_key.ToString(), std::to_string(index * 8 + i * 8 + j), "1"})
-      });
-      if (!s.IsOK()) return s;
-    }
-  }
-  return Status::OK();
-}
-
-rocksdb::Status Parser::ParseWriteBatch(const std::string &batch_string) {
-  rocksdb::WriteBatch write_batch(batch_string);
-  WriteBatchExtractor write_batch_extractor;
-  rocksdb::Status status;
-
-  status = write_batch.Iterate(&write_batch_extractor);
-  if (!status.ok()) return status;
-  auto aof_strings = write_batch_extractor.GetAofStrings();
-  for (const auto &iter : *aof_strings) {
-    auto s = writer_->Write(iter.first, iter.second);
-    if (!s.IsOK()) {
-      LOG(ERROR) << "[kvrocks2redis] Failed to parse WriteBatch, encounter error: " << s.Msg();
-    }
-  }
-  return rocksdb::Status::OK();
-}
-
-void WriteBatchExtractor::LogData(const rocksdb::Slice &blob) {
-  log_data_.Decode(blob);
-}
-
-rocksdb::Status WriteBatchExtractor::PutCF(uint32_t column_family_id, const Slice &key,
-                                           const Slice &value) {
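-  // Entries in the zset score column family mirror what is already written to the
-  // default column family, so they are skipped to avoid emitting duplicate commands.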
-  if (column_family_id == kColumnFamilyIDZSetScore) {
-    return rocksdb::Status::OK();
-  }
-
-  std::string ns, user_key, sub_key;
-  std::vector<std::string> command_args;
-  if (column_family_id == kColumnFamilyIDMetadata) {
-    ExtractNamespaceKey(key, &ns, &user_key);
-    Metadata metadata(kRedisNone);
-    metadata.Decode(value.ToString());
-    if (metadata.Type() == kRedisString) {
-      command_args = {"SET", user_key, value.ToString().substr(5, value.size() - 5)};
-      aof_strings_[ns].emplace_back(Rocksdb2Redis::Command2RESP(command_args));
-      if (metadata.expire > 0) {
-        command_args = {"EXPIREAT", user_key, std::to_string(metadata.expire)};
-        aof_strings_[ns].emplace_back(Rocksdb2Redis::Command2RESP(command_args));
-      }
-    } else if (metadata.expire > 0) {
-      auto args = log_data_.GetArguments();
-      if (args->size() > 0) {
-        RedisCommand cmd = static_cast<RedisCommand >(std::stoi((*args)[0]));
-        if (cmd == kRedisCmdExpire) {
-          command_args = {"EXPIREAT", user_key, std::to_string(metadata.expire)};
-          aof_strings_[ns].emplace_back(Rocksdb2Redis::Command2RESP(command_args));
-        }
-      }
-    }
-
-    return rocksdb::Status::OK();
-  }
-
-  if (column_family_id == kColumnFamilyIDDefault) {
-    InternalKey ikey(key);
-    user_key = ikey.GetKey().ToString();
-    sub_key = ikey.GetSubKey().ToString();
-    ns = ikey.GetNamespace().ToString();
-    switch (log_data_.GetRedisType()) {
-      case kRedisHash:command_args = {"HSET", user_key, sub_key, value.ToString()};
-        break;
-      case kRedisList: {
-        auto args = log_data_.GetArguments();
-        if (args->size() < 1) {
-          LOG(ERROR) << "Failed to parse write_batch in PutCF for list type: args error, should at least contain cmd";
-          return rocksdb::Status::OK();
-        }
-        RedisCommand cmd = static_cast<RedisCommand >(std::stoi((*args)[0]));
-        switch (cmd) {
-          case kRedisCmdLSet:
-            if (args->size() < 2) {
-              LOG(ERROR) << "Failed to parse write_batch in PutCF for cmd lset: args error, should contain lset index";
-              return rocksdb::Status::OK();
-            }
-            command_args = {"LSET", user_key, (*args)[1], value.ToString()};
-            break;
-          case kRedisCmdLInsert:
-            if (firstSeen_) {
-              if (args->size() < 4) {
-                LOG(ERROR)
-                    << "Failed to parse write_batch in PutCF for cmd linsert: args error, should contain the before flag, pivot and value";
-                return rocksdb::Status::OK();
-              }
-              command_args = {"LINSERT", user_key, (*args)[1] == "1" ? "before" : "after", (*args)[2], (*args)[3]};
-              firstSeen_ = false;
-            }
-            break;
-          default:command_args = {cmd == kRedisCmdLPush ? "LPUSH" : "RPUSH", user_key, value.ToString()};
-        }
-        break;
-      }
-      case kRedisSet:command_args = {"SADD", user_key, sub_key};
-        break;
-      case kRedisZSet: {
-        double score = DecodeDouble(value.data());
-        command_args = {"ZADD", user_key, std::to_string(score), sub_key};
-        break;
-      }
-      case kRedisBitmap: {
-        auto args = log_data_.GetArguments();
-        if (args->size() < 1) {
-          LOG(ERROR) << "Failed to parse write_batch in PutCF for cmd setbit: args error, should contain setbit offset";
-          return rocksdb::Status::OK();
-        }
-        bool bit_value = Redis::Bitmap::GetBitFromValueAndOffset(value.ToString(), std::stoi((*args)[0]));
-        command_args = {"SETBIT", user_key, (*args)[0], bit_value ? "1" : "0"};
-        break;
-      }
-      default: break;
-    }
-  }
-
-  if (!command_args.empty()) {
-    aof_strings_[ns].emplace_back(Rocksdb2Redis::Command2RESP(command_args));
-  }
-  return rocksdb::Status::OK();
-}
-
-rocksdb::Status WriteBatchExtractor::DeleteCF(uint32_t column_family_id, const Slice &key) {
-  if (column_family_id == kColumnFamilyIDZSetScore) {
-    return rocksdb::Status::OK();
-  }
-
-  std::string ns, user_key, sub_key;
-  std::vector<std::string> command_args;
-  if (column_family_id == kColumnFamilyIDMetadata) {
-    ExtractNamespaceKey(key, &ns, &user_key);
-    command_args = {"DEL", user_key};
-  } else if (column_family_id == kColumnFamilyIDDefault) {
-    InternalKey ikey(key);
-    user_key = ikey.GetKey().ToString();
-    sub_key = ikey.GetSubKey().ToString();
-    ns = ikey.GetNamespace().ToString();
-    switch (log_data_.GetRedisType()) {
-      case kRedisHash: command_args = {"HDEL", user_key, sub_key};
-        break;
-      case kRedisSet: command_args = {"SREM", user_key, sub_key};
-        break;
-      case kRedisZSet: command_args = {"ZREM", user_key, sub_key};
-        break;
-      case kRedisList: {
-        auto args = log_data_.GetArguments();
-        if (args->size() < 1) {
-          LOG(ERROR) << "Failed to parse write_batch in DeleteCF for list type: args error, should contain cmd";
-          return rocksdb::Status::OK();
-        }
-        RedisCommand cmd = static_cast<RedisCommand >(std::stoi((*args)[0]));
-        switch (cmd) {
-          case kRedisCmdLTrim:
-            if (firstSeen_) {
-              if (args->size() < 3) {
-                LOG(ERROR) << "Failed to parse write_batch in DeleteCF for cmd ltrim: args error, should contain start and stop";
-                return rocksdb::Status::OK();
-              }
-              command_args = {"LTRIM", user_key, (*args)[1], (*args)[2]};
-              firstSeen_ = false;
-            }
-            break;
-          case kRedisCmdLRem:
-            if (firstSeen_) {
-              if (args->size() < 3) {
-                LOG(ERROR) << "Failed to parse write_batch in DeleteCF cmd LREM: args error, should contain count and value";
-                return rocksdb::Status::OK();
-              }
-              command_args = {"LREM", user_key, (*args)[1], (*args)[2]};
-              firstSeen_ = false;
-            }
-            break;
-          default: command_args = {cmd == kRedisCmdLPop ? "LPOP" : "RPOP", user_key};
-        }
-        break;
-      }
-      default: break;
-    }
-  }
-
-  if (!command_args.empty()) {
-    aof_strings_[ns].emplace_back(Rocksdb2Redis::Command2RESP(command_args));
-  }
-  return rocksdb::Status::OK();
-}
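The extractor above only fills `aof_strings_`; something still has to feed it the raw batch and flush the result. As a minimal sketch (the real driver is `Parser::ParseWriteBatch` in parser.cc, which this patch also removes; the helper name `ReplayBatch` below is made up purely for illustration), the flow looks roughly like this:

```cpp
#include <rocksdb/write_batch.h>
// plus parser.h / writer.h from this tool

// Sketch only: reconstruct a batch from the replication stream and turn it
// into per-namespace RESP commands via the WriteBatchExtractor handler.
rocksdb::Status ReplayBatch(const std::string &batch_string, Writer *writer) {
  rocksdb::WriteBatch batch(batch_string);   // rep string as received from PSYNC
  WriteBatchExtractor extractor;
  auto s = batch.Iterate(&extractor);        // invokes LogData/PutCF/DeleteCF per record
  if (!s.ok()) return s;
  for (const auto &iter : *extractor.GetAofStrings()) {
    auto ws = writer->Write(iter.first, iter.second);  // iter.first is the namespace
    if (!ws.IsOK()) return rocksdb::Status::IOError(ws.Msg());
  }
  return rocksdb::Status::OK();
}
```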
diff --git a/tools/kvrocks2redis/parser.h b/tools/kvrocks2redis/parser.h
deleted file mode 100644
index 9593780..0000000
--- a/tools/kvrocks2redis/parser.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#pragma once
-
-#include <string>
-#include <map>
-#include <vector>
-
-#include "../../src/redis_db.h"
-#include "../../src/status.h"
-#include "../../src/storage.h"
-#include "../../src/redis_metadata.h"
-
-#include "config.h"
-#include "writer.h"
-
-class LatestSnapShot {
- public:
-  explicit LatestSnapShot(rocksdb::DB *db) : db_(db) {
-    snapshot_ = db_->GetSnapshot();
-  }
-  ~LatestSnapShot() {
-    db_->ReleaseSnapshot(snapshot_);
-  }
-  const rocksdb::Snapshot *GetSnapShot() { return snapshot_; }
- private:
-  rocksdb::DB *db_ = nullptr;
-  const rocksdb::Snapshot *snapshot_ = nullptr;
-};
-
-class Parser {
- public:
-  explicit Parser(Engine::Storage *storage, Writer *writer)
-      : storage_(storage), writer_(writer) {
-    lastest_snapshot_ = new LatestSnapShot(storage->GetDB());
-  }
-  ~Parser() { delete lastest_snapshot_; }
-  Status ParseFullDB();
-  rocksdb::Status ParseWriteBatch(const std::string &batch_string);
-
- protected:
-  Engine::Storage *storage_ = nullptr;
-  Writer *writer_ = nullptr;
-  LatestSnapShot *lastest_snapshot_ = nullptr;
-
-  Status parseSimpleKV(const Slice &ns_key, const Slice &value, int expire);
-  Status parseComplexKV(const Slice &ns_key, const Metadata &metadata);
-  Status parseBitmapSegment(const Slice &ns, const Slice &user_key, int index, const Slice &bitmap);
-};
-
-/*
- * An extractor that turns a raw rocksdb write batch back into Redis commands
- */
-class WriteBatchExtractor : public rocksdb::WriteBatch::Handler {
- public:
-  void LogData(const rocksdb::Slice &blob) override;
-  rocksdb::Status PutCF(uint32_t column_family_id, const Slice &key,
-                        const Slice &value) override;
-
-  rocksdb::Status DeleteCF(uint32_t column_family_id, const Slice &key) override;
-  std::map<std::string, std::vector<std::string>> *GetAofStrings() { return &aof_strings_; }
- private:
-  std::map<std::string, std::vector<std::string>> aof_strings_;
-  Redis::WriteBatchLogData log_data_;
-  bool firstSeen_ = true;
-};
diff --git a/tools/kvrocks2redis/redis_writer.cc b/tools/kvrocks2redis/redis_writer.cc
deleted file mode 100644
index 7d636c5..0000000
--- a/tools/kvrocks2redis/redis_writer.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-#include "redis_writer.h"
-#include <fcntl.h>
-#include <unistd.h>
-#include <assert.h>
-#include <system_error>
-
-#include "../../src/util.h"
-#include "../../src/redis_reply.h"
-
-#include "util.h"
-
-RedisWriter::RedisWriter(Kvrocks2redis::Config *config) : Writer(config) {
-  try {
-    t_ = std::thread([this]() {
-      Util::ThreadSetName("redis-writer");
-      this->sync();
-      assert(stop_flag_);
-    });
-  } catch (const std::system_error &e) {
-    LOG(ERROR) << "[kvrocks2redis] Failed to create thread: " << e.what();
-    return;
-  }
-}
-
-RedisWriter::~RedisWriter() {
-  for (const auto &iter : next_offset_fds_) {
-    close(iter.second);
-  }
-  for (const auto &iter : redis_fds_) {
-    close(iter.second);
-  }
-}
-
-Status RedisWriter::Write(const std::string &ns, const std::vector<std::string> &aofs) {
-  auto s = Writer::Write(ns, aofs);
-  if (!s.IsOK()) {
-    return s;
-  }
-
-  return Status::OK();
-}
-
-Status RedisWriter::FlushAll(const std::string &ns) {
-  auto s = Writer::FlushAll(ns);
-  if (!s.IsOK()) {
-    return s;
-  }
-
-  updateNextOffset(ns, 0);
-
-  // Warning: this will flush all redis data
-  s = Write(ns, {Rocksdb2Redis::Command2RESP({"FLUSHALL"})});
-  if (!s.IsOK()) return s;
-
-  return Status::OK();
-}
-
-void RedisWriter::Stop() {
-  if (stop_flag_) return;
-
-  stop_flag_ = true;  // Signal the sync loop to stop; it checks this flag on every pass
-  t_.join();          // Wait for the sync thread to finish
-  LOG(INFO) << "[kvrocks2redis] redis_writer Stopped";
-}
-
-void RedisWriter::sync() {
-  for (const auto &iter : config_->tokens) {
-    Status s = readNextOffsetFromFile(iter.first, &next_offsets_[iter.first]);
-    if (!s.IsOK()) {
-      LOG(ERROR) << s.Msg();
-      return;
-    }
-  }
-
-  size_t chunk_size = 4 * 1024 * 1024;
-  char *buffer = new char[chunk_size];
-  while (!stop_flag_) {
-    for (const auto &iter : config_->tokens) {
-      Status s = GetAofFd(iter.first);
-      if (!s.IsOK()) {
-        LOG(ERROR) << s.Msg();
-        continue;
-      }
-
-      s = getRedisConn(iter.first, iter.second.host, iter.second.port, iter.second.auth);
-      if (!s.IsOK()) {
-        LOG(ERROR) << s.Msg();
-        continue;
-      }
-
-      while (true) {
-        auto read_len = pread(aof_fds_[iter.first], buffer, chunk_size, next_offsets_[iter.first]);
-        if (read_len <= 0) {
-          if (read_len < 0) {
-            LOG(ERROR) << "Failed to read aof file: " << strerror(errno);
-          }
-          break;
-        }
-
-        s = Util::SockSend(redis_fds_[iter.first], std::string(buffer, read_len));
-        if (!s.IsOK()) {
-          LOG(ERROR) << "Failed to send data to redis: " + s.Msg();
-        }
-
-        updateNextOffset(iter.first, next_offsets_[iter.first] + read_len);
-      }
-
-      std::this_thread::sleep_for(std::chrono::milliseconds(1));
-    }
-
-  }
-
-  delete[] buffer;
-}
-
-Status RedisWriter::getRedisConn(const std::string &ns,
-                                 const std::string &host,
-                                 const uint32_t &port,
-                                 const std::string &auth) {
-  auto iter = redis_fds_.find(ns);
-  if (iter == redis_fds_.end()) {
-    auto s = Util::SockConnect(host, port, &redis_fds_[ns]);
-    if (!s.IsOK()) {
-      return Status(Status::NotOK, std::string("Failed to connect to redis :") + s.Msg());
-    }
-
-    if (!auth.empty()) {
-      auto s = authRedis(ns, auth);
-      if (!s.IsOK()) {
-        close(redis_fds_[ns]);
-        redis_fds_.erase(ns);
-        return Status(Status::NotOK, s.Msg());
-      }
-    }
-  }
-
-  return Status::OK();
-}
-
-Status RedisWriter::authRedis(const std::string &ns, const std::string &auth) {
-  const auto auth_len_str = std::to_string(auth.length());
-  Util::SockSend(redis_fds_[ns], "*2" CRLF "$4" CRLF "auth" CRLF "$" + auth_len_str + CRLF +
-      auth + CRLF);
-  LOG(INFO) << "[kvrocks2redis] Auth request was sent, waiting for response";
-
-  size_t line_len;
-  evbuffer *evbuf = evbuffer_new();
-  // Read auth response
-  while (true) {
-    if (evbuffer_read(evbuf, redis_fds_[ns], -1) <= 0) {
-      evbuffer_free(evbuf);
-      return Status(Status::NotOK, std::string("read auth response err: ") + strerror(errno));
-    }
-    char *line = evbuffer_readln(evbuf, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-    if (!line) continue;
-    if (strncmp(line, "+OK", 3) != 0) {
-      // Auth failed
-      free(line);
-      evbuffer_free(evbuf);
-      return Status(Status::NotOK, "[kvrocks2redis] Auth failed: " + std::string(line, line_len));
-    }
-    free(line);
-    break;
-  }
-
-  evbuffer_free(evbuf);
-
-  return Status::OK();
-}
-
-Status RedisWriter::updateNextOffset(const std::string &ns, std::istream::off_type offset) {
-  next_offsets_[ns] = offset;
-  return writeNextOffsetToFile(ns, offset);
-}
-
-Status RedisWriter::readNextOffsetFromFile(const std::string &ns, std::istream::off_type *offset) {
-  next_offset_fds_[ns] = open(getNextOffsetFilePath(ns).data(), O_RDWR | O_CREAT, 0666);
-  if (next_offset_fds_[ns] < 0) {
-    return Status(Status::NotOK, std::string("Failed to open next offset file :") + strerror(errno));
-  }
-
-  *offset = 0;
-  // 256 + 1 byte, extra one byte for the ending \0
-  char buf[257];
-  memset(buf, '\0', sizeof(buf));
-  if (read(next_offset_fds_[ns], buf, sizeof(buf)) > 0) {
-    *offset = std::stoll(buf);
-  }
-
-  return Status::OK();
-}
-
-Status RedisWriter::writeNextOffsetToFile(const std::string &ns, std::istream::off_type offset) {
-  std::string offset_string = std::to_string(offset);
-  // pad to 256 bytes so any previously written (longer) offset is fully overwritten
-  int append_byte = 256 - offset_string.size();
-  while (append_byte-- > 0) {
-    offset_string += " ";
-  }
-  offset_string += '\0';
-  pwrite(next_offset_fds_[ns], offset_string.data(), offset_string.size(), 0);
-  return Status::OK();
-}
-
-std::string RedisWriter::getNextOffsetFilePath(const std::string &ns) {
-  return config_->dir + "/" + ns + "_" + config_->next_offset_file_name;
-}
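For reference, the auth request assembled in `authRedis` above is just a two-element RESP array. Using the password `foobared` from the test scripts later in this patch purely as an illustration, the exchange looks like:

```cpp
// Frame sent by RedisWriter::authRedis (password value is illustrative only):
const char *auth_req = "*2\r\n$4\r\nauth\r\n$8\r\nfoobared\r\n";
// Reply expected on success, checked with strncmp(line, "+OK", 3):
const char *auth_ok  = "+OK\r\n";
```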
diff --git a/tools/kvrocks2redis/redis_writer.h b/tools/kvrocks2redis/redis_writer.h
deleted file mode 100644
index 9f4b0a7..0000000
--- a/tools/kvrocks2redis/redis_writer.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#pragma once
-
-#include <glog/logging.h>
-#include <string>
-#include <vector>
-#include <thread>
-
-#include "writer.h"
-
-class RedisWriter : public Writer {
- public:
-  explicit RedisWriter(Kvrocks2redis::Config *config);
-  ~RedisWriter();
-  Status Write(const std::string &ns, const std::vector<std::string> &aofs) override;
-  Status FlushAll(const std::string &ns) override;
-
-  void Stop() override;
-
- private:
-  std::thread t_;
-  bool stop_flag_ = false;
-  std::map<std::string, int> next_offset_fds_;
-  std::map<std::string, std::istream::off_type> next_offsets_;
-  std::map<std::string, int> redis_fds_;
-
-  void sync();
-  Status getRedisConn(const std::string &ns, const std::string &host, const uint32_t &port, const std::string &auth);
-  Status authRedis(const std::string &ns, const std::string &auth);
-
-  Status updateNextOffset(const std::string &ns, std::istream::off_type offset);
-  Status readNextOffsetFromFile(const std::string &ns, std::istream::off_type *offset);
-  Status writeNextOffsetToFile(const std::string &ns, std::istream::off_type offset);
-  std::string getNextOffsetFilePath(const std::string &ns);
-};
diff --git a/tools/kvrocks2redis/sync.cc b/tools/kvrocks2redis/sync.cc
deleted file mode 100644
index cef9198..0000000
--- a/tools/kvrocks2redis/sync.cc
+++ /dev/null
@@ -1,234 +0,0 @@
-#include "sync.h"
-#include <event2/buffer.h>
-#include <event2/bufferevent.h>
-#include <event2/event.h>
-#include <glog/logging.h>
-#include <rocksdb/write_batch.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <string>
-#include <fstream>
-
-#include "../../src/redis_reply.h"
-
-#include "util.h"
-
-void send_string_to_event(bufferevent *bev, const std::string &data) {
-  auto output = bufferevent_get_output(bev);
-  evbuffer_add(output, data.c_str(), data.length());
-}
-
-Sync::Sync(Server *srv, Writer *writer, Parser *parser, Kvrocks2redis::Config *config)
-    : ReplicationThread(config->kvrocks_host, config->kvrocks_port, srv, config->kvrocks_auth),
-      storage_(srv->storage_),
-      writer_(writer),
-      parser_(parser),
-      config_(config),
-      sync_state_(kReplConnecting),
-      psync_steps_(this,
-                   CallbacksStateMachine::CallbackList{
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::WRITE, "psync write", tryPSyncWriteCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::READ, "psync read", tryPSyncReadCB
-                       },
-                       CallbacksStateMachine::CallbackType{
-                           CallbacksStateMachine::READ, "batch loop", incrementBatchLoopCB
-                       }
-                   }) {
-}
-
-/*
- * Connect to kvrocks and run the following steps asynchronously:
- *  - TryPSync
- *    - if ok, enter IncrementBatchLoop
- *    - if not, run parseKVFromLocalStorage and restart TryPSync when done
- */
-void Sync::Start() {
-  auto s = readNextSeqFromFile(&next_seq_);
-  if (!s.IsOK()) {
-    LOG(ERROR) << s.Msg();
-    return;
-  }
-
-  base_ = event_base_new();
-  if (base_ == nullptr) {
-    LOG(ERROR) << "[kvrocks2redis] Failed to create new ev base";
-    return;
-  }
-
-  LOG(INFO) << "[kvrocks2redis] Start sync the data from kvrocks to redis";
-
-  psync_steps_.Start();
-
-  auto timer = event_new(base_, -1, EV_PERSIST, EventTimerCB, this);
-  timeval tmo{1, 0};  // 1 sec
-  evtimer_add(timer, &tmo);
-
-  event_base_dispatch(base_);
-  event_free(timer);
-  event_base_free(base_);
-}
-
-void Sync::Stop() {
-  if (stop_flag_) return;
-
-  stop_flag_ = true;  // Stopping procedure is asynchronous,
-  // handled by timer
-  LOG(INFO) << "[kvrocks2redis] Stopped";
-}
-
-Sync::CBState Sync::tryPSyncWriteCB(
-    bufferevent *bev, void *ctx) {
-  auto self = static_cast<Sync *>(ctx);
-
-  const auto seq_str = std::to_string(self->next_seq_);
-  const auto seq_len_str = std::to_string(seq_str.length());
-  const auto cmd_str = "*2" CRLF "$5" CRLF "PSYNC" CRLF "$" + seq_len_str +
-      CRLF + seq_str + CRLF;
-  send_string_to_event(bev, cmd_str);
-  self->sync_state_ = kReplSendPSync;
-  LOG(INFO) << "[kvrocks2redis] Try to use psync, next seq: " << self->next_seq_;
-  return CBState::NEXT;
-}
-
-Sync::CBState Sync::tryPSyncReadCB(bufferevent *bev,
-                                   void *ctx) {
-  char *line;
-  size_t line_len;
-  auto self = static_cast<Sync *>(ctx);
-  auto input = bufferevent_get_input(bev);
-  line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-  if (!line) return CBState::AGAIN;
-
-  if (strncmp(line, "+OK", 3) != 0) {
-    if (self->next_seq_ > 0) {
-      // Failed to psync and the sync process has been terminated; the administrator should be notified.
-      // If a full sync is needed, remove the next seq file (next_seq_file_path in the config) and restart kvrocks2redis.
-      LOG(ERROR) << "[kvrocks2redis] CRITICAL - Failed to psync, administrator confirmation needed";
-      self->stop_flag_ = true;
-      return CBState::QUIT;
-    }
-    // PSYNC isn't OK, so fall back to a full parse of the local storage
-    self->parseKVFromLocalStorage();
-    LOG(INFO) << "[kvrocks2redis] Failed to psync, switch to parseKVFromLocalStorage";
-    LOG(INFO) << line;
-    free(line);
-    // Restart psync state machine
-    return CBState::RESTART;
-  } else {
-    // PSYNC is OK, use IncrementBatchLoop
-    free(line);
-    LOG(INFO) << "[kvrocks2redis] PSync is ok, start increment batch loop";
-    return CBState::NEXT;
-  }
-}
-
-Sync::CBState Sync::incrementBatchLoopCB(
-    bufferevent *bev, void *ctx) {
-  char *line = nullptr;
-  size_t line_len = 0;
-  char *bulk_data = nullptr;
-  auto self = static_cast<Sync *>(ctx);
-  self->sync_state_ = kReplConnected;
-  auto input = bufferevent_get_input(bev);
-  while (true) {
-    switch (self->incr_state_) {
-      case Incr_batch_size:
-        // Read bulk length
-        line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
-        if (!line) return CBState::AGAIN;
-        self->incr_bulk_len_ = line_len > 0 ? std::strtoull(line + 1, nullptr, 10) : 0;
-        free(line);
-        if (self->incr_bulk_len_ == 0) {
-          LOG(ERROR) << "[kvrocks2redis] Invalid increment data size";
-          return CBState::RESTART;
-        }
-        self->incr_state_ = Incr_batch_data;  // intentional fall through: read the batch data in the same callback
-      case Incr_batch_data:
-        // Read bulk data (batch data)
-        if (self->incr_bulk_len_ + 2 <= evbuffer_get_length(input)) {  // We got enough data
-          bulk_data = reinterpret_cast<char *>(evbuffer_pullup(input, self->incr_bulk_len_ + 2));
-          auto bat = rocksdb::WriteBatch(std::string(bulk_data, self->incr_bulk_len_));
-          int count = bat.Count();
-
-          self->parser_->ParseWriteBatch(std::string(bulk_data, self->incr_bulk_len_));
-
-          self->updateNextSeq(self->next_seq_ + count);
-
-          evbuffer_drain(input, self->incr_bulk_len_ + 2);
-          self->incr_state_ = Incr_batch_size;
-        } else {
-          return CBState::AGAIN;
-        }
-    }
-  }
-}
-
-// Check if stop_flag_ is set; if so, tear down kvrocks2redis
-void Sync::EventTimerCB(int, int16_t, void *ctx) {
-  // DLOG(INFO) << "[kvrocks2redis] timer";
-  auto self = static_cast<Sync *>(ctx);
-  if (self->stop_flag_) {
-    LOG(INFO) << "[kvrocks2redis] Stop ev loop";
-    event_base_loopbreak(self->base_);
-    self->psync_steps_.Stop();
-    self->writer_->Stop();
-    // stop parseKVFromLocalStorage too?
-  }
-}
-
-void Sync::parseKVFromLocalStorage() {
-  LOG(INFO) << "[kvrocks2redis] Start parsing kv from the local storage";
-  for (const auto &iter : config_->tokens) {
-    auto s = writer_->FlushAll(iter.first);
-    if (!s.IsOK()) {
-      LOG(ERROR) << "[kvrocks2redis] Failed to flush all in namespace: " << iter.first
-                 << ", encounter error: " << s.Msg();
-      return;
-    }
-  }
-
-  Status s = parser_->ParseFullDB();
-  if (!s.IsOK()) {
-    LOG(ERROR) << "[kvrocks2redis] Failed to parse full db, encounter error: " << s.Msg();
-    return;
-  }
-  updateNextSeq(storage_->LatestSeq() + 1);
-}
-
-Status Sync::updateNextSeq(rocksdb::SequenceNumber seq) {
-  next_seq_ = seq;
-  return writeNextSeqToFile(seq);
-}
-
-Status Sync::readNextSeqFromFile(rocksdb::SequenceNumber *seq) {
-  next_seq_fd_ = open(config_->next_seq_file_path.data(), O_RDWR | O_CREAT, 0666);
-  if (next_seq_fd_ < 0) {
-    return Status(Status::NotOK, std::string("Failed to open next seq file :") + strerror(errno));
-  }
-
-  *seq = 0;
-  // 21 + 1 byte, extra one byte for the ending \0
-  char buf[22];
-  memset(buf, '\0', sizeof(buf));
-  if (read(next_seq_fd_, buf, sizeof(buf)) > 0) {
-    *seq = static_cast<rocksdb::SequenceNumber>(std::stoull(buf));  // sequence numbers are 64-bit, stoi would overflow
-  }
-
-  return Status::OK();
-}
-
-Status Sync::writeNextSeqToFile(rocksdb::SequenceNumber seq) {
-  std::string seq_string = std::to_string(seq);
-  // pad to 21 bytes (wide enough for the largest SequenceNumber) so the previous value is fully overwritten
-  int append_byte = 21 - seq_string.size();
-  while (append_byte-- > 0) {
-    seq_string += " ";
-  }
-  seq_string += '\0';
-  pwrite(next_seq_fd_, seq_string.data(), seq_string.size(), 0);
-  return Status::OK();
-}
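Two small formats are used above and are easy to get wrong, so here is a worked example with a hypothetical `next_seq_` of 1234: `tryPSyncWriteCB` sends a two-element RESP array, and `writeNextSeqToFile` keeps the number left-aligned in a fixed-width field so a shorter value always overwrites an older, longer one.

```cpp
// PSYNC request emitted by tryPSyncWriteCB for next_seq_ = 1234 (value is illustrative):
const char *psync_req = "*2\r\n$5\r\nPSYNC\r\n$4\r\n1234\r\n";
// Next-seq file written by writeNextSeqToFile for the same value:
//   "1234" + 17 spaces + '\0'  -> 22 bytes, pwrite()'d at offset 0
```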
diff --git a/tools/kvrocks2redis/sync.h b/tools/kvrocks2redis/sync.h
deleted file mode 100644
index ed1bd5b..0000000
--- a/tools/kvrocks2redis/sync.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#pragma once
-
-#include <event2/bufferevent.h>
-#include <unistd.h>
-#include <fstream>
-
-#include "../../src/status.h"
-#include "../../src/storage.h"
-#include "../../src/replication.h"
-#include "../../src/server.h"
-
-#include "config.h"
-#include "writer.h"
-#include "parser.h"
-
-class Sync : public ReplicationThread {
- public:
-  explicit Sync(Server *srv, Writer *writer, Parser *parser, Kvrocks2redis::Config *config);
-  ~Sync() {
-    if (next_seq_fd_ != -1) close(next_seq_fd_);
-  }
-  void Start();
-  void Stop();
-  bool IsStopped() { return stop_flag_; }
-
- private:
-  bool stop_flag_ = false;
-  Engine::Storage *storage_ = nullptr;
-  Writer *writer_ = nullptr;
-  Parser *parser_ = nullptr;
-  Kvrocks2redis::Config *config_ = nullptr;
-  ReplState sync_state_;
-  int next_seq_fd_ = -1;  // -1 until readNextSeqFromFile() opens the file
-  rocksdb::SequenceNumber next_seq_ = static_cast<rocksdb::SequenceNumber>(0);
-
-  using CBState = CallbacksStateMachine::State;
-  CallbacksStateMachine psync_steps_;
-
-  // Internal states managed by IncrementBatchLoop procedure
-  enum IncrementBatchLoopState {
-    Incr_batch_size,
-    Incr_batch_data,
-  } incr_state_ = Incr_batch_size;
-
-  size_t incr_bulk_len_ = 0;
-
-  static CBState tryPSyncWriteCB(bufferevent *bev, void *ctx);
-  static CBState tryPSyncReadCB(bufferevent *bev, void *ctx);
-  static CBState incrementBatchLoopCB(bufferevent *bev, void *ctx);
-
-  static void EventTimerCB(int, int16_t, void *ctx);
-
-  void parseKVFromLocalStorage();
-
-  Status updateNextSeq(rocksdb::SequenceNumber seq);
-  Status readNextSeqFromFile(rocksdb::SequenceNumber *seq);
-  Status writeNextSeqToFile(rocksdb::SequenceNumber seq);
-};
diff --git a/tools/kvrocks2redis/tests/README.md b/tools/kvrocks2redis/tests/README.md
deleted file mode 100644
index 0db2a56..0000000
--- a/tools/kvrocks2redis/tests/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-## Populate data and append new data scripts
-* For testing kvrocks2redis: populate data, then manually check the generated aof
-
-## Usage
-
-* Start kvrocks and kvrocks2redis
-    * TODO: automatically create a docker env
-* Install the dependency:
-    * pip install git+https://github.com/andymccurdy/redis-py.git@2.10.3
-* Run:
-```
-# populate data
-python populate-kvrocks.py  
-# check generated aof file 
-# append new data 
-python append-data-to-kvrocks.py
-# check appended new aof data
-```
diff --git a/tools/kvrocks2redis/tests/append-data-to-kvrocks.py b/tools/kvrocks2redis/tests/append-data-to-kvrocks.py
deleted file mode 100644
index 39a19a6..0000000
--- a/tools/kvrocks2redis/tests/append-data-to-kvrocks.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import redis
-
-range=100
-factor=32
-port=6666
-
-r = redis.StrictRedis(host='localhost', port=port, db=0, password='foobared')
-
-# string
-rst = r.set('foo', 2)  # update old
-assert rst
-rst = r.set('foo2', 2)  # add new
-assert rst
-
-rst = r.setex('foo_ex', 7200, 2)
-assert rst
-
-# zset
-rst = r.zadd('zfoo', 4, 'd')
-assert(rst == 1)
-rst = r.zrem('zfoo', 'd')
-assert(rst == 1)
-
-# list
-rst = r.lset('lfoo', 0, 'a')
-assert(rst == 1)
-rst = r.rpush('lfoo', 'a')
-assert(rst == 5)
-rst = r.lpush('lfoo', 'b')
-assert(rst == 6)
-rst = r.lpop('lfoo')
-assert(rst == 'b')
-rst = r.rpop('lfoo')
-assert(rst == 'a')
-rst = r.ltrim('lfoo', 0, 2)
-assert rst
-
-# set
-rst = r.sadd('sfoo', 'f')
-assert(rst == 1)
-rst = r.srem('sfoo', 'f')
-assert(rst == 1)
-
-# hash
-rst = r.hset('hfoo', 'b', 2)
-assert(rst == 1)
-rst = r.hdel('hfoo', 'b')
-assert(rst == 1)
-
-# bitmap
-rst = r.setbit('bfoo', 0, 0)  # update old
-assert(rst == 1)
-rst = r.setbit('bfoo', 900000, 1)  # add new
-assert(rst == 0)
-
-# expire cmd
-rst = r.expire('foo', 7200)
-assert rst
-rst = r.expire('zfoo', 7200)
-assert rst
-
-# del cmd
-rst = r.delete('foo')
-assert rst
-rst = r.delete('zfoo')
-assert rst
-
diff --git a/tools/kvrocks2redis/tests/populate-kvrocks.py b/tools/kvrocks2redis/tests/populate-kvrocks.py
deleted file mode 100644
index 4c2026c..0000000
--- a/tools/kvrocks2redis/tests/populate-kvrocks.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import redis
-
-range=100
-factor=32
-port=6666
-
-r = redis.StrictRedis(host='localhost', port=port, db=0, password='foobared')
-
-# flushall ?
-# rst = r.flushall()
-# assert rst
-
-# string
-rst = r.set('foo', 1)
-assert rst
-
-rst = r.setex('foo_ex', 3600, 1)
-assert rst
-
-# zset
-rst = r.zadd('zfoo', 1, 'a', 2, 'b', 3, 'c')
-assert(rst == 3)
-
-# list
-rst = r.rpush('lfoo', 1, 2, 3, 4)
-assert(rst == 4)
-
-# set
-rst = r.sadd('sfoo', 'a', 'b', 'c', 'd')
-assert(rst == 4)
-
-# hash
-rst = r.hset('hfoo', 'a', 1)
-assert(rst == 1)
-
-# bitmap
-rst = r.setbit('bfoo', 0, 1)
-assert(rst == 0)
-rst = r.setbit('bfoo', 1, 1)
-assert(rst == 0)
-rst = r.setbit('bfoo', 800000, 1)
-assert(rst == 0)
-
-# expire cmd
-rst = r.expire('foo', 3600)
-assert rst
-rst = r.expire('zfoo', 3600)
-assert rst
-
-
-
-
-
-
diff --git a/tools/kvrocks2redis/util.cc b/tools/kvrocks2redis/util.cc
deleted file mode 100644
index 519b256..0000000
--- a/tools/kvrocks2redis/util.cc
+++ /dev/null
@@ -1,13 +0,0 @@
-#include "util.h"
-
-#include "../../src/redis_reply.h"
-
-std::string Rocksdb2Redis::Command2RESP(const std::vector<std::string> &cmd_args) {
-  std::string output;
-  output.append("*" + std::to_string(cmd_args.size()) + CRLF);
-  for (const auto &arg : cmd_args) {
-    output.append("$" + std::to_string(arg.size()) + CRLF);
-    output.append(arg + CRLF);
-  }
-  return output;
-}
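As a quick sanity check of the encoder above (the SET command is just an illustration, not something this tool necessarily emits):

```cpp
// Command2RESP({"SET", "foo", "bar"}) returns the RESP array:
//   "*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
std::string resp = Rocksdb2Redis::Command2RESP({"SET", "foo", "bar"});
```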
diff --git a/tools/kvrocks2redis/util.h b/tools/kvrocks2redis/util.h
deleted file mode 100644
index bc49473..0000000
--- a/tools/kvrocks2redis/util.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#pragma once
-
-#include <string>
-#include <vector>
-
-class Rocksdb2Redis {
- public:
-  static std::string Command2RESP(const std::vector<std::string> &cmd_args);
-};
diff --git a/tools/kvrocks2redis/writer.cc b/tools/kvrocks2redis/writer.cc
deleted file mode 100644
index 5598c18..0000000
--- a/tools/kvrocks2redis/writer.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-#include "writer.h"
-#include <fcntl.h>
-#include <unistd.h>
-#include <cstring>
-
-Writer::~Writer() {
-  for (const auto &iter : aof_fds_) {
-    close(iter.second);
-  }
-}
-
-Status Writer::Write(const std::string &ns, const std::vector<std::string> &aofs) {
-  auto s = GetAofFd(ns);
-  if (!s.IsOK()) {
-    return Status(Status::NotOK, s.Msg());
-  }
-  for (size_t i = 0; i < aofs.size(); i++) {
-    write(aof_fds_[ns], aofs[i].data(), aofs[i].size());
-  }
-
-  return Status::OK();
-}
-
-Status Writer::FlushAll(const std::string &ns) {
-  auto s = GetAofFd(ns, true);
-  if (!s.IsOK()) {
-    return Status(Status::NotOK, s.Msg());
-  }
-
-  return Status::OK();
-}
-
-Status Writer::GetAofFd(const std::string &ns, bool truncate) {
-  auto aof_fd = aof_fds_.find(ns);
-  if (aof_fd == aof_fds_.end()) {
-    return OpenAofFile(ns, truncate);
-  } else if (truncate) {
-    close(aof_fds_[ns]);
-    return OpenAofFile(ns, truncate);
-  }
-  if (aof_fds_[ns] < 0) {
-    return Status(Status::NotOK, std::string("Failed to open aof file :") + strerror(errno));
-  }
-  return Status::OK();
-}
-
-Status Writer::OpenAofFile(const std::string &ns, bool truncate) {
-  int openmode = O_RDWR | O_CREAT | O_APPEND;
-  if (truncate) {
-    openmode |= O_TRUNC;
-  }
-  aof_fds_[ns] = open(GetAofFilePath(ns).data(), openmode, 0666);
-  if (aof_fds_[ns] < 0) {
-    return Status(Status::NotOK, std::string("Failed to open aof file :") + strerror(errno));
-  }
-
-  return Status::OK();
-}
-
-std::string Writer::GetAofFilePath(const std::string &ns) {
-  return config_->dir + "/" + ns + "_" + config_->aof_file_name;
-}
-
diff --git a/tools/kvrocks2redis/writer.h b/tools/kvrocks2redis/writer.h
deleted file mode 100644
index 3e09aa0..0000000
--- a/tools/kvrocks2redis/writer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#pragma once
-
-#include <string>
-#include <map>
-#include <fstream>
-#include <vector>
-
-#include "../../src/status.h"
-
-#include "config.h"
-
-class Writer {
- public:
-  explicit Writer(Kvrocks2redis::Config *config) : config_(config) {}
-  virtual ~Writer();  // virtual so derived writers (e.g. RedisWriter) are destroyed correctly
-  virtual Status Write(const std::string &ns, const std::vector<std::string> &aofs);
-  virtual Status FlushAll(const std::string &ns);
-  virtual void Stop() {}
-  Status OpenAofFile(const std::string &ns, bool truncate);
-  Status GetAofFd(const std::string &ns, bool truncate = false);
-  std::string GetAofFilePath(const std::string &ns);
-
- protected:
-  Kvrocks2redis::Config *config_ = nullptr;
-  std::map<std::string, int> aof_fds_;
-};